ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3–1.04M) |
---|---|---|
py | b40222fff5186e7c77a3467f2e40b4509abbc491 | # coding=utf-8
# *** WARNING: this file was generated by pulumigen. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from ... import _utilities, _tables
from ... import core as _core
from ... import meta as _meta
__all__ = [
'JobArgs',
'JobConditionArgs',
'JobSpecArgs',
'JobStatusArgs',
]
@pulumi.input_type
class JobArgs:
def __init__(__self__, *,
api_version: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']] = None,
spec: Optional[pulumi.Input['JobSpecArgs']] = None,
status: Optional[pulumi.Input['JobStatusArgs']] = None):
"""
Job represents the configuration of a single job.
This resource waits until its status is ready before registering success
for create/update, and populating output properties from the current state of the resource.
The following conditions are used to determine whether the resource creation has
succeeded or failed:
1. The Job's '.status.startTime' is set, which indicates that the Job has started running.
2. The Job's '.status.conditions' has a status of type 'Complete', and a 'status' set
to 'True'.
3. The Job's '.status.conditions' do not have a status of type 'Failed', with a
'status' set to 'True'. If this condition is set, we should fail the Job immediately.
If the Job has not reached a Ready state after 10 minutes, it will
time out and mark the resource update as Failed. You can override the default timeout value
by setting the 'customTimeouts' option on the resource.
:param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
:param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param pulumi.Input['_meta.v1.ObjectMetaArgs'] metadata: Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
:param pulumi.Input['JobSpecArgs'] spec: Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
:param pulumi.Input['JobStatusArgs'] status: Current status of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
"""
if api_version is not None:
pulumi.set(__self__, "api_version", 'batch/v1')
if kind is not None:
pulumi.set(__self__, "kind", 'Job')
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if spec is not None:
pulumi.set(__self__, "spec", spec)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[pulumi.Input[str]]:
"""
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
"""
return pulumi.get(self, "api_version")
@api_version.setter
def api_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_version", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def metadata(self) -> Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]:
"""
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
"""
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter
def spec(self) -> Optional[pulumi.Input['JobSpecArgs']]:
"""
Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
"""
return pulumi.get(self, "spec")
@spec.setter
def spec(self, value: Optional[pulumi.Input['JobSpecArgs']]):
pulumi.set(self, "spec", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input['JobStatusArgs']]:
"""
Current status of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input['JobStatusArgs']]):
pulumi.set(self, "status", value)
@pulumi.input_type
class JobConditionArgs:
def __init__(__self__, *,
status: pulumi.Input[str],
type: pulumi.Input[str],
last_probe_time: Optional[pulumi.Input[str]] = None,
last_transition_time: Optional[pulumi.Input[str]] = None,
message: Optional[pulumi.Input[str]] = None,
reason: Optional[pulumi.Input[str]] = None):
"""
JobCondition describes current state of a job.
:param pulumi.Input[str] status: Status of the condition, one of True, False, Unknown.
:param pulumi.Input[str] type: Type of job condition, Complete or Failed.
:param pulumi.Input[str] last_probe_time: Last time the condition was checked.
        :param pulumi.Input[str] last_transition_time: Last time the condition transitioned from one status to another.
:param pulumi.Input[str] message: Human readable message indicating details about last transition.
:param pulumi.Input[str] reason: (brief) reason for the condition's last transition.
"""
pulumi.set(__self__, "status", status)
pulumi.set(__self__, "type", type)
if last_probe_time is not None:
pulumi.set(__self__, "last_probe_time", last_probe_time)
if last_transition_time is not None:
pulumi.set(__self__, "last_transition_time", last_transition_time)
if message is not None:
pulumi.set(__self__, "message", message)
if reason is not None:
pulumi.set(__self__, "reason", reason)
@property
@pulumi.getter
def status(self) -> pulumi.Input[str]:
"""
Status of the condition, one of True, False, Unknown.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: pulumi.Input[str]):
pulumi.set(self, "status", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
Type of job condition, Complete or Failed.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="lastProbeTime")
def last_probe_time(self) -> Optional[pulumi.Input[str]]:
"""
Last time the condition was checked.
"""
return pulumi.get(self, "last_probe_time")
@last_probe_time.setter
def last_probe_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "last_probe_time", value)
@property
@pulumi.getter(name="lastTransitionTime")
def last_transition_time(self) -> Optional[pulumi.Input[str]]:
"""
        Last time the condition transitioned from one status to another.
"""
return pulumi.get(self, "last_transition_time")
@last_transition_time.setter
def last_transition_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "last_transition_time", value)
@property
@pulumi.getter
def message(self) -> Optional[pulumi.Input[str]]:
"""
Human readable message indicating details about last transition.
"""
return pulumi.get(self, "message")
@message.setter
def message(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "message", value)
@property
@pulumi.getter
def reason(self) -> Optional[pulumi.Input[str]]:
"""
(brief) reason for the condition's last transition.
"""
return pulumi.get(self, "reason")
@reason.setter
def reason(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "reason", value)
@pulumi.input_type
class JobSpecArgs:
def __init__(__self__, *,
template: pulumi.Input['_core.v1.PodTemplateSpecArgs'],
active_deadline_seconds: Optional[pulumi.Input[float]] = None,
backoff_limit: Optional[pulumi.Input[float]] = None,
completions: Optional[pulumi.Input[float]] = None,
manual_selector: Optional[pulumi.Input[bool]] = None,
parallelism: Optional[pulumi.Input[float]] = None,
selector: Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']] = None,
ttl_seconds_after_finished: Optional[pulumi.Input[float]] = None):
"""
JobSpec describes how the job execution will look like.
:param pulumi.Input['_core.v1.PodTemplateSpecArgs'] template: Describes the pod that will be created when executing a job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
:param pulumi.Input[float] active_deadline_seconds: Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer
:param pulumi.Input[float] backoff_limit: Specifies the number of retries before marking this job failed. Defaults to 6
:param pulumi.Input[float] completions: Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
:param pulumi.Input[bool] manual_selector: manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector
:param pulumi.Input[float] parallelism: Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
:param pulumi.Input['_meta.v1.LabelSelectorArgs'] selector: A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
:param pulumi.Input[float] ttl_seconds_after_finished: ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature.
"""
pulumi.set(__self__, "template", template)
if active_deadline_seconds is not None:
pulumi.set(__self__, "active_deadline_seconds", active_deadline_seconds)
if backoff_limit is not None:
pulumi.set(__self__, "backoff_limit", backoff_limit)
if completions is not None:
pulumi.set(__self__, "completions", completions)
if manual_selector is not None:
pulumi.set(__self__, "manual_selector", manual_selector)
if parallelism is not None:
pulumi.set(__self__, "parallelism", parallelism)
if selector is not None:
pulumi.set(__self__, "selector", selector)
if ttl_seconds_after_finished is not None:
pulumi.set(__self__, "ttl_seconds_after_finished", ttl_seconds_after_finished)
@property
@pulumi.getter
def template(self) -> pulumi.Input['_core.v1.PodTemplateSpecArgs']:
"""
Describes the pod that will be created when executing a job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
"""
return pulumi.get(self, "template")
@template.setter
def template(self, value: pulumi.Input['_core.v1.PodTemplateSpecArgs']):
pulumi.set(self, "template", value)
@property
@pulumi.getter(name="activeDeadlineSeconds")
def active_deadline_seconds(self) -> Optional[pulumi.Input[float]]:
"""
Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer
"""
return pulumi.get(self, "active_deadline_seconds")
@active_deadline_seconds.setter
def active_deadline_seconds(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "active_deadline_seconds", value)
@property
@pulumi.getter(name="backoffLimit")
def backoff_limit(self) -> Optional[pulumi.Input[float]]:
"""
Specifies the number of retries before marking this job failed. Defaults to 6
"""
return pulumi.get(self, "backoff_limit")
@backoff_limit.setter
def backoff_limit(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "backoff_limit", value)
@property
@pulumi.getter
def completions(self) -> Optional[pulumi.Input[float]]:
"""
Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
"""
return pulumi.get(self, "completions")
@completions.setter
def completions(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "completions", value)
@property
@pulumi.getter(name="manualSelector")
def manual_selector(self) -> Optional[pulumi.Input[bool]]:
"""
manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector
"""
return pulumi.get(self, "manual_selector")
@manual_selector.setter
def manual_selector(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "manual_selector", value)
@property
@pulumi.getter
def parallelism(self) -> Optional[pulumi.Input[float]]:
"""
Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
"""
return pulumi.get(self, "parallelism")
@parallelism.setter
def parallelism(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "parallelism", value)
@property
@pulumi.getter
def selector(self) -> Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']]:
"""
A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
"""
return pulumi.get(self, "selector")
@selector.setter
def selector(self, value: Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']]):
pulumi.set(self, "selector", value)
@property
@pulumi.getter(name="ttlSecondsAfterFinished")
def ttl_seconds_after_finished(self) -> Optional[pulumi.Input[float]]:
"""
ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature.
"""
return pulumi.get(self, "ttl_seconds_after_finished")
@ttl_seconds_after_finished.setter
def ttl_seconds_after_finished(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "ttl_seconds_after_finished", value)
@pulumi.input_type
class JobStatusArgs:
def __init__(__self__, *,
active: Optional[pulumi.Input[float]] = None,
completion_time: Optional[pulumi.Input[str]] = None,
conditions: Optional[pulumi.Input[List[pulumi.Input['JobConditionArgs']]]] = None,
failed: Optional[pulumi.Input[float]] = None,
start_time: Optional[pulumi.Input[str]] = None,
succeeded: Optional[pulumi.Input[float]] = None):
"""
JobStatus represents the current state of a Job.
:param pulumi.Input[float] active: The number of actively running pods.
:param pulumi.Input[str] completion_time: Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.
:param pulumi.Input[List[pulumi.Input['JobConditionArgs']]] conditions: The latest available observations of an object's current state. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
:param pulumi.Input[float] failed: The number of pods which reached phase Failed.
:param pulumi.Input[str] start_time: Represents time when the job was acknowledged by the job controller. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.
:param pulumi.Input[float] succeeded: The number of pods which reached phase Succeeded.
"""
if active is not None:
pulumi.set(__self__, "active", active)
if completion_time is not None:
pulumi.set(__self__, "completion_time", completion_time)
if conditions is not None:
pulumi.set(__self__, "conditions", conditions)
if failed is not None:
pulumi.set(__self__, "failed", failed)
if start_time is not None:
pulumi.set(__self__, "start_time", start_time)
if succeeded is not None:
pulumi.set(__self__, "succeeded", succeeded)
@property
@pulumi.getter
def active(self) -> Optional[pulumi.Input[float]]:
"""
The number of actively running pods.
"""
return pulumi.get(self, "active")
@active.setter
def active(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "active", value)
@property
@pulumi.getter(name="completionTime")
def completion_time(self) -> Optional[pulumi.Input[str]]:
"""
Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.
"""
return pulumi.get(self, "completion_time")
@completion_time.setter
def completion_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "completion_time", value)
@property
@pulumi.getter
def conditions(self) -> Optional[pulumi.Input[List[pulumi.Input['JobConditionArgs']]]]:
"""
The latest available observations of an object's current state. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
"""
return pulumi.get(self, "conditions")
@conditions.setter
def conditions(self, value: Optional[pulumi.Input[List[pulumi.Input['JobConditionArgs']]]]):
pulumi.set(self, "conditions", value)
@property
@pulumi.getter
def failed(self) -> Optional[pulumi.Input[float]]:
"""
The number of pods which reached phase Failed.
"""
return pulumi.get(self, "failed")
@failed.setter
def failed(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "failed", value)
@property
@pulumi.getter(name="startTime")
def start_time(self) -> Optional[pulumi.Input[str]]:
"""
Represents time when the job was acknowledged by the job controller. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.
"""
return pulumi.get(self, "start_time")
@start_time.setter
def start_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "start_time", value)
@property
@pulumi.getter
def succeeded(self) -> Optional[pulumi.Input[float]]:
"""
The number of pods which reached phase Succeeded.
"""
return pulumi.get(self, "succeeded")
@succeeded.setter
def succeeded(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "succeeded", value)
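# A minimal, illustrative sketch (not part of the generated SDK) of overriding the 10 minute
# readiness timeout described in the JobArgs docstring via the 'custom_timeouts' resource option.
# Names assume the public pulumi / pulumi_kubernetes Python packages; the spec values are placeholders.
#
#   import pulumi
#   import pulumi_kubernetes as k8s
#
#   job = k8s.batch.v1.Job(
#       "example-job",
#       spec=k8s.batch.v1.JobSpecArgs(
#           backoff_limit=2,
#           template=k8s.core.v1.PodTemplateSpecArgs(
#               spec=k8s.core.v1.PodSpecArgs(
#                   restart_policy="Never",
#                   containers=[k8s.core.v1.ContainerArgs(
#                       name="main", image="busybox", command=["sh", "-c", "echo done"])]))),
#       opts=pulumi.ResourceOptions(custom_timeouts=pulumi.CustomTimeouts(create="20m")))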
|
py | b40227cc5753489b60ac944b919f978a60c978b1 | #Import Selenium to interact with our target website
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.select import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import StaleElementReferenceException
import SecondIteration
import os
# Helper function to slow down selenium and make sure that our page has loaded
def waitForPageLoad(targetDriver, targetCondition, delay):
try:
myElem = WebDriverWait(targetDriver, delay).until(EC.presence_of_element_located((By.XPATH, targetCondition)))
print("Page is ready!")
    except TimeoutException:
print("Page timed out.")
return
#Filter full list of recipients with the helper function below
def sortRecipients(allRecipients, packageSelections):
cleanedList = []
for key, value in packageSelections.items():
keySplit = key.split(", at: ")
rName = keySplit[0]
rAddress = keySplit[1]
packageSize = value[0]
packageWeight_Lbs = value[1]
packageWeight_Oz = value[2]
packageContents = value[3]
for eachRecipient in allRecipients[2]:
if rName == eachRecipient[1] and rAddress == eachRecipient[2]:
listBuilder = eachRecipient
listBuilder.append(packageSize)
listBuilder.append(packageWeight_Lbs)
listBuilder.append(packageWeight_Oz)
listBuilder.append(packageContents)
cleanedList.append(listBuilder)
print(cleanedList)
return cleanedList
#Clean list of State values to align with dropdown cell input
def cleanStateValues(stateValue):
    stateDictionary = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MS': 'Mississippi',
'MT': 'Montana',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
apoDictionary = {"AE": "Armed Forces Europe"}
stateValue_Upper = str(stateValue).upper()
#If the stateValue is from an APO, it will have different formatting - Account for that case here
for key, value in apoDictionary.items():
formattedString = key + " - " + value
if stateValue_Upper == key or stateValue == value:
return formattedString
#Maybe the state value input already matches - if so, return the state value
elif stateValue == formattedString:
return formattedString
for key, value in stateDictionary.items():
# First check if the stateValue equals a formatted string from the dictionary key value pairs
# If so, its formatted correctly for the drop-down menu - No need to process further
formattedString = key + " - " + str(value).upper()
# Maybe the state value input already matches - if so, return the state value
if stateValue_Upper == formattedString:
return stateValue_Upper
#Check if the stateValue is in the Key value only, or in the dictionary values only
#If so, return the formatted string name
elif str(stateValue_Upper).upper() == key or str(stateValue).upper() == str(value).upper():
return formattedString
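#A quick sanity check of the mapping implemented above (hypothetical inputs):
#   cleanStateValues('ca')         -> 'CA - CALIFORNIA'
#   cleanStateValues('California') -> 'CA - CALIFORNIA'
#   cleanStateValues('AE')         -> 'AE - Armed Forces Europe'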
#TODO update sender input to include the sender row of information
#Need to add it to the input on the SecondIteration.py file, then update the code here to match the new input
def sQuery(recipientInformation, packageSelection):
senderInformation = recipientInformation[1] #Sender information is at list position in recipients list
recipients = recipientInformation[2]
recipientsCleaned = sortRecipients(recipientInformation, packageSelection)
for eachRow in recipientsCleaned:
        #Set firefox preferences, specifically for downloading PDF forms
fp = webdriver.FirefoxProfile()
fp.set_preference("pdfjs.disabled", True)
fp.set_preference("browser.download.manager.showWhenStarting", False)
fp.set_preference("browser.download.manager.useWindow", False)
fp.set_preference("browser.helperApps.neverAsk.saveToDisk", "application/pdf, application/force-download")
#Call the Firefox browser
driver = webdriver.Firefox(fp)
driver.get("https://cfo.usps.com/flow-type")
# Add delay of 10 seconds to ensure that the target page opens
delay = 10 # seconds
try:
myElem = waitForPageLoad(driver, "//input[@value='militaryToUSA']", delay)
print ("Page is ready!")
except TimeoutException:
print ("Loading took too much time!")
# How is the object being routed?
driver.find_element_by_xpath("//input[@value='militaryToUSA']").click()
driver.find_element_by_id("submit").click()
# Does the object weigh less than a pound?
driver.find_element_by_xpath("//input[@value='0']").click()
driver.find_element_by_id("submit").click()
#Sender Zip Code
senderZip = SecondIteration.parseZipCode(senderInformation[5])
receiverZip = SecondIteration.parseZipCode(eachRow[5])
driver.find_element_by_id('senderZipCode').send_keys(senderZip)
        #Receiver Zip Code
driver.find_element_by_id('recipientZipCode').send_keys(receiverZip)
driver.find_element_by_id("submit").click()
#Shipping non-dangerous items?
driver.find_element_by_id("submit").click()
#Enter package weight
myElem = waitForPageLoad(driver, "//input[@id='weightPounds']", delay)
driver.find_element_by_id('weightPounds').send_keys(eachRow[8])
driver.find_element_by_id('weightOunces').send_keys(eachRow[9])
driver.find_element_by_id("submit").click()
#Select package type for Customs form
#Sample xPath query to get the target radio button below
#$x("//div[contains(@class, 'note method-note') and (text()='Large Flat Rate Box')]/ancestor::label/input")
#Add wait here to wait a few seconds for the page to load
#delay = 5
#driver.implicitly_wait(5)#seconds
boxType = eachRow[7]
#xPathQuery = "//div[contains(@class, 'note method-note') and (text()='" + boxType + "')]/ancestor::label/input\")"
element = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH,
"//div[contains(@class, 'note method-note') and (text()='" +
boxType + "')]/ancestor::label/input")))
driver.find_element_by_xpath("//div[contains(@class, 'note method-note') and (text()='" + boxType + "')]/ancestor::label/input").click()
driver.find_element_by_id("submit").click()
#Wait for page to load
element_2 = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, 'firstName')))
# Add Sender Information here - This should be constant for each run
driver.find_element_by_id('firstName').send_keys(senderInformation[1].split()[0])# First Name
driver.find_element_by_id('lastName').send_keys(senderInformation[1].split()[1])#Last Name
driver.find_element_by_id('companyName').send_keys(senderInformation[1].split()[1]) # Company Name as Last Name
driver.find_element_by_id('streetAddress').send_keys(senderInformation[2]) #Street Address
driver.find_element_by_id('city').send_keys(senderInformation[3])
#TODO get dropdown state from menu
#driver.find_element_by_id('stateId').selectByVisibleText(senderInformation[4])
stateSel = Select(driver.find_element_by_id('stateId'))
#Format state input before selecting by visible
formattedStateValue = cleanStateValues(senderInformation[4])
#Wait a second for the dropdown to select, to ensure that input is recorded
#driver.implicitly_wait(5)#seconds
#Alternatively, let's wait until the obscuring element is clear
#driver.wait.until(EC.invisibility_of_element_located((By.XPATH,
# "//div[@class='gray-overlay']")))
WebDriverWait(driver, 2).until(EC.invisibility_of_element_located((By.XPATH,
"//div[@class='gray-overlay']")))
stateSel.select_by_visible_text(formattedStateValue)
        #TODO anonymize phone number and email input
driver.find_element_by_id('phone').send_keys("1234567890")
driver.find_element_by_id('email').send_keys("[email protected]")
driver.find_element_by_id("submit").click()
#Wait for the recipients page to load
ignored_exceptions = (NoSuchElementException, StaleElementReferenceException,)
your_element = WebDriverWait(driver, 10, ignored_exceptions=ignored_exceptions) \
.until(EC.presence_of_element_located((By.ID, 'destination-country')))
WebDriverWait(driver, 10).until(EC.invisibility_of_element_located((By.XPATH,
"//div[@class='gray-overlay']")))
# Add Recipient Information here - This will be unique for each row in the target senders table
driver.find_element_by_id('firstName').send_keys(eachRow[1].split()[0])# First Name
driver.find_element_by_id('lastName').send_keys(eachRow[1].split()[1])#Last Name
driver.find_element_by_id('companyName').send_keys(eachRow[1].split()[1]) # Company Name as Last Name
driver.find_element_by_id('streetAddress').send_keys(eachRow[2]) #Street Address
driver.find_element_by_id('city').send_keys(eachRow[3])
#TODO get dropdown state from menu
#driver.find_element_by_id('stateId').selectByVisibleText(senderInformation[4])
#stateSel.select_by_visible_text(eachRow[4])
#Format state input before selecting by visible
#Wait until the drop down box appears
formattedStateValue_Recipient = cleanStateValues(eachRow[4])
stateSel2 = Select(driver.find_element_by_id('stateId'))
stateSel2.select_by_visible_text(formattedStateValue_Recipient)
#driver.findElements(By.xpath("//select[@id = 'selectId')]/option[contains(text(), 'DOLLAR')]")).click();
#Wait a second for the dropdown to select, to ensure that input is recorded
delay = 1
        #TODO anonymize phone number and email input - Make this the same as the sender
driver.find_element_by_id('phone').send_keys("1234567890")
driver.find_element_by_id('email').send_keys("[email protected]")
driver.find_element_by_id("submit").click()
#Wait for the page to load
WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH,
"//div[@class='radio-button']")))
# What should USPS do if the package can't be delivered?
# Return to sender
driver.find_element_by_xpath("//input[@value='0']").click()
driver.find_element_by_id("submit").click()
#Wait for the page to load
WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH,
"//textarea[@name='additionalComments']")))
# What category is the contents of the package? Add comments as needed
        #Build the category XPath from the row data (currently unused; the 'Gifts' value is hard-coded below)
        strBuilder = "//input[@value='" + eachRow[10] + "']"
        driver.find_element_by_xpath("//input[@value='Gifts']").click()
        #driver.find_element_by_xpath(strBuilder).click()
driver.find_element_by_id('additionalComments').send_keys(eachRow[10])
driver.find_element_by_id("submit").click()
#Wait for the page to load
WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH,
"//input[@name='isCommercialSender']")))
# Are you a commercial sender?
driver.find_element_by_xpath("//input[@value='0']").click() #Not a commercial sender
driver.find_element_by_id("submit").click()
#Wait for the page to load
WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH,
"//input[@name='itemDesc']")))
# Add items to the list of package contents, including:
# Description, Quantity, Value, Weight,
driver.find_element_by_xpath("//input[@name='itemDesc']").send_keys("Gifts") #Description
driver.find_element_by_xpath("//input[@name='itemQty']").send_keys("1") #Quantity
driver.find_element_by_xpath("//input[@name='unitValue']").send_keys("10") #Value
driver.find_element_by_xpath("//input[@name='weightPounds']").send_keys(eachRow[8]) #Weight, Pounds
driver.find_element_by_xpath("//input[@name='weightOunces']").send_keys(eachRow[9]) #Weight, Ounces
driver.find_element_by_id("submit").click()
#Wait for the page to load
WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH,
"//select[@id='eelCode']")))
# Confirm the AES Export Option
driver.find_element_by_id("submit").click()
#Wait for the page to load
WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH,
"//input[@name='print']")))
# Print the Customs form and save as a PDF to the target folder
driver.find_element_by_id("submit").click() #The print customs form button has ID Submit
#At this point, there should be a PDF in the downloads folder
# TODO - Rename the PDF with some naming logic to help keep the folder organized
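        # One possible sketch for the rename TODO above (assumes Firefox saved the form to the default
        # downloads folder and that the newest PDF there is the one just printed; names are hypothetical):
        #   downloadDir = os.path.expanduser("~/Downloads")
        #   pdfs = [f for f in os.listdir(downloadDir) if f.lower().endswith(".pdf")]
        #   if pdfs:
        #       newest = max(pdfs, key=lambda f: os.path.getctime(os.path.join(downloadDir, f)))
        #       os.rename(os.path.join(downloadDir, newest),
        #                 os.path.join(downloadDir, eachRow[1] + "_customs_form.pdf"))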
|
py | b40228401a67bcd895d56949ef3fafb8f5b27c25 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
__all__ = ['ContainerArgs', 'Container']
@pulumi.input_type
class ContainerArgs:
def __init__(__self__, *,
data_format: pulumi.Input[Union[str, 'AzureContainerDataFormat']],
device_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
storage_account_name: pulumi.Input[str],
container_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Container resource.
:param pulumi.Input[Union[str, 'AzureContainerDataFormat']] data_format: DataFormat for Container
:param pulumi.Input[str] device_name: The device name.
:param pulumi.Input[str] resource_group_name: The resource group name.
:param pulumi.Input[str] storage_account_name: The Storage Account Name
:param pulumi.Input[str] container_name: The container name.
"""
pulumi.set(__self__, "data_format", data_format)
pulumi.set(__self__, "device_name", device_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "storage_account_name", storage_account_name)
if container_name is not None:
pulumi.set(__self__, "container_name", container_name)
@property
@pulumi.getter(name="dataFormat")
def data_format(self) -> pulumi.Input[Union[str, 'AzureContainerDataFormat']]:
"""
DataFormat for Container
"""
return pulumi.get(self, "data_format")
@data_format.setter
def data_format(self, value: pulumi.Input[Union[str, 'AzureContainerDataFormat']]):
pulumi.set(self, "data_format", value)
@property
@pulumi.getter(name="deviceName")
def device_name(self) -> pulumi.Input[str]:
"""
The device name.
"""
return pulumi.get(self, "device_name")
@device_name.setter
def device_name(self, value: pulumi.Input[str]):
pulumi.set(self, "device_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The resource group name.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="storageAccountName")
def storage_account_name(self) -> pulumi.Input[str]:
"""
The Storage Account Name
"""
return pulumi.get(self, "storage_account_name")
@storage_account_name.setter
def storage_account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "storage_account_name", value)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> Optional[pulumi.Input[str]]:
"""
The container name.
"""
return pulumi.get(self, "container_name")
@container_name.setter
def container_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "container_name", value)
class Container(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
container_name: Optional[pulumi.Input[str]] = None,
data_format: Optional[pulumi.Input[Union[str, 'AzureContainerDataFormat']]] = None,
device_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
storage_account_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Represents a container on the Data Box Edge/Gateway device.
API Version: 2020-12-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] container_name: The container name.
:param pulumi.Input[Union[str, 'AzureContainerDataFormat']] data_format: DataFormat for Container
:param pulumi.Input[str] device_name: The device name.
:param pulumi.Input[str] resource_group_name: The resource group name.
:param pulumi.Input[str] storage_account_name: The Storage Account Name
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ContainerArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Represents a container on the Data Box Edge/Gateway device.
API Version: 2020-12-01.
:param str resource_name: The name of the resource.
:param ContainerArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ContainerArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
container_name: Optional[pulumi.Input[str]] = None,
data_format: Optional[pulumi.Input[Union[str, 'AzureContainerDataFormat']]] = None,
device_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
storage_account_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ContainerArgs.__new__(ContainerArgs)
__props__.__dict__["container_name"] = container_name
if data_format is None and not opts.urn:
raise TypeError("Missing required property 'data_format'")
__props__.__dict__["data_format"] = data_format
if device_name is None and not opts.urn:
raise TypeError("Missing required property 'device_name'")
__props__.__dict__["device_name"] = device_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if storage_account_name is None and not opts.urn:
raise TypeError("Missing required property 'storage_account_name'")
__props__.__dict__["storage_account_name"] = storage_account_name
__props__.__dict__["container_status"] = None
__props__.__dict__["created_date_time"] = None
__props__.__dict__["name"] = None
__props__.__dict__["refresh_details"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:databoxedge:Container"), pulumi.Alias(type_="azure-native:databoxedge/v20190801:Container"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20190801:Container"), pulumi.Alias(type_="azure-native:databoxedge/v20200501preview:Container"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20200501preview:Container"), pulumi.Alias(type_="azure-native:databoxedge/v20200901:Container"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20200901:Container"), pulumi.Alias(type_="azure-native:databoxedge/v20200901preview:Container"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20200901preview:Container"), pulumi.Alias(type_="azure-native:databoxedge/v20201201:Container"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20201201:Container"), pulumi.Alias(type_="azure-native:databoxedge/v20210201:Container"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20210201:Container"), pulumi.Alias(type_="azure-native:databoxedge/v20210201preview:Container"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20210201preview:Container")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Container, __self__).__init__(
'azure-native:databoxedge:Container',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Container':
"""
Get an existing Container resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ContainerArgs.__new__(ContainerArgs)
__props__.__dict__["container_status"] = None
__props__.__dict__["created_date_time"] = None
__props__.__dict__["data_format"] = None
__props__.__dict__["name"] = None
__props__.__dict__["refresh_details"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
return Container(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="containerStatus")
def container_status(self) -> pulumi.Output[str]:
"""
Current status of the container.
"""
return pulumi.get(self, "container_status")
@property
@pulumi.getter(name="createdDateTime")
def created_date_time(self) -> pulumi.Output[str]:
"""
The UTC time when container got created.
"""
return pulumi.get(self, "created_date_time")
@property
@pulumi.getter(name="dataFormat")
def data_format(self) -> pulumi.Output[str]:
"""
DataFormat for Container
"""
return pulumi.get(self, "data_format")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The object name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="refreshDetails")
def refresh_details(self) -> pulumi.Output['outputs.RefreshDetailsResponse']:
"""
Details of the refresh job on this container.
"""
return pulumi.get(self, "refresh_details")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
Container in DataBoxEdge Resource
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The hierarchical type of the object.
"""
return pulumi.get(self, "type")
|
py | b402287480c643e3f9d604b09a5e3a3de7c45dfe | #!/usr/bin/env python3
#############################################################################
# Filename : Joystick.py
# Description : Read Joystick
# Author : freenove
# modification: 2018/08/02
########################################################################
import RPi.GPIO as GPIO
import smbus
import time
address = 0x48
bus=smbus.SMBus(1)
cmd=0x40
Z_Pin = 12 #define pin for Z_Pin
def analogRead(chn): #read ADC value
bus.write_byte(address,cmd+chn)
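    #The ADC at 0x48 (a PCF8591-style chip in this setup) returns the result of the previous
    #conversion on each read, so the value is read twice and the second, current reading is kept.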
value = bus.read_byte(address)
value = bus.read_byte(address)
#value = bus.read_byte_data(address,cmd+chn)
return value
def analogWrite(value):
bus.write_byte_data(address,cmd,value)
#Y1 Left
#Y2 Right
#X1 Down
#X2 Up
def location(x):
if x==1:
Y = analogRead(0)
X = analogRead(1)
elif x==2:
Y = analogRead(2)
X = analogRead(3)
else: raise Exception("Error")
if Y > 222:
Y=1
elif Y < 50:
Y=2
else:
Y=0
if X > 222:
X=1
elif X < 50:
X=2
else:
X=0
return Y,X
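#With the thresholds above, a centered stick returns (0, 0); per the Y1/Y2/X1/X2 notes, pushing
#joystick 1 left should give location(1) -> (1, 0) (direction mapping depends on the wiring).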
def setup():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(Z_Pin,GPIO.IN,GPIO.PUD_UP) #set Z_Pin to pull-up mode
def loop():
while True:
val_Z = GPIO.input(Z_Pin) #read digital quality of axis Z
val_Y = analogRead(0) #read analog quality of axis X and Y
val_X = analogRead(1)
        Y,X = location(1)   #location() returns (Y, X); joystick index 1 matches channels 0/1 read above (assumption)
        print ('value_X: %d ,\tvalue_Y: %d ,\tvalue_Z: %d ,\tdirection: Y%d,X%d'%(val_X,val_Y,val_Z,Y,X))
time.sleep(0.01)
def destroy():
bus.close()
GPIO.cleanup()
if __name__ == '__main__':
print ('Program is starting ... ')
setup()
try:
loop()
except KeyboardInterrupt:
destroy()
|
py | b402289f6f7c79d2447303111d384dbe83c44cf4 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import glob
import shutil
import string
import dbm
from common import *
def render_mapperTx(fname, **kwargs):
with open(fname, 'w+') as fw:
fw.write(serve_template('mysqlMapperTx.mako', **kwargs))
def render_mapper(fname, **kwargs):
with open(fname, 'w+') as fw:
fw.write(serve_template('mysqlMapper.mako', **kwargs))
def render_mapperImpl(fname, **kwargs):
with open(fname, 'w+') as fw:
fw.write(serve_template('mysqlMapperImpl.mako', **kwargs))
def gen_mapper(prjinfo, minfo):
outfolder = os.path.join(prjinfo._root_, 'java/_project_/_project_-serviceImpl/src/main/java/com/_company_/_project_/mapper')
outfolder = format_line(outfolder, prjinfo)
fpath = os.path.join(outfolder, minfo['ns'])
if not os.path.exists(fpath):
os.makedirs(fpath)
rpath = os.path.join(fpath, 'r')
if not os.path.exists(rpath):
os.makedirs(rpath)
wpath = os.path.join(fpath, 'w')
if not os.path.exists(wpath):
os.makedirs(wpath)
kwargs = {}
kwargs['prj'] = prjinfo
kwargs['emm'] = prjinfo.emm
kwargs['minfo'] = minfo
kwargs['_now_'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
kwargs['_module_'] = minfo['ns']
for table in minfo['tables']:
kwargs['_tbi_'] = table
kwargs['_ROLE_'] = 'Slave'
kwargs['_ROLE_NS_'] = '.r'
fname = os.path.join(fpath, 'r', table.java.name + 'MapperSlave.java')
render_mapper(fname, **kwargs)
kwargs['_ROLE_'] = 'Master'
kwargs['_ROLE_NS_'] = '.w'
fname = os.path.join(fpath, 'w', table.java.name + 'MapperMaster.java')
render_mapper(fname, **kwargs)
fname = os.path.join(fpath, table.java.name + 'Tx.java')
render_mapperTx(fname, **kwargs)
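# For a module namespace 'ns' and a hypothetical table 'Foo', the loop above produces, under the
# generated mapper folder: ns/r/FooMapperSlave.java, ns/w/FooMapperMaster.java, and ns/FooTx.java.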
# def gen_mapperImpl(prjinfo, minfo):
# outfolder = os.path.join(prjinfo._root_, 'java/_project_/_project_-serviceImpl/src/main/java/com/_company_/_project_/mapper/impl')
# outfolder = format_line(outfolder, prjinfo)
# fpath = os.path.join(outfolder, minfo['ns'])
# if not os.path.exists(fpath):
# os.makedirs(fpath)
# kwargs = {}
# kwargs['prj'] = prjinfo
# kwargs['emm'] = prjinfo.emm
# kwargs['minfo'] = minfo
# kwargs['_now_'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# kwargs['_module_'] = minfo['ns']
# for table in minfo['tables']:
# fname = os.path.join(fpath, 'Abstract' + table.java.name + 'MapperImpl.java')
# kwargs['_tbi_'] = table
# render_template(fname, 'mysqlMapperImplAb.mako', **kwargs)
# fname = os.path.join(fpath, table.java.name + 'MapperImpl.java')
# kwargs['_tbi_'] = table
# render_mapperImpl(fname, **kwargs)
# fname = os.path.join(fpath, table.java.name + 'Tx.java')
# render_mapperTx(fname, **kwargs)
# def gen_config(prjinfo, target):
# kwargs = {}
# kwargs['_modules_'] = prjinfo._modules_
# outfolder = os.path.join(prjinfo._root_, 'java/_project_/_project_-web-res/src/%s/resources' % target)
# outfolder = format_line(outfolder, prjinfo)
# fname = os.path.join(outfolder, 'mysql.yaml')
# render_template(fname, 'config-mysql.mako', **kwargs)
# fname = os.path.join(outfolder, 'mapper.yaml')
# render_template(fname, 'config-mapper.mako', **kwargs)
def start(prjinfo):
if not os.path.exists(prjinfo._root_):
os.makedirs(prjinfo._root_)
dbm.read_tables(prjinfo)
for minfo in prjinfo._modules_:
gen_mapper(prjinfo, minfo)
# gen_mapperImpl(prjinfo, minfo)
# gen_config(prjinfo, 'main')
# gen_config(prjinfo, 'test')
|
py | b4022922f3e0a7d3b3a3a9e4526835f9ae86cdb5 | # coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""
FILE: sample_create_translation_job.py
DESCRIPTION:
This sample demonstrates how to create a translation job for documents in your Azure Blob
Storage container and wait until the job is completed.
To set up your containers for translation and generate SAS tokens to your containers (or files)
with the appropriate permissions, see the README.
USAGE:
python sample_create_translation_job.py
Set the environment variables with your own values before running the sample:
1) AZURE_DOCUMENT_TRANSLATION_ENDPOINT - the endpoint to your Document Translation resource.
2) AZURE_DOCUMENT_TRANSLATION_KEY - your Document Translation API key.
3) AZURE_SOURCE_CONTAINER_URL - the container SAS URL to your source container which has the documents
to be translated.
4) AZURE_TARGET_CONTAINER_URL - the container SAS URL to your target container where the translated documents
will be written.
"""
def sample_translation():
import os
# [START wait_until_done]
from azure.core.credentials import AzureKeyCredential
from azure.ai.translation.document import (
DocumentTranslationClient,
DocumentTranslationInput,
TranslationTarget
)
endpoint = os.environ["AZURE_DOCUMENT_TRANSLATION_ENDPOINT"]
key = os.environ["AZURE_DOCUMENT_TRANSLATION_KEY"]
source_container_url = os.environ["AZURE_SOURCE_CONTAINER_URL"]
target_container_url = os.environ["AZURE_TARGET_CONTAINER_URL"]
client = DocumentTranslationClient(endpoint, AzureKeyCredential(key))
job = client.create_translation_job(inputs=[
DocumentTranslationInput(
source_url=source_container_url,
targets=[
TranslationTarget(
target_url=target_container_url,
language_code="es"
)
]
)
]
) # type: JobStatusResult
job_result = client.wait_until_done(job.id) # type: JobStatusResult
print("Job status: {}".format(job_result.status))
print("Job created on: {}".format(job_result.created_on))
print("Job last updated on: {}".format(job_result.last_updated_on))
print("Total number of translations on documents: {}".format(job_result.documents_total_count))
print("\nOf total documents...")
print("{} failed".format(job_result.documents_failed_count))
print("{} succeeded".format(job_result.documents_succeeded_count))
# [END wait_until_done]
# [START list_all_document_statuses]
doc_results = client.list_all_document_statuses(job_result.id) # type: ItemPaged[DocumentStatusResult]
for document in doc_results:
print("Document ID: {}".format(document.id))
print("Document status: {}".format(document.status))
if document.status == "Succeeded":
print("Source document location: {}".format(document.source_document_url))
print("Translated document location: {}".format(document.translated_document_url))
print("Translated to language: {}\n".format(document.translate_to))
else:
print("Error Code: {}, Message: {}\n".format(document.error.code, document.error.message))
# [END list_all_document_statuses]
if __name__ == '__main__':
sample_translation()
|
py | b4022925971a3eac12c46b396ac1d2e6cae0a052 | import argparse
import os
import numpy as np
import scipy.misc as ssc
import kitti_util
def project_disp_to_points(calib, disp, max_high):
disp[disp < 0] = 0
baseline = 0.54
mask = disp > 0
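    # Where disp == 0 the mask is False, so the denominator below becomes disp + 1; this avoids a
    # divide-by-zero, and those pixels are discarded later by points[mask.reshape(-1)].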
depth = calib.f_u * baseline / (disp + 1. - mask)
rows, cols = depth.shape
print('>>>>>>>>>>>>>> in project disp to point', rows, cols)
c, r = np.meshgrid(np.arange(cols), np.arange(rows))
points = np.stack([c, r, depth])
points = points.reshape((3, -1))
points = points.T
points = points[mask.reshape(-1)]
print('>>>>>>>> consider saving this', points.shape)
cloud = calib.project_image_to_velo(points)
valid = (cloud[:, 0] >= 0) & (cloud[:, 2] < max_high)
return cloud[valid]
def project_depth_to_points(calib, depth, max_high):
rows, cols = depth.shape
c, r = np.meshgrid(np.arange(cols), np.arange(rows))
points = np.stack([c, r, depth])
points = points.reshape((3, -1))
points = points.T
cloud = calib.project_image_to_velo(points)
valid = (cloud[:, 0] >= 0) & (cloud[:, 2] < max_high)
return cloud[valid]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate Libar')
parser.add_argument('--calib_dir', type=str,
default='~/Kitti/object/training/calib')
parser.add_argument('--disparity_dir', type=str,
default='~/Kitti/object/training/predicted_disparity')
parser.add_argument('--save_dir', type=str,
default='~/Kitti/object/training/predicted_velodyne')
parser.add_argument('--max_high', type=int, default=1)
parser.add_argument('--is_depth', action='store_true')
args = parser.parse_args()
assert os.path.isdir(args.disparity_dir)
assert os.path.isdir(args.calib_dir)
if not os.path.isdir(args.save_dir):
os.makedirs(args.save_dir)
disps = [x for x in os.listdir(args.disparity_dir) if x[-3:] == 'png' or x[-3:] == 'npy']
disps = sorted(disps)
for fn in disps:
predix = fn[:-4]
calib_file = '{}/{}.txt'.format(args.calib_dir, predix)
calib = kitti_util.Calibration(calib_file)
# disp_map = ssc.imread(args.disparity_dir + '/' + fn) / 256.
if fn[-3:] == 'png':
disp_map = ssc.imread(args.disparity_dir + '/' + fn)
elif fn[-3:] == 'npy':
disp_map = np.load(args.disparity_dir + '/' + fn)
else:
assert False
if not args.is_depth:
disp_map = (disp_map*256).astype(np.uint16)/256.
lidar = project_disp_to_points(calib, disp_map, args.max_high)
else:
disp_map = (disp_map).astype(np.float32)/256.
lidar = project_depth_to_points(calib, disp_map, args.max_high)
# pad 1 in the indensity dimension
lidar = np.concatenate([lidar, np.ones((lidar.shape[0], 1))], 1)
lidar = lidar.astype(np.float32)
lidar.tofile('{}/{}.bin'.format(args.save_dir, predix))
print('Finish Depth {}'.format(predix))
|
py | b402293cd0c5208f6babd90a2b63a793d99b9153 | """ creating a class for code challenge """
import numpy as np
import math as mt
class Stats:
    """creating stats class"""
    def __init__(self, array):
        self.array = array
    def mean(self):
        l = len(self.array)
        mean = sum(self.array) / l
        print("The mean is: ", mean)
        return mean
    def standard_deviation(self):
        #population standard deviation
        mean = sum(self.array) / len(self.array)
        variance = sum((x - mean) ** 2 for x in self.array) / len(self.array)
        standardized = mt.sqrt(variance)
        print("The standard deviation is: ", standardized)
        return standardized
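#Example usage (hypothetical data):
#   stats = Stats([2, 4, 4, 4, 5, 5, 7, 9])
#   stats.mean()                  # The mean is: 5.0
#   stats.standard_deviation()    # The standard deviation is: 2.0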
|
py | b4022a8cb5d2e6296890dde8d3785edb764119a8 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: deploy_batch.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from cmdb_sdk.model.easy_flow import deploy_target_pb2 as cmdb__sdk_dot_model_dot_easy__flow_dot_deploy__target__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='deploy_batch.proto',
package='easy_flow',
syntax='proto3',
serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/easy_flow'),
serialized_pb=_b('\n\x12\x64\x65ploy_batch.proto\x12\teasy_flow\x1a,cmdb_sdk/model/easy_flow/deploy_target.proto\"\xbe\x01\n\x0b\x44\x65ployBatch\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x10\n\x08\x62\x61tchNum\x18\x02 \x01(\x05\x12\x15\n\rbatchInterval\x18\x03 \x01(\x05\x12/\n\x07\x62\x61tches\x18\x04 \x03(\x0b\x32\x1e.easy_flow.DeployBatch.Batches\x12\x12\n\nfailedStop\x18\x05 \x01(\x08\x1a\x33\n\x07\x42\x61tches\x12(\n\x07targets\x18\x01 \x03(\x0b\x32\x17.easy_flow.DeployTargetBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/easy_flowb\x06proto3')
,
dependencies=[cmdb__sdk_dot_model_dot_easy__flow_dot_deploy__target__pb2.DESCRIPTOR,])
_DEPLOYBATCH_BATCHES = _descriptor.Descriptor(
name='Batches',
full_name='easy_flow.DeployBatch.Batches',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='targets', full_name='easy_flow.DeployBatch.Batches.targets', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=219,
serialized_end=270,
)
_DEPLOYBATCH = _descriptor.Descriptor(
name='DeployBatch',
full_name='easy_flow.DeployBatch',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='easy_flow.DeployBatch.type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batchNum', full_name='easy_flow.DeployBatch.batchNum', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batchInterval', full_name='easy_flow.DeployBatch.batchInterval', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batches', full_name='easy_flow.DeployBatch.batches', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='failedStop', full_name='easy_flow.DeployBatch.failedStop', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_DEPLOYBATCH_BATCHES, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=80,
serialized_end=270,
)
_DEPLOYBATCH_BATCHES.fields_by_name['targets'].message_type = cmdb__sdk_dot_model_dot_easy__flow_dot_deploy__target__pb2._DEPLOYTARGET
_DEPLOYBATCH_BATCHES.containing_type = _DEPLOYBATCH
_DEPLOYBATCH.fields_by_name['batches'].message_type = _DEPLOYBATCH_BATCHES
DESCRIPTOR.message_types_by_name['DeployBatch'] = _DEPLOYBATCH
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DeployBatch = _reflection.GeneratedProtocolMessageType('DeployBatch', (_message.Message,), {
'Batches' : _reflection.GeneratedProtocolMessageType('Batches', (_message.Message,), {
'DESCRIPTOR' : _DEPLOYBATCH_BATCHES,
'__module__' : 'deploy_batch_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.DeployBatch.Batches)
})
,
'DESCRIPTOR' : _DEPLOYBATCH,
'__module__' : 'deploy_batch_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.DeployBatch)
})
_sym_db.RegisterMessage(DeployBatch)
_sym_db.RegisterMessage(DeployBatch.Batches)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
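# Hedged usage sketch (an assumption, not part of the generated file): the DeployBatch
# message declared above could be populated roughly like this:
#   batch = DeployBatch(type='canary', batchNum=2, batchInterval=30, failedStop=True)
#   group = batch.batches.add()  # append one Batches entry, then fill group.targets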
|
py | b4022b8fb0b1e8dec1a1eea36a56c390086fee01 | #!/usr/bin/python3
"""Utilities for CASegorizer"""
import argparse
import csv
import pickle
import os
_PARSER = argparse.ArgumentParser(description='Processes training set.')
_PARSER.add_argument('-f', '--file', type=str, required=True,
help='filename containing training set or pickled file')
_PARSER.add_argument('-s', '--save', action='store_true',
help='save the pickled data to particular file')
_PARSER.add_argument('-l', '--load', action='store_true',
help='load the pickled data from particular file')
def load(filename):
"""Loads elements from Pickle file
Args:
- Filename - str of location
Return:
Tuple with:
- Lists of Activity Names
- Lists of Activity Types
"""
assert type(filename) == str, 'Filename must be a str'
with open(filename, 'rb') as loader:
elements = pickle.load(loader)
assert type(elements) == tuple, 'Element is not valid!'
assert len(elements) == 2, 'Element is not valid!'
assert len(elements[0]) == len(elements[1]), 'Element is not valid!'
assert type(elements[0]) == list, 'Element is not valid!'
assert type(elements[1]) == list, 'Element is not valid!'
return elements
def save(elements, filename):
"""Pickles elements to a particular filename
Args:
- elements: tuple(list, list)
- filename: str of location to save serialization
Return:
None
"""
assert type(filename) == str, 'Filename must be a str'
assert filename.endswith('.pkl'), 'File must be pkl!'
assert type(elements) == tuple, 'Element must be a tuple!'
assert len(elements) == 2, 'Tuple must be of length 2!'
assert len(elements[0]) == len(elements[1]), 'Elements not equal length!'
assert type(elements[0]) == list, 'First element must be a list!'
assert type(elements[1]) == list, 'Second element must be a list!'
with open(filename, 'wb') as dump:
pickle.dump(elements, dump)
return
def extractor(filename):
"""Return a tuple containing two lists of the Activity title and
Activity type(s)
Args:
- filename: string of the filename
Returns:
Tuple with:
- Lists of Activity Names
- Lists of Activity Types
"""
assert filename.endswith('.csv'), 'File must be a csv file!'
activity_names = []
activity_types = []
with open(filename, 'r') as cas_file:
cas_reader = csv.reader(cas_file, delimiter=',')
for row in cas_reader:
activity_names.append(row[0])
activity_types.append([int(tp) for tp in row[1:] if int(tp)])
return (activity_names, activity_types)
def main():
"""Main function of utils.py"""
parse_args = _PARSER.parse_args()
if parse_args.save:
csv_file = parse_args.file
data = extractor(csv_file)
save(data, os.path.realpath(os.path.join(os.path.dirname(csv_file),
'data.pkl')))
elif parse_args.load:
pkl_file = parse_args.file
data = load(pkl_file)
else:
print('Nothing happened')
if __name__ == '__main__':
main()
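# Hedged CLI sketch (file names are assumptions, not part of the original script):
#   python utils.py --file training_set.csv --save   # pickle (names, types) next to the csv as data.pkl
#   python utils.py --file data.pkl --load           # load and validate the pickled tuple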
|
py | b4022bebf69c07eff73be902d251d49d9236180f | '''
FiniteMMSB.py
'''
from builtins import *
import numpy as np
from scipy.sparse import csc_matrix
from bnpy.allocmodel import AllocModel
from bnpy.suffstats import SuffStatBag
from bnpy.util import gammaln, digamma, EPS
from bnpy.util.NumericUtil import calcRlogR
from bnpy.allocmodel.topics.HDPTopicUtil import c_Dir
class FiniteMMSB(AllocModel):
""" Mixed membership stochastic block model, with K components.
Attributes
-------
inferType : string {'EM', 'VB', 'moVB', 'soVB'}
indicates which updates to perform for local/global steps
K : int
number of components
alpha : float
scalar symmetric Dirichlet prior on mixture weights
pi_v ~ Dir( alpha/K, alpha/K, ... alpha/K)
Attributes for VB
---------
theta : 2D array, nNodes x K
theta[n,:] gives parameters for Dirichlet variational factor
defining distribution over membership probabilities for node n
"""
def __init__(self, inferType, priorDict=dict()):
if inferType.count('EM') > 0:
raise NotImplementedError(
'EM not implemented for FiniteMMSB (yet)')
self.inferType = inferType
self.set_prior(**priorDict)
self.K = 0
# Variational parameter for pi
self.theta = None
def set_prior(self, alpha=.1):
self.alpha = float(alpha)
def get_active_comp_probs(self):
print('TODO')
def getCompDims(self):
''' Get dimensions of latent component interactions.
Overrides default of ('K',), since E_log_soft_ev needs to be ('K','K')
Returns
-------
dims : tuple
'''
return ('K', 'K',)
def E_logPi(self):
''' Compute expected value of log \pi for each node and state.
Returns
-------
ElogPi : 2D array, nNodes x K
'''
sumtheta = self.theta.sum(axis=1)
ElogPi = digamma(self.theta) - digamma(sumtheta)[:, np.newaxis]
return ElogPi
def calc_local_params(self, Data, LP, **kwargs):
''' Compute local parameters for provided dataset.
Args
-------
Data : GraphData object
LP : dict
Local parameters, must contain fields
* E_log_soft_ev : nEdges x K x K
Returns
-------
LP : dict
Local parameters, will contain updated fields:
* resp : nEdges x K x K
resp[e,j,k] = prob that edge e is explained by
connection from state/block j to block k
'''
if self.inferType.count('EM') > 0:
raise NotImplementedError("TODO")
if Data.isSparse: # Sparse binary data.
raise NotImplementedError("TODO")
K = self.K
ElogPi = self.E_logPi()
# resp : nEdges x K x K
# resp[e(s,t),k,l] = ElogPi[s,k] + ElogPi[t,l] + likelihood
resp = ElogPi[Data.edges[:,0], :, np.newaxis] + \
ElogPi[Data.edges[:,1], np.newaxis, :]
logSoftEv = LP['E_log_soft_ev'] # E x K x K
resp += logSoftEv
# In-place exp and normalize
resp -= np.max(resp, axis=(1,2))[:, np.newaxis, np.newaxis]
np.exp(resp, out=resp)
resp /= resp.sum(axis=(1,2))[:, np.newaxis, np.newaxis]
np.maximum(resp, 1e-100, out=resp)
LP['resp'] = resp
return LP
def get_global_suff_stats(self, Data, LP, doPrecompEntropy=0, **kwargs):
''' Compute sufficient stats for provided dataset and local params
Returns
-------
SS : SuffStatBag
Updated fields
* NodeStateCount : 2D array, nNodes x K
* N : 2D array, size K x K
'''
K = LP['resp'].shape[-1]
V = Data.nNodes
SS = SuffStatBag(K=K, D=Data.dim, V=V)
# NodeStateCount_src[i,k]
# Num edges assigned to topic k associated with node i as source
srcResp = LP['resp'].sum(axis=2)
NodeStateCount_src = Data.getSparseSrcNodeMat() * srcResp
# Equivalent but slower: for loop
# NodeStateCount_src = np.zeros((Data.nNodes, K))
# for i in xrange(Data.nNodes):
# mask_i = Data.edges[:,0] == i
# NodeStateCount_src[i,:] = srcResp[mask_i].sum(axis=0)
# NodeStateCount_rcv[i,k]
# Num edges assigned to topic k associated with node i as receiver
rcvResp = LP['resp'].sum(axis=1)
NodeStateCount_rcv = Data.getSparseRcvNodeMat() * rcvResp
# Summing src counts and rcv counts gives the total
SS.setField(
'NodeStateCount', NodeStateCount_src + NodeStateCount_rcv,
dims=('V', 'K'))
# Compute total atoms assigned to each cluster pair
Nresp = np.sum(LP['resp'], axis=0)
SS.setField('N', Nresp, dims=('K','K'))
if doPrecompEntropy:
# Remember, resp has shape nEdges x K x K
# So, need to sum so we track scalar entropy, not K x K
Hresp = calcLentropyAsScalar(LP)
SS.setELBOTerm('Hresp', Hresp, dims=None)
return SS
def forceSSInBounds(self, SS):
''' Force certain fields in bounds, to avoid numerical issues.
Returns
-------
Nothing. SS is updated in-place.
'''
np.maximum(SS.NodeStateCount, 0, out=SS.NodeStateCount)
def update_global_params_VB(self, SS, **kwargs):
''' Update global parameter theta to optimize VB objective.
Post condition
--------------
Attribute theta set to optimal value given suff stats.
'''
self.theta = self.alpha / SS.K + SS.NodeStateCount
def set_global_params(self, hmodel=None, theta=None, **kwargs):
''' Set global parameters to specific values.
Post condition
--------------
Attributes theta, K set to provided values.
'''
if hmodel is not None:
self.K = hmodel.allocModel.K
if self.inferType == 'EM':
                raise NotImplementedError(
'EM not implemented (yet) for FiniteMMSB')
elif self.inferType.count('VB') > 0:
self.theta = hmodel.allocModel.theta
else:
if self.inferType == 'EM':
                raise NotImplementedError(
'EM not implemented (yet) for FiniteMMSB')
elif self.inferType.count('VB') > 0:
self.theta = theta
self.K = theta.shape[-1]
def init_global_params(self, Data, K=0, initLP=None, **kwargs):
''' Initialize global parameters "from scratch" to reasonable values.
Post condition
--------------
Attributes theta, K set to reasonable values.
'''
self.K = K
if initLP is not None:
# Compute NodeStateCount from provided initial local params
initSS = self.get_global_suff_stats(Data, initLP)
self.theta = self.alpha / K + initSS.NodeStateCount
else:
# Create random initNodeStateCount values
# by drawing from Dirichlet prior on pi_v, scaled by nEdgesPerNode
PRNG = np.random.RandomState(K)
piMean = self.alpha / K * np.ones(K)
nEdgesPerNode = Data.getSparseSrcNodeMat().sum(axis=1) + \
Data.getSparseRcvNodeMat().sum(axis=1)
initNodeStateCount = nEdgesPerNode * \
PRNG.dirichlet(piMean, size=Data.nNodes)
self.theta = self.alpha / K + initNodeStateCount
def calc_evidence(self, Data, SS, LP, todict=0, **kwargs):
''' Compute training objective function on provided input.
Returns
-------
L : scalar float
'''
Lalloc = self.L_alloc_no_slack()
Lslack = self.L_slack(SS)
if SS.hasELBOTerm('Hresp'):
Lentropy = SS.getELBOTerm('Hresp')
else:
Lentropy = calcLentropyAsScalar(LP)
if todict:
return dict(Lentropy=Lentropy, Lalloc=Lalloc, Lslack=Lslack)
return Lalloc + Lentropy + Lslack
def L_alloc_no_slack(self):
''' Compute allocation term of objective function, without slack term
Returns
-------
L : scalar float
'''
N = self.theta.shape[0]
K = self.K
prior_cDir = N * (gammaln(self.alpha) - K * gammaln(self.alpha/K))
post_cDir = np.sum(gammaln(np.sum(self.theta, axis=1))) - \
np.sum(gammaln(self.theta))
return prior_cDir - post_cDir
def L_slack(self, SS):
''' Compute slack term of the allocation objective function.
Returns
-------
L : scalar float
'''
ElogPi = digamma(self.theta) - \
digamma(np.sum(self.theta, axis=1))[:, np.newaxis]
Q = SS.NodeStateCount + self.alpha / SS.K - self.theta
Lslack = np.sum(Q * ElogPi)
return Lslack
def L_entropy_as_scalar(self, LP):
''' Compute entropy term of objective as a scalar.
Returns
-------
Hresp : scalar
'''
return calcLentropyAsScalar(LP)
def to_dict(self):
return dict(theta=self.theta)
def from_dict(self, myDict):
self.inferType = myDict['inferType']
self.K = myDict['K']
self.theta = myDict['theta']
def get_prior_dict(self):
return dict(alpha=self.alpha)
def calc_estZ(self):
''' Calculate hard assignment for each node.
Returns
-------
Z : 1D array, size nNodes
indicator for which cluster each node most belongs to
'''
return np.argmax(self.theta, axis=1)
def calcLentropyAsScalar(LP):
''' Compute entropy term of objective as a scalar.
Returns
-------
Hresp : scalar
'''
return -1.0 * np.sum(calcRlogR(LP['resp']))
'''
def initLPFromTruth(self, Data):
K = np.max(Data.TrueParams['Z']) + 1
N = Data.nNodes
Z = Data.TrueParams['Z']
resp = np.zeros((N, N, K, K))
for i in xrange(N):
for j in xrange(N):
resp[i, j, Z[i, j, 0], Z[j, i, 0]] = 1
diag = np.diag_indices(N)
resp[diag[0], diag[1], :, :] = 0
squareResp = resp
resp = np.reshape(resp, (N**2, K, K))
LP = {'resp': resp, 'squareResp': squareResp}
if Data.isSparse:
LP['Count1'] = np.sum(squareResp[Data.respInds[:, 0],
Data.respInds[:, 1]], axis=0)
LP['Count0'] = np.sum(squareResp, axis=(0, 1)) - LP['Count1']
return LP
'''
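# Hedged sketch (not part of the original module): for a toy node with K=2,
# alpha=0.1, and NodeStateCount[v] = [3, 1], update_global_params_VB gives
#   theta[v] = alpha / K + NodeStateCount[v] = [3.05, 1.05]
# and E_logPi then returns digamma(theta[v]) - digamma(theta[v].sum()) per state.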
|
py | b4022d97510e3aac633c58d3106d94130cf2bcf9 | from __future__ import print_function
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import argparse
import logging
import numpy as np
from time import time
import utils as U
import codecs
from optimizers import get_optimizer
from model import create_model
import keras.backend as K
from keras.preprocessing import sequence
import reader as dataset
from tqdm import tqdm
import pandas as pd
import json
from nltk.corpus import wordnet as wn
from collections import Counter
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class Node(object):
"""Class represents the node objects, which are displayed in the JSON file.
Args:
id: String that specifies the topic label
group: Integer that specifies the color of the node
occurrences: String that specifies the number of topic occurrences
        words: List of representative words
sentences: List of representative sentences
"""
def __init__(self, id, group, occurrences, words, sentences):
self.id = id
self.group = group
self.occurrences = occurrences
self.words = words
self.sentences = sentences
def to_json(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=2)
class Link(object):
"""Class represents the link objects, which are displayed in the JSON file.
Args:
source: String that specifies the topic label of the first node, the link is connected to
target: String that specifies the topic label of the second node, the link is connected to
value: Float that specifies the similarity (correlation) between source and target
"""
def __init__(self, source, target, value):
self.source = source
self.target = target
self.value = value
def to_json(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=2)
class TopWord(object):
"""Class represents top words that are displayed in the JSON file
Args:
rank: Integer that specifies the rank in the word list (e.g., 1 --> Most representative word)
word: Unicode that specifies the word
similarity: String that specifies the similarity (correlation) to the topic embedding
"""
def __init__(self, rank, word, similarity):
self.rank = rank
self.word = word
self.similarity = similarity
class TopSentence(object):
"""Class represents top sentences that are displayed in the JSON file
Args:
rank: Integer that specifies the rank in the sentence list (e.g., 1 --> Most representative sentence)
sentence: Unicode that specifies the sentence
"""
def __init__(self, rank, sentence):
self.rank = rank
self.sentence = sentence
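# Hedged sketch (not part of the original script): the classes above serialize into a
# JSON document of roughly this shape, which the visualization then consumes:
#   { "nodes": [ { "id": "...", "group": 0, "occurrences": "12",
#                  "words": [ { "rank": 1, "word": "...", "similarity": "0.85" } ],
#                  "sentences": [ { "rank": 1, "sentence": "..." } ] } ],
#     "links": [ { "source": "...", "target": "...", "value": 0.42 } ] }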
class Train(object):
"""Class used to train the model and generate relevant topic information
Args:
args: Argparse instance that contains the relevant parameters
logger: Logger instance
out_dir: String that contains the path to the output directory
"""
def __init__(self, args, logger, out_dir):
self.args = args
self.logger = logger
self.out_dir = out_dir
self.vocab, train_x, test_x, self.overall_maxlen = dataset.get_data(self.args.domain,
vocab_size=self.args.vocab_size,
maxlen=self.args.maxlen)
self.train_x = sequence.pad_sequences(train_x, maxlen=self.overall_maxlen)
self.test_x = sequence.pad_sequences(test_x, maxlen=self.overall_maxlen)
self.vis_path = self.out_dir + "/visualization"
U.mkdir_p(self.vis_path)
def sentence_batch_generator(self, data, batch_size):
""" Generates batches based on the data.
Args:
data: Numpy array of the data
batch_size: Integer that specifies the batch size (e.g. 64)
        Yields:
            Numpy array that contains the next shuffled batch of batch_size sentences
        """
n_batch = len(data) / batch_size
batch_count = 0
np.random.shuffle(data)
while True:
if batch_count == n_batch:
np.random.shuffle(data)
batch_count = 0
batch = data[batch_count * batch_size: (batch_count + 1) * batch_size]
batch_count += 1
yield batch
def negative_batch_generator(self, data, batch_size, neg_size):
"""Generates negative batches based on the data.
Args:
data: Numpy array of the data
batch_size: Integer that specifies the batch size (e.g. 64)
neg_size: Integer that specifies the number of negative instances
        Yields:
            Numpy array of shape (batch_size, neg_size, dim) with negative instances
        """
data_len = data.shape[0]
dim = data.shape[1]
while True:
indices = np.random.choice(data_len, batch_size * neg_size)
samples = data[indices].reshape(batch_size, neg_size, dim)
yield samples
def write_topics(self, word_emb, topic_emb, epoch, vocab_inv):
"""Writes relevant topic information with similar words to .log file for each epoch.
Args:
word_emb: Numpy array that contains the word embeddings
topic_emb: Numpy array that contains the topic embeddings
epoch: Integer that specifies the current epoch
vocab_inv: Dictionary that maps the index of every word in the vocab file to the corresponding word (In descending order based on occurrences)
Returns:
"""
# In final epoch, write in main directory
if epoch == self.args.epochs:
topic_file = codecs.open(self.out_dir + '/topics.log', 'w', 'utf-8')
# In other epochs, write in subdirectory
else:
topic_file = codecs.open(self.out_dir + '/topics/topic_epoch_' + str(epoch) + '.log', 'w', 'utf-8')
# Get the most similar words for every topic
for topic in range(self.args.num_topics):
desc = topic_emb[topic]
sims = word_emb.dot(desc.T)
ordered_words = np.argsort(sims)[::-1]
found_words = 0
desc_list = []
# Save most similar words until enough words are found
for word in ordered_words:
if found_words == self.args.labeling_num_words:
break
elif vocab_inv[word] != "<unk>":
# Save word and associated similarity
desc_list.append(vocab_inv[word] + "|" + str(sims[word]))
found_words += 1
# Write most similar words to file
topic_file.write('Topic %d:\n' % topic)
topic_file.write(' '.join(desc_list) + '\n\n')
# Returns a dataframe containing the most similar words for every topic and a list containing the topic names
def get_similar_words(self, model, vocab_inv):
"""
Args:
model: Keras model object
vocab_inv: Dictionary that maps the index of every word in the vocab file to the corresponding word (In descending order based on occurrences)
Returns:
            topic_labels: List that contains the topic names (based on selecting the most similar word)
word_df: DataFrame that contains the most similar words of every topic
"""
# Get all word and topic embeddings
word_emb = K.get_value(model.get_layer('word_emb').embeddings)
word_emb = word_emb / np.linalg.norm(word_emb, axis=-1, keepdims=True)
topic_emb = K.get_value(model.get_layer('topic_emb').W)
topic_emb = topic_emb / np.linalg.norm(topic_emb, axis=-1, keepdims=True)
word_df = pd.DataFrame(columns=['topic', 'rank', 'word', "similarity"])
topic_labels = []
# Iterate through every topic and calculate the most similar words
for topic in range(self.args.num_topics):
desc = topic_emb[topic]
sims = word_emb.dot(desc.T)
ordered_words = np.argsort(sims)[::-1]
found_words = 0
# Calculate topic labels
for word in ordered_words:
if vocab_inv[word] != "<unk>" and vocab_inv[word] not in topic_labels:
topic_labels.append(vocab_inv[word])
break
# Calculate most similar words and save them in word_df
for word in ordered_words:
if found_words == self.args.labeling_num_words:
break
elif vocab_inv[word] != "<unk>":
word_df.loc[len(word_df)] = (
topic_labels[topic], found_words + 1, vocab_inv[word], str(round(sims[word], 2)))
found_words += 1
return topic_labels, word_df
# Returns a dataframe containing the most similar sentences for every topic
def get_similar_sentences(self, topic_labels, topic_probs):
"""Selects the most similar sentences for every topic.
Args:
topic_labels: List that contains the topic labels
topic_probs: Numpy array that contains the probability for every sentence-topic combination
Returns:
sentence_df: DataFrame that contains the most similar sentences for every topic
"""
train_sen_file = codecs.open('./datasets/' + self.args.domain + '/train.txt', 'r', 'utf-8')
sentences = []
# Read in all sentences that are in the input data
for line in train_sen_file:
words = line.strip().split()
sentences.append(words)
# Calculate the sentences with the highest topic probabilities
max_indices = np.argsort(topic_probs, axis=0)[::-1]
max_probs = np.sort(topic_probs, axis=0)[::-1]
sentence_df = pd.DataFrame(columns=['topic', 'rank', 'sentence'])
similar_sentences = codecs.open(self.out_dir + '/similar_sentences', 'w', 'utf-8')
# Iterate through the topics and get most similar sentences
for topic_ind in range(self.args.num_topics):
similar_sentences.write("Topic " + str(topic_ind) + ": " + str(topic_labels[topic_ind]) + "\n")
curr_ind_col = max_indices[:, topic_ind]
curr_prob_col = max_probs[:, topic_ind]
# Write the most similar sentences to a file and save them to the sentence_df DataFrame
for rank in range(self.args.num_sentences):
similar_sentences.write(' '.join(sentences[curr_ind_col[rank]]) + " --> Probability: "
+ str(curr_prob_col[rank]) + "\n")
sentence_df.loc[len(sentence_df)] = (
str(topic_labels[topic_ind]), rank + 1, ' '.join(sentences[curr_ind_col[rank]]))
similar_sentences.write("\n")
return sentence_df
def get_json_objects(self, model, vocab_inv, topic_probs):
"""Retrieves the nodes and links that should be saved in the JSON file for the visualization.
Args:
model: Keras model object
vocab_inv: Dictionary that maps the index of every word in the vocab file to the corresponding word (In descending order based on occurrences)
topic_probs: Numpy array that contains the probability for every sentence-topic combination
Returns:
nodes: List that contains all the node objects that should be shown in the visualization
links: List that contains all the link objects that should be shown in the visualization
"""
topic_labels, word_df = self.get_similar_words(model, vocab_inv)
sentences_df = self.get_similar_sentences(topic_labels, topic_probs)
df = pd.DataFrame(topic_probs, columns=topic_labels)
predict_labels = df.idxmax(axis=1)
corr_df = df.corr(method="pearson")
topic_occ = []
# Calculate the topic occurrences
for topic_label in topic_labels:
topic_occ.append((predict_labels == topic_label).sum())
nodes = []
links = []
# Specify the ranks for the most similar words and sentences based on the parameters
top_word_ranks = [i for i in range(1, self.args.num_words + 1)]
top_sen_ranks = [i for i in range(1, self.args.num_sentences + 1)]
# Get the topic labels
topic_labels = self.calculate_initial_labels(word_df)
# Iterate through all topics and get the most similar words and sentences
for i in range(corr_df.shape[1]):
top_words = word_df[word_df["topic"] == str(corr_df.columns[i])].word[0:len(top_word_ranks)].values
top_word_similarities = word_df[word_df["topic"] == str(corr_df.columns[i])].similarity[
0:len(top_word_ranks)].values
top_sentences = sentences_df[sentences_df["topic"] == str(corr_df.columns[i])].sentence.values
word_objects = []
sentence_objects = []
# Create word and sentence objects and append them to the nodes and links lists
for word_ind in range(len(top_words)):
word_objects.append(
TopWord(top_word_ranks[word_ind], top_words[word_ind], top_word_similarities[word_ind]))
for sen_ind in range(len(top_sentences)):
sentence_objects.append(TopSentence(top_sen_ranks[sen_ind], top_sentences[sen_ind]))
nodes.append(Node(str(topic_labels[i]), i, str(topic_occ[i]), word_objects, sentence_objects))
for j in range(0, i):
links.append(Link(nodes[i].id, nodes[j].id, corr_df.iloc[i, j].round(2)))
return nodes, links
def calculate_initial_labels(self, word_df):
"""Calculates the topic labels based on the number of shared hypernyms. If no shared hypernym is detected, the most similar word is used instead.
Args:
word_df: DataFrame that contains the most similar words of every topic
Returns:
topic_labels: List that contains the topic labels
"""
topic_word_lists = []
topic_labels = []
curr_topic = 0
num_hypernyms = 0
hypernym_file = codecs.open(self.out_dir + '/topic_labels.log', 'w', 'utf-8')
metric_file = codecs.open(self.out_dir + '/metrics.log', 'a', 'utf-8')
metric_comparison_file = codecs.open('./code/output_dir/' + self.args.domain + '/metrics.log', 'a', 'utf-8')
# Iterate through all the topics and append the most similar words
for curr_ind in range(self.args.num_topics):
topic_word_lists.append(
word_df.iloc[curr_topic * self.args.labeling_num_words: self.args.labeling_num_words * (curr_topic + 1),
2].values)
curr_topic += 1
# Go through the most similar words of every topic
for topic_li in topic_word_lists:
overall_hypernym_li = []
path_distance = 0
# Iterate through the words
for word in topic_li:
try:
inv_hypernym_path = wn.synsets(str(word))[0].hypernym_paths()[0][::-1]
except:
continue
specific_hypernym_li = []
# Iterate through the hypernym path and only consider the path where distance <= distance to root hypernym
for entry in inv_hypernym_path:
max_path_len = len(inv_hypernym_path) / 2
# Save hypernyms for every topic in a specific list
if path_distance < max_path_len:
specific_hypernym_li.append(str(entry)[8:-7])
path_distance += 1
path_distance = 0
# Save hypernyms of one topic in a large list that contains all hypernyms
overall_hypernym_li.append(specific_hypernym_li)
common_hypernyms = []
# Index and index2 are the lists that contain the hypernyms for the given topic number (e.g. index=1 --> Hypernyms for topic 1)
for index in range(len(overall_hypernym_li) - 1):
for index2 in range(index + 1, len(overall_hypernym_li)):
hypernym_found = False
# Iterate over all hypernyms
for entry in overall_hypernym_li[index]:
for entry2 in overall_hypernym_li[index2]:
# Save the hypernym if two different words are compared and no lower hypernym was already found
if entry == entry2 and hypernym_found is False:
common_hypernyms.append(entry)
hypernym_found = True
break
else:
continue
# If no hypernyms are found, use the most similar word
if len(common_hypernyms) == 0:
top_word = self.get_top_word(topic_li, topic_labels)
            # If hypernyms are found, use the most frequently shared hypernym that is not already used as a label
else:
top_word = self.get_top_hypernym(topic_li, topic_labels, Counter(common_hypernyms).most_common())
num_hypernyms += sum(Counter(common_hypernyms).values())
topic_labels.append(top_word)
hypernym_file.write('Topic %s:' % (top_word) + "\n")
hypernym_file.write(' - Common hypernyms: %s' % (Counter(common_hypernyms).most_common()) + "\n")
hypernym_file.write(' - Similar words: %s' % (topic_li) + "\n" + "\n")
# Write information to multiple logging files
avg_num_hypernyms = float("{0:.2f}".format(num_hypernyms / float(self.args.num_topics)))
hypernyms_per_word = float("{0:.2f}".format(avg_num_hypernyms / float(self.args.labeling_num_words)))
hypernym_file.write('Hypernyms per Word: %s' % (hypernyms_per_word) + "\n")
hypernym_file.write('Average number of hypernyms: %s' % (avg_num_hypernyms) + "\n")
hypernym_file.write('Number of hypernyms found: %s' % num_hypernyms + "\n")
metric_file.write('Hypernyms per Word: %s' % (hypernyms_per_word) + "\n")
metric_file.write('Average number of hypernyms: %s' % (avg_num_hypernyms) + "\n")
metric_file.write('Number of hypernyms found: %s' % num_hypernyms + "\n" + "\n")
metric_comparison_file.write('Hypernyms per Word: %s' % (hypernyms_per_word) + "\n")
metric_comparison_file.write('Average number of hypernyms: %s' % (avg_num_hypernyms) + "\n")
metric_comparison_file.write('Number of hypernyms found: %s' % num_hypernyms + "\n" + "\n")
return topic_labels
def get_top_word(self, topic_li, topic_labels):
"""Retrieves the most similar word and sets it as label.
Args:
topic_li: Numpy array that contains the most similar words
topic_labels: List that contains the previous topic labels (Required because we do not want duplicate topic labels)
        Returns:
            String with the selected topic label
        """
# Iterate through most similar words and take first one that is not already used and unequal to <unk>
for word in topic_li:
if word != "<unk>" and word not in topic_labels:
return word
# If every shared hypernyms is already used as label and every similar word is already used, use the generic name "topic_" + index instead
return "topic_" + str(len(topic_labels))
def get_top_hypernym(self, topic_li, topic_labels, common_hypernyms):
"""Retrives the most commonly shared lowest hypernym.
Args:
topic_li: Numpy array that contains the most similar words
topic_labels: List that contains the previous topic labels (Required because we do not want duplicate topic labels)
common_hypernyms: List that contains the shared hypernyms as (entry, occurrence) tuples
        Returns:
            String with the selected topic label (lowest shared hypernym, or the top word as a fallback)
        """
# Iterate through the common hypernyms and use the most frequent one that is not already used as label
for common_hypernym in common_hypernyms:
if common_hypernym[0] not in topic_labels:
return common_hypernym[0]
# If all shared hypernyms are already used as label for another topic, use the top word instead
return self.get_top_word(topic_li, topic_labels)
def write_json(self, model, vocab_inv, topic_probs):
"""Writes all relevant topic information to a JSON file so that it can be imported in the visualization and labeling tool.
Args:
model: Keras model object
vocab_inv: Dictionary that maps the index of every word in the vocab file to the corresponding word (In descending order based on occurrences)
topic_probs: Numpy array that contains the probability for every sentence-topic combination
Returns:
"""
nodes, links = self.get_json_objects(model, vocab_inv, topic_probs)
self.logger.info('Writing .json file...')
# Create a String that contains all the information in a .json format
node_str = '{ "nodes": ['
link_str = ' "links": ['
for node in nodes[:-1]:
node_str += node.to_json() + ","
node_str += nodes[-1].to_json() + " ],"
for link in links[:-1]:
link_str += link.to_json() + ","
link_str += links[-1].to_json() + " ] }"
json_str = node_str + link_str
with open(self.vis_path + "/topic_information.json", "w") as f:
f.write(json_str)
self.logger.info('.json written successfully')
def build_model(self):
"""Creates the model object, which is used to calculate topics, similar words, similar sentences, topic occurrences, and topic similarities
Returns:
model: Keras model object
"""
optimizer = get_optimizer(self.args)
self.logger.info('Building model')
self.logger.info(' Number of training examples: %d', len(self.train_x))
self.logger.info(' Length of vocab: %d', len(self.vocab))
def max_margin_loss(y_true, y_pred):
return K.mean(y_pred)
model = create_model(self.args, self.overall_maxlen, self.vocab)
# Freeze the word embedding layer
model.get_layer('word_emb').trainable = False
# Check option to fix clusters instead of training them
if self.args.fix_clusters == "yes":
model.get_layer('topic_emb').trainable = False
model.compile(optimizer=optimizer, loss=max_margin_loss, metrics=[max_margin_loss])
return model
def train_model(self, model):
"""Train the model based on the hyperparameters defined.
Args:
model: Keras model object that is returned after calling Train.build_model()
Returns:
"""
vocab_inv = {}
for w, ind in self.vocab.items():
vocab_inv[ind] = w
sen_gen = self.sentence_batch_generator(self.train_x, self.args.batch_size)
neg_gen = self.negative_batch_generator(self.train_x, self.args.batch_size, self.args.neg_size)
batches_per_epoch = len(self.train_x) / self.args.batch_size
# batches_per_epoch = 1000
self.logger.info("Batches per epoch: %d", batches_per_epoch)
self.logger.info(
'--------------------------------------------------------------------------------------------------------------------------')
min_loss = float('inf')
loss_li = []
for ii in xrange(self.args.epochs):
t0 = time()
loss, max_margin_loss = 0., 0.
for b in tqdm(xrange(batches_per_epoch)):
sen_input = sen_gen.next()
neg_input = neg_gen.next()
batch_loss, batch_max_margin_loss = model.train_on_batch([sen_input, neg_input],
np.ones((self.args.batch_size, 1)))
loss += batch_loss / batches_per_epoch
max_margin_loss += batch_max_margin_loss / batches_per_epoch
tr_time = time() - t0
self.logger.info('Epoch %d, train: %is' % (ii + 1, tr_time))
self.logger.info(' Total loss: %.4f, max_margin_loss: %.4f, ortho_reg: %.4f' % (
loss, max_margin_loss, loss - max_margin_loss))
if loss < min_loss:
self.logger.info(' Loss < min_loss')
min_loss = loss
word_emb = K.get_value(model.get_layer('word_emb').embeddings)
topic_emb = K.get_value(model.get_layer('topic_emb').W)
word_emb = word_emb / np.linalg.norm(word_emb, axis=-1, keepdims=True)
topic_emb = topic_emb / np.linalg.norm(topic_emb, axis=-1, keepdims=True)
model.save_weights(self.out_dir + '/model_param')
self.write_topics(word_emb, topic_emb, ii + 1, vocab_inv)
training_detail_file = codecs.open(self.out_dir + '/training_details.log', 'a', 'utf-8')
training_detail_file.write('Epoch %d, train: %is' % (ii + 1, tr_time) + "\n")
training_detail_file.write('Total loss: %.4f, max_margin_loss: %.4f, ortho_reg: %.4f' % (
loss, max_margin_loss, loss - max_margin_loss) + "\n")
loss_li.append(float("{0:.4f}".format(loss)))
else:
self.logger.info(' Loss > min_loss')
loss_li.append(float("{0:.4f}".format(loss)))
# In Final Epoch
if ii + 1 == self.args.epochs:
self.logger.info('Training finished')
self.logger.info('Calculating most representative topic sentences...')
test_fn = K.function([model.get_layer('sentence_input').input, K.learning_phase()],
[model.get_layer('att_weights').output, model.get_layer('p_t').output])
                # If the argument is not given explicitly by the user, calculate a good default value (one batch plus one batch per 5000 entries)
if self.args.probability_batches == 0:
num_probability_batches = 1 + len(self.train_x) / 5000
self.logger.info('Using %s probability batches...', num_probability_batches)
else:
num_probability_batches = self.args.probability_batches
split_inputs = np.array_split(self.train_x, num_probability_batches)
_, topic_probs = test_fn([split_inputs[0], 0])
for split_input in split_inputs[1:]:
_, curr_topic_prob = test_fn([split_input, 0])
topic_probs = np.append(topic_probs, curr_topic_prob, axis=0)
self.logger.info('Most representative sentences calculated successfully')
self.write_json(model, vocab_inv, topic_probs)
self.save_model_loss(loss_li)
# os.system(
# "python ./code/coherence_score.py -f ./code/output_dir/organic_food_preprocessed/" + self.args.conf + "/topics.log -c ./preprocessed_data/organic_food_preprocessed/train.txt -o ./code/output_dir/organic_food_preprocessed/" + self.args.conf)
def save_model_loss(self, loss_li):
"""Creates plots of the training loss and saves them as .png and .pdf files.
Args:
loss_li: List that contains the model loss for every epoch
Returns:
"""
metric_file = codecs.open(self.out_dir + '/metrics.log', 'a', 'utf-8')
metric_comparison_file = codecs.open('./code/output_dir/' + self.args.domain + '/metrics.log', 'a', 'utf-8')
metric_file.write('Final loss: %s' % (loss_li[-1]) + "\n")
metric_file.write('Loss development: %s' % (loss_li) + "\n" + "\n")
metric_comparison_file.write('Final loss: %s' % (loss_li[-1]) + "\n")
metric_comparison_file.write('Loss development: %s' % (loss_li) + "\n" + "\n")
epoch_li = [epoch for epoch in range(1, self.args.epochs + 1)]
fig, ax = plt.subplots(figsize=(16, 8))
ax.set_xlabel("Epoch", fontsize=18, weight="bold")
ax.set_ylabel("Loss", fontsize=18, weight="bold")
ax.set_title('Model loss', fontsize=20, weight="bold")
plt.plot(epoch_li, loss_li)
plt.savefig(self.out_dir + "/model_loss.pdf", format="pdf")
plt.savefig(self.out_dir + "/model_loss.png", format="png")
def main():
logging.basicConfig(
filename='out.log',
level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
parser.add_argument("--domain", dest="domain", type=str, metavar='<str>', required=True,
help="domain of the corpus")
parser.add_argument("--conf", dest="conf", type=str, metavar='<str>', required=True,
help="Train configuration for the given domain")
parser.add_argument("--emb-path", dest="emb_path", type=str, metavar='<str>', required=True,
help="The path to the word embedding file")
parser.add_argument("--num-topics", dest="num_topics", type=int, metavar='<int>', default=20,
help="The number of topics specified that are calculated by the model (default=20)")
parser.add_argument("--vocab-size", dest="vocab_size", type=int, metavar='<int>', default=9000,
help="Vocab size. '0' means no limit (default=9000)")
parser.add_argument("--num-words", dest="num_words", type=int, metavar='<int>', default=10,
help="Number of most similar words displayed for each topic")
parser.add_argument("--num-sentences", dest="num_sentences", type=int, metavar='<int>', default=10,
help="Number of most similar sentences displayed for each topic")
parser.add_argument("--labeling-num-words", dest="labeling_num_words", type=int, metavar='<int>', default=25,
help="Number of most similar words used to generate the labels")
parser.add_argument("--batch-size", dest="batch_size", type=int, metavar='<int>', default=64,
help="Batch size used for training (default=64)")
parser.add_argument("--epochs", dest="epochs", type=int, metavar='<int>', default=20,
help="Number of epochs (default=20)")
parser.add_argument("--neg-size", dest="neg_size", type=int, metavar='<int>', default=20,
help="Number of negative instances (default=20)")
parser.add_argument("--maxlen", dest="maxlen", type=int, metavar='<int>', default=0,
help="Maximum allowed number of words during training. '0' means no limit (default=0)")
parser.add_argument("--algorithm", dest="algorithm", type=str, metavar='<str>', default='adam',
help="Optimization algorithm (rmsprop|sgd|adagrad|adadelta|adam|adamax) (default=adam)")
parser.add_argument("--fix-clusters", dest="fix_clusters", type=str, metavar='<str>', default="no",
help="Fix initial clusters (yes or no)")
parser.add_argument("--ortho-reg", dest="ortho_reg", type=float, metavar='<float>', default=0.1,
help="The weight of orthogonal regularization (default=0.1)")
parser.add_argument("--probability-batches", dest="probability_batches", type=int, metavar='<int>', default=0,
help="Calculation of topic probabilities is split into batches to avoid out of memory error."
"If an out of memory error or bus error occurs, increase this value.")
parser.add_argument("--emb-dim", dest="emb_dim", type=int, metavar='<int>', default=300,
help="Embeddings dimension (default=300)")
parser.add_argument("--emb-type", dest="emb_type", type=str, metavar='<str>', default="glove_finetuned",
help="The type of word vectors to use")
args = parser.parse_args()
out_dir = './code/output_dir/' + args.domain + '/' + args.conf
U.mkdir_p(out_dir)
U.mkdir_p(out_dir + "/topics")
U.print_args(args, out_dir + '/train_params')
U.print_args(args, out_dir + '/metrics.log')
assert args.algorithm in {'rmsprop', 'sgd', 'adagrad', 'adadelta', 'adam', 'adamax'}, "Invalid algorithm argument"
assert args.fix_clusters in {'yes', 'no'}, "Invalid fix_clusters argument"
assert args.labeling_num_words >= args.num_words, "Number of words used to generate labels must be >= Number of words displayed in visualization"
np.random.seed(1234)
trainer = Train(args, logger, out_dir)
model = trainer.build_model()
trainer.train_model(model)
if __name__ == "__main__":
main()
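# Hedged CLI sketch (the script name, domain, and configuration values are assumptions):
#   python train.py --domain organic_food_preprocessed --conf conf1 \
#       --emb-path ./preprocessed_data/organic_food_preprocessed/glove.txt \
#       --num-topics 20 --epochs 20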
|
py | b4022e80ea47a5737251d8919ba82dddaae3f7ee | """track_scraper URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.views.generic import RedirectView
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('manage_music.urls')),
url(
        r'^favicon\.ico$',
RedirectView.as_view(
url="/static/favicon.ico",
permanent=False),
name="favicon"
),
]
|
py | b4022f32d8d51c5e3782e605228dc24c8529d3f4 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
#
from spack import *
class PyCdatLite(PythonPackage):
"""Cdat-lite is a Python package for managing and analysing climate
science data. It is a subset of the Climate Data Analysis Tools (CDAT)
developed by PCMDI at Lawrence Livermore National Laboratory."""
homepage = "http://proj.badc.rl.ac.uk/cedaservices/wiki/CdatLite"
url = "https://pypi.io/packages/source/c/cdat-lite/cdat-lite-6.0.1.tar.gz"
version('6.0.1', '6d5a6e86f15ce15291d25feab8793248')
depends_on("netcdf")
depends_on("[email protected]:2.8", type=('build', 'run'))
depends_on("py-numpy", type=('build', 'run'))
depends_on('py-setuptools', type='build')
phases = ['install']
def install(self, spec, prefix):
"""Install everything from build directory."""
install_args = self.install_args(spec, prefix)
# Combine all phases into a single setup.py command,
# otherwise extensions are rebuilt without rpath by install phase:
self.setup_py('build_ext', '--rpath=%s' % ":".join(self.rpath),
'build_py', 'build_scripts',
'install', *install_args)
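# Hedged usage note (not part of the original package recipe): once this file is in a
# Spack package repository, the library would typically be built with
#   spack install py-cdat-lite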
|
py | b4022f8a7437fbe1582d31eeb219df0652441047 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class ApiregistrationV1Api(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_api_service(self, body, **kwargs):
"""
create an APIService
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_api_service(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param V1APIService body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1APIService
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_api_service_with_http_info(body, **kwargs)
else:
(data) = self.create_api_service_with_http_info(body, **kwargs)
return data
def create_api_service_with_http_info(self, body, **kwargs):
"""
create an APIService
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_api_service_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param V1APIService body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1APIService
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'pretty', 'dry_run', 'field_manager']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_api_service" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_api_service`")
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apiregistration.k8s.io/v1/apiservices', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIService',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
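    # Hedged usage sketch (not part of the generated client): with an already
    # configured ApiClient, an APIService could be created along these lines:
    #   api = ApiregistrationV1Api(api_client)
    #   api.create_api_service(body=v1_api_service, pretty='true')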
def delete_api_service(self, name, **kwargs):
"""
delete an APIService
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_api_service(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the APIService (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_api_service_with_http_info(name, **kwargs)
else:
(data) = self.delete_api_service_with_http_info(name, **kwargs)
return data
def delete_api_service_with_http_info(self, name, **kwargs):
"""
delete an APIService
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_api_service_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the APIService (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_api_service" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_api_service`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apiregistration.k8s.io/v1/apiservices/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_api_service(self, **kwargs):
"""
delete collection of APIService
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_api_service(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_api_service_with_http_info(**kwargs)
else:
(data) = self.delete_collection_api_service_with_http_info(**kwargs)
return data
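# Usage sketch (hypothetical; `api` stands for an instance of this API class, created and
# authenticated elsewhere). delete_collection_api_service deletes every APIService matched by
# the optional selectors, so it is normally scoped with a label_selector or field_selector:
#
#     status = api.delete_collection_api_service(label_selector='owner=my-team')
#     # `status` is the V1Status returned by the server for the bulk delete.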
def delete_collection_api_service_with_http_info(self, **kwargs):
"""
delete collection of APIService
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_api_service_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_api_service" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apiregistration.k8s.io/v1/apiservices', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_api_resources(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_api_resources_with_http_info(**kwargs)
else:
(data) = self.get_api_resources_with_http_info(**kwargs)
return data
def get_api_resources_with_http_info(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apiregistration.k8s.io/v1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_api_service(self, **kwargs):
"""
list or watch objects of kind APIService
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_api_service(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1APIServiceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_api_service_with_http_info(**kwargs)
else:
(data) = self.list_api_service_with_http_info(**kwargs)
return data
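# Pagination sketch (hypothetical; `api` is an instance of this class, and the returned list
# is assumed to expose the continue token as `metadata._continue`, following the usual
# Kubernetes Python client naming for the reserved word `continue`):
#
#     page = api.list_api_service(limit=50)
#     while True:
#         for svc in page.items:
#             print(svc.metadata.name)
#         token = page.metadata._continue
#         if not token:
#             break
#         page = api.list_api_service(limit=50, _continue=token)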
def list_api_service_with_http_info(self, **kwargs):
"""
list or watch objects of kind APIService
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_api_service_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1APIServiceList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_api_service" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apiregistration.k8s.io/v1/apiservices', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIServiceList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_api_service(self, name, body, **kwargs):
"""
partially update the specified APIService
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_api_service(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the APIService (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1APIService
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_api_service_with_http_info(name, body, **kwargs)
else:
(data) = self.patch_api_service_with_http_info(name, body, **kwargs)
return data
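# Patch sketch (hypothetical; the APIService name is a placeholder and `api` is an instance of
# this class). A JSON Patch body is shown because it matches the first Content-Type advertised
# by this endpoint; merge-patch and strategic-merge-patch dict bodies are also accepted, with
# the actual Content-Type chosen by the configured api_client:
#
#     patch = [{'op': 'replace', 'path': '/spec/versionPriority', 'value': 20}]
#     updated = api.patch_api_service('v1beta1.metrics.k8s.io', patch)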
def patch_api_service_with_http_info(self, name, body, **kwargs):
"""
partially update the specified APIService
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_api_service_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the APIService (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1APIService
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'body', 'pretty', 'dry_run', 'field_manager', 'force']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_api_service" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_api_service`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_api_service`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
if 'force' in params:
query_params.append(('force', params['force']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apiregistration.k8s.io/v1/apiservices/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIService',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_api_service_status(self, name, body, **kwargs):
"""
partially update status of the specified APIService
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_api_service_status(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the APIService (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1APIService
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_api_service_status_with_http_info(name, body, **kwargs)
else:
(data) = self.patch_api_service_status_with_http_info(name, body, **kwargs)
return data
def patch_api_service_status_with_http_info(self, name, body, **kwargs):
"""
partially update status of the specified APIService
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_api_service_status_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the APIService (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1APIService
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'body', 'pretty', 'dry_run', 'field_manager', 'force']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_api_service_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_api_service_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_api_service_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
if 'force' in params:
query_params.append(('force', params['force']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apiregistration.k8s.io/v1/apiservices/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIService',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_api_service(self, name, **kwargs):
"""
read the specified APIService
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_api_service(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the APIService (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
:return: V1APIService
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_api_service_with_http_info(name, **kwargs)
else:
(data) = self.read_api_service_with_http_info(name, **kwargs)
return data
def read_api_service_with_http_info(self, name, **kwargs):
"""
read the specified APIService
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_api_service_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the APIService (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
:return: V1APIService
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'pretty', 'exact', 'export']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_api_service" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_api_service`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apiregistration.k8s.io/v1/apiservices/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIService',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_api_service_status(self, name, **kwargs):
"""
read status of the specified APIService
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_api_service_status(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the APIService (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1APIService
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_api_service_status_with_http_info(name, **kwargs)
else:
(data) = self.read_api_service_status_with_http_info(name, **kwargs)
return data
def read_api_service_status_with_http_info(self, name, **kwargs):
"""
read status of the specified APIService
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_api_service_status_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the APIService (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1APIService
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_api_service_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_api_service_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apiregistration.k8s.io/v1/apiservices/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIService',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_api_service(self, name, body, **kwargs):
"""
replace the specified APIService
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_api_service(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the APIService (required)
:param V1APIService body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1APIService
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_api_service_with_http_info(name, body, **kwargs)
else:
(data) = self.replace_api_service_with_http_info(name, body, **kwargs)
return data
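# Replace sketch (hypothetical name; assumes the vendored models expose spec fields in
# snake_case, as the upstream Kubernetes Python client does). replace_api_service is a full
# read-modify-write: the object read back carries metadata.resource_version, which lets the
# server reject the PUT if someone else updated the APIService in between:
#
#     svc = api.read_api_service('v1beta1.metrics.k8s.io')
#     svc.spec.version_priority = 20
#     updated = api.replace_api_service(svc.metadata.name, svc)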
def replace_api_service_with_http_info(self, name, body, **kwargs):
"""
replace the specified APIService
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_api_service_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the APIService (required)
:param V1APIService body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1APIService
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'body', 'pretty', 'dry_run', 'field_manager']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_api_service" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_api_service`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_api_service`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apiregistration.k8s.io/v1/apiservices/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIService',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_api_service_status(self, name, body, **kwargs):
"""
replace status of the specified APIService
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_api_service_status(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the APIService (required)
:param V1APIService body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1APIService
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_api_service_status_with_http_info(name, body, **kwargs)
else:
(data) = self.replace_api_service_status_with_http_info(name, body, **kwargs)
return data
def replace_api_service_status_with_http_info(self, name, body, **kwargs):
"""
replace status of the specified APIService
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_api_service_status_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the APIService (required)
:param V1APIService body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1APIService
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'body', 'pretty', 'dry_run', 'field_manager']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_api_service_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_api_service_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_api_service_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apiregistration.k8s.io/v1/apiservices/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIService',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
py | b402302564d0b278e9875dca3fcce784f6b0f5f3 |
import inspect
import re
from operator import attrgetter
from JumpScale.core.baseclasses import BaseType
from JumpScale.core.inifile.IniFile import IniFile
from JumpScale import j
"""
Note: does not support defaults (yet)
"""
class AutoCreateIniFile(IniFile):
def __init__(self, path, auto_create=False):
self._pmd_path = path
create = auto_create and not j.system.fs.isFile(path)
IniFile.__init__(self, path, create=create)
def remove(self):
j.system.fs.remove(self._pmd_path)
class ConfigSection(BaseType):
def __init__(self, config, name):
BaseType.__init__(self)
self._section_name = name
self._config = config
if not self._config.checkSection(self._section_name):
self._config.addSection(self._section_name)
self._config.write()
for attrName, config in list(self.pm_property_metadata.items()):
self._setProperty(attrName, config['self'], config['default'])
self._config.setParam(self._section_name, attrName, getattr(self, attrName))
def pm_remove(self):
self._config.removeSection(self._section_name)
self._config.removeSection(self._getParamsSectionName())
self._config.write()
def _setProperty(self, attrName, attr, default):
if self._config.checkParam(self._section_name, attrName):
str_val = self._config.getValue(self._section_name, attrName)
val = attr.fromString(str_val)
else:
val = default # TODO if not BaseType.emptyDefault(default) else None
privateName = self._getPrivateName(attrName)
setattr(self, privateName, val)
p = property(
fget=attrgetter(privateName),
fset=self._attrSetter(attrName, attr),
)
setattr(self.__class__, attrName, p)
def _getPrivateName(self, name):
return "_%s" % name
def _attrSetter(self, name, basetype):
def setter(o, val):
if basetype.check(val):
setattr(o, self._getPrivateName(name), val)
str_val = basetype.toString(val)
o._setConfigParam(name, str_val)  # persist the stringified value to the backing ini section
else:
raise ValueError("Invalid value for this parameter")
return setter
def _setConfigParam(self, name, val):
self._config.setParam(self._section_name, name, val)
def __str__(self):
config_basename = j.system.fs.getBaseName(self._config._pmd_path)
return "Config Section '%s' of %s" % (self._section_name, config_basename)
def __repr__(self):
return str(self)
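# Usage sketch (the class and file names below are illustrative, not part of JumpScale's API).
# A ConfigSection subclass declares its typed properties through the BaseType machinery, and
# the Config container defined below binds every such class attribute to an ini section named
# after the attribute:
#
#     class MainSection(ConfigSection):
#         pass  # typed BaseType properties would be declared here per JumpScale conventions
#
#     class MyConfig(Config):
#         main = MainSection          # instantiated and bound to the [main] section
#
#     cfg = MyConfig('/tmp/app.ini')              # AutoCreateIniFile creates the file if missing
#     cfg.pm_addSection('extra', MainSection)     # add a further [extra] section at runtime
#     cfg.pm_removeSection('extra')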
class Config(object):
def __init__(self, filename):
self._filename = filename
self._setConfig()
for section_name, section_class in self._genSectionClasses(self):
section_instance = section_class(self._config, section_name)
setattr(self, section_name, section_instance)
def pm_addSection(self, name, klass):
if hasattr(self, name):
raise ValueError("Instance already has section '%s'" % name)
instance = klass(self._config, name)
setattr(self, name, instance)
def pm_removeSection(self, name):
"""
Remove a section from the config
@param name: Name of the section to remove
@type name: string
"""
section = getattr(self, name)
section.pm_remove()
delattr(self, name)
def _genSectionClasses(self, o):
for attrName, attr in self._genAttrs(o):
if inspect.isclass(attr) and issubclass(attr, ConfigSection):
yield attrName, attr
def _genAttrs(self, o):
for attrName in self._genAttrNames(self):
attr = getattr(self, attrName)
yield attrName, attr
def _genAttrNames(self, o):
for attrName in dir(o):
yield attrName
def _setConfig(self):
self._config = AutoCreateIniFile(self._filename, auto_create=True)
def remove(self):
self._config.remove()
def __str__(self):
config_basename = j.system.fs.getBaseName(self._filename)
return "Config %s" % (config_basename)
def __repr__(self):
return str(self) |
py | b4023221a95546dc993d51573e7c2a5e05d7d6a9 | # Copyright 2017 Dimitri Capitaine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
import phoenixdb
from sqlalchemy import types
from sqlalchemy.engine.default import DefaultDialect, DefaultExecutionContext
from sqlalchemy.exc import CompileError
from sqlalchemy.sql.compiler import DDLCompiler
from sqlalchemy.types import BIGINT, BOOLEAN, CHAR, DATE, DECIMAL, FLOAT, INTEGER, NUMERIC,\
SMALLINT, TIME, TIMESTAMP, VARBINARY, VARCHAR
if sys.version_info.major == 3:
from urllib.parse import urlunsplit, SplitResult, urlencode
else:
from urllib import urlencode
from urlparse import urlunsplit, SplitResult
class PhoenixDDLCompiler(DDLCompiler):
def visit_primary_key_constraint(self, constraint):
if constraint.name is None:
raise CompileError("Can't create primary key without a name.")
return DDLCompiler.visit_primary_key_constraint(self, constraint)
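# Sketch (table and constraint names are illustrative): because of the check above, tables
# created through this dialect must declare their primary key with an explicit name, e.g.
#
#     from sqlalchemy import Table, MetaData, Column, Integer, PrimaryKeyConstraint
#     users = Table('users', MetaData(),
#                   Column('id', Integer),
#                   PrimaryKeyConstraint('id', name='pk_users'))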
AUTOCOMMIT_REGEXP = re.compile(
r"\s*(?:UPDATE|UPSERT|CREATE|DELETE|DROP|ALTER)", re.I | re.UNICODE
)
class PhoenixExecutionContext(DefaultExecutionContext):
def should_autocommit_text(self, statement):
return AUTOCOMMIT_REGEXP.match(statement)
class PhoenixDialect(DefaultDialect):
'''Phoenix dialect
dialect:: phoenix
:name: Phoenix
note::
The Phoenix dialect for SQLAlchemy is incomplete. It implements the functions required by Hue
for basic operation, but little else.
Connecting
----------
The connection URL has the format of phoenix://host:port
This format does not allow for specifying the http scheme, or the URL path that the server uses.
Setting tls=True sets the server URL scheme to https.
If the path arg is set, it is used as the path of the server URL.
The phoenix-specific authentication options can be set via the standard connect_args argument.
Connecting to an unsecure server::
create_engine('phoenix://localhost:8765')
Connecting to a secure server via SPNEGO (after kinit)::
create_engine('phoenix://localhost:8765', tls=True, connect_args={'authentication': 'SPNEGO'})
Connecting to a secure server via Knox::
create_engine('phoenix://localhost:8765', tls=True, path='/gateway/avatica/',\
connect_args={'authentication':'BASIC', 'avatica_user':'user', 'avatica_password':'password'})
'''
name = "phoenix"
driver = "phoenixdb"
ddl_compiler = PhoenixDDLCompiler
execution_ctx_cls = PhoenixExecutionContext
def __init__(self, tls=False, path='/', **opts):
'''
:param tls:
If True, then use https for connecting, otherwise use http
:param path:
The path component of the connection URL
'''
# There is no way to pass these via the SqlAlchemy url object
self.tls = tls
self.path = path
super(PhoenixDialect, self).__init__(**opts)
@classmethod
def dbapi(cls):
return phoenixdb
def create_connect_args(self, url):
connect_args = dict()
if url.username is not None:
connect_args['user'] = url.username
if url.password is not None:
connect_args['password'] = url.password
phoenix_url = urlunsplit(SplitResult(
scheme='https' if self.tls else 'http',
netloc='{}:{}'.format(url.host, 8765 if url.port is None else url.port),
path=self.path,
query=urlencode(url.query),
fragment='',
))
return [phoenix_url], connect_args
def has_table(self, connection, table_name, schema=None, **kw):
if schema is None:
schema = ''
return bool(connection.connect().connection.meta().get_tables(
tableNamePattern=table_name,
schemaPattern=schema,
typeList=('TABLE', 'SYSTEM_TABLE')))
def get_schema_names(self, connection, **kw):
schemas = connection.connect().connection.meta().get_schemas()
schema_names = [schema['TABLE_SCHEM'] for schema in schemas]
# Phoenix won't return the default schema if there aren't any tables in it
if '' not in schema_names:
schema_names.insert(0, '')
return schema_names
def get_table_names(self, connection, schema=None, order_by=None, **kw):
'''order_by is ignored'''
if schema is None:
schema = ''
tables = connection.connect().connection.meta().get_tables(
schemaPattern=schema, typeList=('TABLE', 'SYSTEM TABLE'))
return [table['TABLE_NAME'] for table in tables]
def get_view_names(self, connection, schema=None, **kw):
if schema is None:
schema = ''
        tables = connection.connect().connection.meta().get_tables(
            schemaPattern=schema, typeList=('VIEW',))
        return [table['TABLE_NAME'] for table in tables]
def get_columns(self, connection, table_name, schema=None, **kw):
if schema is None:
schema = ''
raw = connection.connect().connection.meta().get_columns(
schemaPattern=schema, tableNamePattern=table_name)
return [self._map_column(row) for row in raw]
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
if schema is None:
schema = ''
columns = connection.connect().connection.meta().get_columns(
            schemaPattern=schema, tableNamePattern=table_name, **kw)
pk_columns = [col['COLUMN_NAME'] for col in columns if col['KEY_SEQ'] > 0]
return {'constrained_columns': pk_columns}
def get_indexes(self, conn, table_name, schema=None, **kw):
'''This information does not seem to be exposed via Avatica
TODO: Implement by directly querying SYSTEM tables ? '''
return []
def get_foreign_keys(self, conn, table_name, schema=None, **kw):
'''Foreign keys are a foreign concept to Phoenix,
but SqlAlchemy cannot parse the DB schema if it's not implemented '''
return []
def _map_column(self, raw):
cooked = {}
cooked['name'] = raw['COLUMN_NAME']
cooked['type'] = COLUMN_DATA_TYPE[raw['TYPE_ID']]
cooked['nullable'] = bool(raw['IS_NULLABLE'])
cooked['autoincrement'] = bool(raw['IS_AUTOINCREMENT'])
cooked['comment'] = raw['REMARKS']
        cooked['default'] = None  # Not apparent how to get this from the metadata
return cooked
class TINYINT(types.Integer):
__visit_name__ = "SMALLINT"
class UNSIGNED_TINYINT(types.Integer):
__visit_name__ = "SMALLINT"
class UNSIGNED_INTEGER(types.Integer):
__visit_name__ = "INTEGER"
class DOUBLE(types.FLOAT):
__visit_name__ = "FLOAT"
class UNSIGNED_DOUBLE(types.FLOAT):
__visit_name__ = "FLOAT"
class UNSIGNED_FLOAT(types.FLOAT):
__visit_name__ = "FLOAT"
class UNSIGNED_LONG(types.BIGINT):
__visit_name__ = "BIGINT"
class UNSIGNED_TIME(types.TIME):
__visit_name__ = "TIME"
class UNSIGNED_DATE(types.DATE):
__visit_name__ = "DATE"
class UNSIGNED_TIMESTAMP(types.TIMESTAMP):
__visit_name__ = "TIMESTAMP"
class ROWID (types.String):
__visit_name__ = "VARCHAR"
COLUMN_DATA_TYPE = {
-6: TINYINT,
-5: BIGINT,
-3: VARBINARY,
1: CHAR,
2: NUMERIC,
3: DECIMAL,
4: INTEGER,
5: SMALLINT,
6: FLOAT,
8: DOUBLE,
9: UNSIGNED_INTEGER,
10: UNSIGNED_LONG,
11: UNSIGNED_TINYINT,
12: VARCHAR,
13: ROWID,
14: UNSIGNED_FLOAT,
15: UNSIGNED_DOUBLE,
16: BOOLEAN,
18: UNSIGNED_TIME,
19: UNSIGNED_DATE,
20: UNSIGNED_TIMESTAMP,
91: DATE,
92: TIME,
93: TIMESTAMP
}
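# --- Hedged usage sketch (not part of the upstream module) ---
# Assumes a Phoenix Query Server is reachable at localhost:8765 and that this
# dialect is registered under the 'phoenix' URL scheme (phoenixdb normally
# registers it via a setuptools entry point; adjust if your install differs).
if __name__ == "__main__":
    from sqlalchemy import create_engine, inspect

    engine = create_engine('phoenix://localhost:8765')
    inspector = inspect(engine)
    print(inspector.get_schema_names())          # exercises get_schema_names()
    print(inspector.get_table_names(schema=''))  # exercises get_table_names()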
|
py | b402327137035f5c23206fb5e2bf83ce4d3cc782 | """Package containing configuration files and helpers."""
|
py | b402329afa4499b351a4afa0429795b92a35775e | # -*- coding: utf-8 -*-
# Copyright © 2012-2013 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Implementation of compile_html based on textile."""
import codecs
import os
import re
try:
from textile import textile
except ImportError:
textile = None # NOQA
from nikola.plugin_categories import PageCompiler
from nikola.utils import makedirs, req_missing
class CompileTextile(PageCompiler):
"""Compile textile into HTML."""
name = "textile"
def compile_html(self, source, dest, is_two_file=True):
if textile is None:
req_missing(['textile'], 'build this site (compile Textile)')
makedirs(os.path.dirname(dest))
with codecs.open(dest, "w+", "utf8") as out_file:
with codecs.open(source, "r", "utf8") as in_file:
data = in_file.read()
if not is_two_file:
data = re.split('(\n\n|\r\n\r\n)', data, maxsplit=1)[-1]
output = textile(data, head_offset=1)
out_file.write(output)
def create_post(self, path, onefile=False, **kw):
metadata = {}
metadata.update(self.default_metadata)
metadata.update(kw)
makedirs(os.path.dirname(path))
with codecs.open(path, "wb+", "utf8") as fd:
if onefile:
fd.write('<notextile> <!--\n')
for k, v in metadata.items():
fd.write('.. {0}: {1}\n'.format(k, v))
fd.write('--></notextile>\n\n')
fd.write("\nWrite your post here.")
|
py | b4023412b6d2c54b65202ac1883178662e7f0ef5 | from sqlalchemy import Column, String, Integer
from sqlalchemy.orm import relationship
from .. import Base
from .base_model import BaseModel
class User(Base, BaseModel):
__tablename__ = 'users'
id = Column(Integer, primary_key=True, autoincrement=True)
# some other information
purchases = relationship('Transaction', backref='purchases', uselist=True)
def __init__(self, id=None):
self.id = id
def __repr__(self):
return f"<User(id={self.id})>"
|
py | b40234ebbc17ef1df692e4b8103cf234531c9dfc | '''
import datetime
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
import smtplib
def send_email(subject,
body,
hostname,
port,
user,
password,
recipients,
attachment_path=None):
"""Sends an email, and possibly an attachment, to the given recipients.
Args:
subject: The email subject text.
body: The email body text.
        hostname: Hostname of the SMTP email server.
port: Port on the host to connect on.
user: Email address to send the email from.
password: Password of the sending address.
recipients: A list of email addresses to send the message to.
attachment_path (optional): Path to the attachment file.
"""
# Create message and add body
msg = MIMEMultipart()
msg['Subject'] = subject
msg['From'] = user
msg['To'] = ', '.join(recipients)
msg.attach(MIMEText(body))
# Add attachment to message
    if attachment_path is not None:
attachment = open(attachment_path, "rb")
part = MIMEBase('application', 'octet-stream')
part.set_payload(attachment.read())
encoders.encode_base64(part)
part.add_header('Content-Disposition',
'attachment; filename="{}"'.format(attachment_path))
msg.attach(part)
# Send the message
server = smtplib.SMTP(hostname, port)
server.starttls()
server.login(user, password)
server.sendmail(from_addr = user,
to_addrs = recipients,
msg = msg.as_string())
server.quit()
'''
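# Hedged usage example for the commented-out send_email() helper above
# (SMTP settings and addresses are illustrative placeholders only):
# send_email("Scan finished", "The overnight fly scan completed.",
#            "smtp.example.org", 587, "[email protected]", "app-password",
#            ["[email protected]"], attachment_path=None)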
import numpy as np
import sys
def select_filters(flts=[]):
yield from _close_shutter(simu=False)
for key, item in filters.items():
yield from mv(item, 0)
for ii in flts:
yield from mv(filters["filter" + str(ii)], 1)
def user_scan(
exposure_time,
period,
out_x,
out_y,
out_z,
rs=1,
out_r=0,
xanes_flag=False,
xanes_angle=0,
note="",
):
# Ni
angle_ini = 0
yield from mv(zps.pi_r, angle_ini)
print("start taking tomo and xanes of Ni")
yield from move_zp_ccd(8.35, move_flag=1)
yield from fly_scan(
exposure_time,
relative_rot_angle=180,
period=period,
out_x=out_x,
out_y=out_y,
out_z=out_z,
rs=rs,
parkpos=out_r,
note=note + "_8.35keV",
)
yield from bps.sleep(2)
yield from move_zp_ccd(8.3, move_flag=1)
yield from fly_scan(
exposure_time,
relative_rot_angle=180,
period=period,
out_x=out_x,
out_y=out_y,
out_z=out_z,
rs=rs,
parkpos=out_r,
note=note + "8.3keV",
)
yield from mv(zps.pi_r, xanes_angle)
if xanes_flag:
yield from xanes_scan2(
eng_list_Ni,
exposure_time,
chunk_size=5,
out_x=out_x,
out_y=out_y,
out_z=out_z,
out_r=out_r,
note=note + "_xanes",
)
yield from mv(zps.pi_r, angle_ini)
"""
# Co
print('start taking tomo and xanes of Co')
yield from mv(zps.pi_r, angle_ini)
yield from move_zp_ccd(7.75, move_flag=1)
yield from fly_scan(0.05, relative_rot_angle=180, period=0.05, out_x=out_x, out_y=out_y,out_z=0, rs=2, parkpos=0, note=note)
yield from move_zp_ccd(7.66, move_flag=1)
yield from fly_scan(0.05, relative_rot_angle=180, period=0.05, out_x=out_x, out_y=out_y,out_z=0, rs=2, parkpos=0, note=note)
yield from mv(zps.pi_r, xanes_angle)
if xanes_flag:
yield from xanes_scan2(eng_list_Co, 0.05, chunk_size=5, out_x=out_x, out_y=out_y,note=note)
yield from mv(zps.pi_r, angle_ini)
# Mn
print('start taking tomo and xanes of Mn')
yield from mv(zps.pi_r, angle_ini)
yield from move_zp_ccd(6.59, move_flag=1)
yield from fly_scan(0.05, relative_rot_angle=180, period=0.05, out_x=out_x, out_y=out_y,out_z=0, rs=2, parkpos=0, note=note)
yield from move_zp_ccd(6.49, move_flag=1)
yield from fly_scan(0.05, relative_rot_angle=180, period=0.05, out_x=out_x, out_y=out_y,out_z=0, rs=2, parkpos=0, note=note)
yield from mv(zps.pi_r, xanes_angle)
if xanes_flag:
yield from xanes_scan2(eng_list_Mn, 0.1, chunk_size=5, out_x=out_x, out_y=out_y,note=note)
yield from mv(zps.pi_r, angle_ini)
"""
def user_xanes(out_x, out_y, note=""):
"""
yield from move_zp_ccd(7.4, move_flag=1, xanes_flag='2D')
yield from bps.sleep(1)
yield from xanes_scan2(eng_list_Co, 0.05, chunk_size=5, out_x=out_x, out_y=out_y, note=note)
yield from bps.sleep(5)
"""
print("please wait for 5 sec...starting Ni xanes")
yield from move_zp_ccd(8.3, move_flag=1)
yield from bps.sleep(1)
yield from xanes_scan2(
eng_list_Ni, 0.05, chunk_size=5, out_x=out_x, out_y=out_y, note=note
)
"""
def user_flyscan(out_x, out_y, note=''):
yield from move_zp_ccd(8.35, move_flag=1, xanes_flag='2D')
yield from bps.sleep(1)
yield from fly_scan(0.05, relative_rot_angle=180, period=0.05, out_x=out_x, out_y=out_y,out_z=0, rs=2, parkpos=0, note=note)
yield from move_zp_ccd(8.3, move_flag=1, xanes_flag='2D')
yield from bps.sleep(1)
yield from fly_scan(0.05, relative_rot_angle=180, period=0.05, out_x=out_x, out_y=out_y,out_z=0, rs=2, parkpos=0, note=note)
yield from move_zp_ccd(7.75, move_flag=1, xanes_flag='2D')
yield from bps.sleep(1)
yield from fly_scan(0.05, relative_rot_angle=180, period=0.05, out_x=out_x, out_y=out_y,out_z=0, rs=2, parkpos=0, note=note)
yield from move_zp_ccd(7.66, move_flag=1, xanes_flag='2D')
yield from bps.sleep(1)
yield from fly_scan(0.05, relative_rot_angle=180, period=0.05, out_x=out_x, out_y=out_y,out_z=0, rs=2, parkpos=0, note=note)
yield from move_zp_ccd(6.59, move_flag=1, xanes_flag='2D')
yield from bps.sleep(1)
yield from fly_scan(0.05, relative_rot_angle=180, period=0.05, out_x=out_x, out_y=out_y,out_z=0, rs=2, parkpos=0, note=note)
yield from move_zp_ccd(6.49, move_flag=1, xanes_flag='2D')
yield from bps.sleep(1)
yield from fly_scan(0.05, relative_rot_angle=180, period=0.05, out_x=out_x, out_y=out_y,out_z=0, rs=2, parkpos=0, note=note)
"""
def overnight_fly():
insert_text("start William Zhou in-situ scan at 10min interval for 70 times:")
for i in range(70):
print(f"current scan# {i}")
yield from abs_set(shutter_open, 1)
yield from sleep(1)
yield from abs_set(shutter_open, 1)
yield from sleep(2)
yield from fly_scan(
exposure_time=0.05,
relative_rot_angle=180,
period=0.05,
chunk_size=20,
out_x=0,
out_y=0,
out_z=1000,
out_r=0,
rs=3,
simu=False,
note="[email protected],w/filter 1&2",
)
yield from abs_set(shutter_close, 1)
yield from sleep(1)
yield from abs_set(shutter_close, 1)
yield from sleep(2)
yield from bps.sleep(520)
insert_text("finished pin-situ scan")
def insitu_xanes_scan(
eng_list,
exposure_time=0.2,
out_x=0,
out_y=0,
out_z=0,
out_r=0,
repeat_num=1,
sleep_time=1,
note="None",
):
insert_text("start from now on, taking in-situ NMC charge/discharge xanes scan:")
for i in range(repeat_num):
print(f"scan #{i}\n")
yield from xanes_scan2(
eng_list,
exposure_time=exposure_time,
chunk_size=2,
out_x=out_x,
out_y=out_y,
out_z=out_z,
out_r=out_r,
note=f"{note}_#{i}",
)
current_time = str(datetime.now().time())[:8]
print(f"current time is {current_time}")
insert_text(f"current scan finished at: {current_time}")
yield from abs_set(shutter_close, 1)
yield from bps.sleep(1)
yield from abs_set(shutter_close, 1)
print(f"\nI'm sleeping for {sleep_time} sec ...\n")
yield from bps.sleep(sleep_time)
insert_text("finished in-situ xanes scan !!")
def user_fly_scan(
exposure_time=0.1, period=0.1, chunk_size=20, rs=1, note="", simu=False, md=None
):
"""
motor_x_ini = zps.pi_x.position
# motor_x_out = motor_x_ini + txm_out_x
motor_y_ini = zps.sy.position
motor_y_out = motor_y_ini + out_y
motor_z_ini = zps.sz.position
motor_z_out = motor_z_ini + out_z
motor_r_ini = zps.pi_r.position
motor_r_out = motor_r_ini + out_r
"""
motor_r_ini = zps.pi_r.position
motor = [zps.sx, zps.sy, zps.sz, zps.pi_r, zps.pi_x]
detectors = [Andor, ic3]
offset_angle = -2.0 * rs
current_rot_angle = zps.pi_r.position
# target_rot_angle = current_rot_angle + relative_rot_angle
_md = {
"detectors": ["Andor"],
"motors": [mot.name for mot in motor],
"XEng": XEng.position,
"ion_chamber": ic3.name,
"plan_args": {
"exposure_time": exposure_time,
"period": period,
"chunk_size": chunk_size,
"rs": rs,
"note": note if note else "None",
},
"plan_name": "fly_scan",
"num_bkg_images": chunk_size,
"num_dark_images": chunk_size,
"chunk_size": chunk_size,
"plan_pattern": "linspace",
"plan_pattern_module": "numpy",
"hints": {},
"operator": "FXI",
"note": note if note else "None",
"motor_pos": wh_pos(print_on_screen=0),
}
_md.update(md or {})
try:
dimensions = [(zps.pi_r.hints["fields"], "primary")]
except (AttributeError, KeyError):
pass
else:
_md["hints"].setdefault("dimensions", dimensions)
yield from _set_andor_param(
exposure_time=exposure_time, period=period, chunk_size=chunk_size
)
print("set rotation speed: {} deg/sec".format(rs))
@stage_decorator(list(detectors) + motor)
@bpp.monitor_during_decorator([zps.pi_r])
@run_decorator(md=_md)
def inner_scan():
        # close shutter, dark images: number=chunk_size (e.g. 20)
print("\nshutter closed, taking dark images...")
yield from _take_dark_image(detectors, motor, num_dark=1, simu=simu)
yield from mv(zps.pi_x, 0)
yield from mv(zps.pi_r, -50)
yield from _set_rotation_speed(rs=rs)
# open shutter, tomo_images
yield from _open_shutter(simu=simu)
print("\nshutter opened, taking tomo images...")
yield from mv(zps.pi_r, -50 + offset_angle)
status = yield from abs_set(zps.pi_r, 50, wait=False)
yield from bps.sleep(2)
while not status.done:
yield from trigger_and_read(list(detectors) + motor)
# bkg images
print("\nTaking background images...")
yield from _set_rotation_speed(rs=30)
yield from mv(zps.pi_r, 0)
yield from mv(zps.pi_x, 12)
yield from mv(zps.pi_r, 70)
yield from trigger_and_read(list(detectors) + motor)
yield from _close_shutter(simu=simu)
yield from mv(zps.pi_r, 0)
yield from mv(zps.pi_x, 0)
yield from mv(zps.pi_x, 0)
# yield from mv(zps.pi_r, motor_r_ini)
uid = yield from inner_scan()
print("scan finished")
txt = get_scan_parameter()
insert_text(txt)
print(txt)
return uid
def tmp_scan():
x = np.array([0, 1, 2, 3]) * 0.015 * 2560 + zps.sx.position
y = np.array([0, 1, 2, 3]) * 0.015 * 2160 + zps.sy.position
i = 0
j = 0
for xx in x:
i += 1
for yy in y:
j += 1
print(f"current {i}_{j}: x={xx}, y={yy}")
yield from mv(zps.sx, xx, zps.sy, yy)
yield from xanes_scan2(
eng_Ni_list_xanes,
0.05,
chunk_size=4,
out_x=2000,
out_y=0,
out_z=0,
out_r=0,
simu=False,
note="NCM532_72cycle_discharge_{i}_{j}",
)
def mosaic_fly_scan(
x_list,
y_list,
z_list,
r_list,
exposure_time=0.1,
relative_rot_angle=150,
period=0.1,
chunk_size=20,
out_x=None,
out_y=None,
out_z=4400,
out_r=90,
rs=1,
note="",
simu=False,
relative_move_flag=0,
traditional_sequence_flag=0,
):
txt = "start mosaic_fly_scan, containing following fly_scan\n"
insert_text(txt)
insert_text("x_list = ")
insert_text(str(x_list))
insert_text("y_list = ")
insert_text(str(y_list))
insert_text("z_list = ")
insert_text(str(z_list))
insert_text("r_list = ")
insert_text(str(r_list))
nx = len(x_list)
ny = len(y_list)
for i in range(ny):
for j in range(nx):
success = False
count = 1
while not success and count < 20:
try:
RE(
mv(
zps.sx,
x_list[j],
zps.sy,
y_list[i],
zps.sz,
z_list[i],
zps.pi_r,
r_list[i],
)
)
RE(
fly_scan(
exposure_time,
relative_rot_angle,
period,
chunk_size,
out_x,
out_y,
out_z,
out_r,
rs,
note,
simu,
relative_move_flag,
traditional_sequence_flag,
md=None,
)
)
success = True
                except Exception:
count += 1
RE.abort()
Andor.unstage()
print("sleeping for 30 sec")
RE(bps.sleep(30))
txt = f"Redo scan at x={x_list[i]}, y={y_list[i]}, z={z_list[i]} for {count} times"
print(txt)
insert_text(txt)
txt = "mosaic_fly_scan finished !!\n"
insert_text(txt)
def mosaic2d_lists(x_start, x_end, x_step, y_start, y_end, y_step, z, r):
x_range = list(range(x_start, x_end + x_step, x_step))
y_range = list(range(y_start, y_end + y_step, y_step))
x_list = x_range * len(y_range)
y_list = []
for y in y_range:
y_list.extend([y] * len(x_range))
z_list = [z] * len(x_list)
r_list = [r] * len(x_list)
return x_list, y_list, z_list, r_list
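# Hedged example (illustrative coordinates, in motor units): a 2 x 2 mosaic grid.
# x_list, y_list, z_list, r_list = mosaic2d_lists(0, 100, 100, 0, 100, 100, z=0, r=0)
# -> x_list = [0, 100, 0, 100], y_list = [0, 0, 100, 100],
#    z_list = [0, 0, 0, 0],     r_list = [0, 0, 0, 0]
# RE(mosaic_fly_scan(x_list, y_list, z_list, r_list, exposure_time=0.1, note='mosaic'))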
def multi_pos_3D_xanes(
eng_list,
x_list=[0],
y_list=[0],
z_list=[0],
r_list=[0],
exposure_time=0.05,
relative_rot_angle=182,
    rs=2,
    out_x=None,
    out_y=None,
    out_z=None,
    out_r=None,
    note="",
):
"""
the sample_out position is in its absolute value:
will move sample to out_x (um) out_y (um) out_z(um) and out_r (um) to take background image
to run:
RE(multi_pos_3D_xanes(Ni_eng_list, x_list=[a, b, c], y_list=[aa,bb,cc], z_list=[aaa,bbb, ccc], r_list=[0, 0, 0], exposure_time=0.05, relative_rot_angle=185, rs=3, out_x=1500, out_y=-1500, out_z=-770, out_r=0, note='NC')
"""
num_pos = len(x_list)
for i in range(num_pos):
print(f"currently, taking 3D xanes at position {i}\n")
yield from mv(
zps.sx, x_list[i], zps.sy, y_list[i], zps.sz, z_list[i], zps.pi_r, r_list[i]
)
yield from bps.sleep(2)
note_pos = note + f"position_{i}"
yield from xanes_3D(
eng_list,
exposure_time=exposure_time,
relative_rot_angle=relative_rot_angle,
period=exposure_time,
out_x=out_x,
out_y=out_y,
out_z=out_z,
out_r=out_r,
rs=rs,
simu=False,
relative_move_flag=0,
traditional_sequence_flag=1,
note=note_pos,
)
insert_text(f"finished 3D xanes scan for {note_pos}")
def mk_eng_list(elem):
if elem.split("_")[-1] == "wl":
eng_list = np.genfromtxt(
"/NSLS2/xf18id1/SW/xanes_ref/"
+ elem.split("_")[0]
+ "/eng_list_"
+ elem.split("_")[0]
+ "_s_xanes_standard_21pnt.txt")
elif elem.split("_")[-1] == "101":
eng_list = np.genfromtxt(
"/NSLS2/xf18id1/SW/xanes_ref/"
+ elem.split("_")[0]
+ "/eng_list_"
+ elem.split("_")[0]
+ "_xanes_standard_101pnt.txt")
elif elem.split("_")[-1] == "63":
eng_list = np.genfromtxt(
"/NSLS2/xf18id1/SW/xanes_ref/"
+ elem.split("_")[0]
+ "/eng_list_"
+ elem.split("_")[0]
+ "_xanes_standard_63pnt.txt")
return eng_list
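# Hedged usage example (the reference lists live on the beamline filesystem):
# eng_list_Ni_wl = mk_eng_list("Ni_wl")    # 21-point whiteline list for Ni
# eng_list_Ni_101 = mk_eng_list("Ni_101")  # 101-point standard list for Ni
# eng_list_Ni_63 = mk_eng_list("Ni_63")    # 63-point standard list for Ni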
def sort_in_pos(in_pos_list):
x_list = []
y_list = []
z_list = []
r_list = []
for ii in range(len(in_pos_list)):
if in_pos_list[ii][0] is None:
x_list.append(zps.sx.position)
else:
x_list.append(in_pos_list[ii][0])
if in_pos_list[ii][1] is None:
y_list.append(zps.sy.position)
else:
y_list.append(in_pos_list[ii][1])
if in_pos_list[ii][2] is None:
z_list.append(zps.sz.position)
else:
z_list.append(in_pos_list[ii][2])
if in_pos_list[ii][3] is None:
r_list.append(zps.pi_r.position)
else:
r_list.append(in_pos_list[ii][3])
return (x_list, y_list, z_list, r_list)
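# Hedged example: None entries fall back to the current motor positions.
# in_pos = [[100, 200, None, 0], [None, None, None, 45]]
# x, y, z, r = sort_in_pos(in_pos)
# -> x = [100, zps.sx.position], y = [200, zps.sy.position],
#    z = [zps.sz.position, zps.sz.position], r = [0, 45]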
def multi_edge_xanes(
elements=["Ni_wl"],
scan_type = '3D',
filters={"Ni_filters": [1, 2, 3]},
exposure_time={"Ni_exp": 0.05},
relative_rot_angle=185,
rs=1,
in_pos_list = [[None, None, None, None]],
out_pos=[None, None, None, None],
note="",
relative_move_flag=0,
binning = [2, 2],
simu=False):
x_list, y_list, z_list, r_list = sort_in_pos(in_pos_list)
for elem in elements:
for key in filters.keys():
if elem.split("_")[0] == key.split("_")[0]:
yield from select_filters(filters[key])
break
else:
yield from select_filters([])
for key in exposure_time.keys():
if elem.split("_")[0] == key.split("_")[0]:
exposure = exposure_time[key]
print(elem, exposure)
break
else:
exposure = 0.05
print('use default exposure time 0.05s')
eng_list = mk_eng_list(elem)
if scan_type == '2D':
yield from multipos_2D_xanes_scan2(eng_list,
x_list,
y_list,
z_list,
r_list,
out_x=out_pos[0],
out_y=out_pos[1],
out_z=out_pos[2],
out_r=out_pos[3],
exposure_time=exposure,
chunk_size=5,
simu=simu,
relative_move_flag=relative_move_flag,
note=note,
md=None,
sleep_time=0,
binning = [2, 2],
repeat_num=1)
elif scan_type == '3D':
yield from multi_pos_xanes_3D(eng_list,
x_list,
y_list,
z_list,
r_list,
exposure_time=exposure,
relative_rot_angle=relative_rot_angle,
rs=rs,
out_x=out_pos[0],
out_y=out_pos[1],
out_z=out_pos[2],
out_r=out_pos[3],
note=note,
simu=simu,
relative_move_flag=relative_move_flag,
rot_first_flag=1,
sleep_time=0,
binning = [2, 2],
repeat=1)
else:
print('wrong scan type')
return
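# Hedged usage example (positions, filters and exposures are illustrative only;
# dictionary keys must start with the element symbol, e.g. "Ni_..."):
# RE(multi_edge_xanes(elements=["Ni_wl", "Mn_101"],
#                     scan_type='3D',
#                     filters={"Ni_filters": [1, 2], "Mn_filters": [2, 3]},
#                     exposure_time={"Ni_exp": 0.05, "Mn_exp": 0.1},
#                     relative_rot_angle=185, rs=1,
#                     in_pos_list=[[None, None, None, None]],
#                     out_pos=[1500, -1500, None, 0],
#                     note='NMC_multi_edge'))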
def fly_scan2(
exposure_time=0.1,
start_angle = None,
relative_rot_angle=180,
period=0.15,
chunk_size=20,
out_x=None,
out_y=2000,
out_z=None,
out_r=None,
rs=1,
note="",
simu=False,
relative_move_flag=1,
rot_first_flag=1,
filters=[],
rot_back_velo=30,
md=None,
binning=[1, 1]
):
"""
Inputs:
-------
exposure_time: float, in unit of sec
start_angle: float
starting angle
relative_rot_angle: float,
total rotation angles start from current rotary stage (zps.pi_r) position
period: float, in unit of sec
period of taking images, "period" should >= "exposure_time"
chunk_size: int, default setting is 20
number of images taken for each trigger of Andor camera
    out_x: float, default is None
        relative movement of sample in "x" direction using zps.sx to move out sample (in unit of um)
        NOTE: BE CAUTIONED THAT IT WILL ROTATE THE SAMPLE BY "out_r" FIRST, AND THEN MOVE X, Y, Z
    out_y: float, default is 2000
        relative movement of sample in "y" direction using zps.sy to move out sample (in unit of um)
        NOTE: BE CAUTIONED THAT IT WILL ROTATE THE SAMPLE BY "out_r" FIRST, AND THEN MOVE X, Y, Z
    out_z: float, default is None
        relative movement of sample in "z" direction using zps.sz to move out sample (in unit of um)
        NOTE: BE CAUTIONED THAT IT WILL ROTATE THE SAMPLE BY "out_r" FIRST, AND THEN MOVE X, Y, Z
    out_r: float, default is None
        relative movement of sample by rotating "out_r" degrees, using zps.pi_r to move out sample
        NOTE: BE CAUTIONED THAT IT WILL ROTATE THE SAMPLE BY "out_r" FIRST, AND THEN MOVE X, Y, Z
rs: float, default is 1
rotation speed in unit of deg/sec
note: string
adding note to the scan
simu: Bool, default is False
True: will simulate closing/open shutter without really closing/opening
False: will really close/open shutter
"""
global ZONE_PLATE
motor_x_ini = zps.sx.position
motor_y_ini = zps.sy.position
motor_z_ini = zps.sz.position
motor_r_ini = zps.pi_r.position
if not (start_angle is None):
yield from mv(zps.pi_r, start_angle)
if relative_move_flag:
motor_x_out = motor_x_ini + out_x if not (out_x is None) else motor_x_ini
motor_y_out = motor_y_ini + out_y if not (out_y is None) else motor_y_ini
motor_z_out = motor_z_ini + out_z if not (out_z is None) else motor_z_ini
motor_r_out = motor_r_ini + out_r if not (out_r is None) else motor_r_ini
else:
motor_x_out = out_x if not (out_x is None) else motor_x_ini
motor_y_out = out_y if not (out_y is None) else motor_y_ini
motor_z_out = out_z if not (out_z is None) else motor_z_ini
motor_r_out = out_r if not (out_r is None) else motor_r_ini
motors = [zps.sx, zps.sy, zps.sz, zps.pi_r]
detectors = [Andor, ic3]
offset_angle = -2 * rs
current_rot_angle = zps.pi_r.position
target_rot_angle = current_rot_angle + relative_rot_angle
_md = {
"detectors": ["Andor"],
"motors": [mot.name for mot in motors],
"XEng": XEng.position,
"ion_chamber": ic3.name,
"plan_args": {
"exposure_time": exposure_time,
"start_angle": start_angle,
"relative_rot_angle": relative_rot_angle,
"period": period,
"chunk_size": chunk_size,
"out_x": out_x,
"out_y": out_y,
"out_z": out_z,
"out_r": out_r,
"rs": rs,
"relative_move_flag": relative_move_flag,
"rot_first_flag": rot_first_flag,
"filters": [filt.name for filt in filters] if filters else "None",
"note": note if note else "None",
"zone_plate": ZONE_PLATE,
},
"plan_name": "fly_scan2",
"num_bkg_images": 20,
"num_dark_images": 20,
"chunk_size": chunk_size,
"plan_pattern": "linspace",
"plan_pattern_module": "numpy",
"hints": {},
"operator": "FXI",
"note": note if note else "None",
"zone_plate": ZONE_PLATE,
#'motor_pos': wh_pos(print_on_screen=0),
}
_md.update(md or {})
try:
dimensions = [(zps.pi_r.hints["fields"], "primary")]
except (AttributeError, KeyError):
pass
else:
_md["hints"].setdefault("dimensions", dimensions)
yield from mv(Andor.cam.acquire, 0)
yield from mv(Andor.cam.bin_y, binning[0],
Andor.cam.bin_x, binning[1])
yield from mv(Andor.cam.acquire_time, exposure_time)
yield from mv(Andor.cam.acquire_period, max(period, exposure_time+0.01))
# Andor.cam.acquire_period.put(period)
# yield from _set_andor_param(
# exposure_time=exposure_time, period=period, chunk_size=chunk_size
# )
yield from _set_rotation_speed(rs=rs)
print("set rotation speed: {} deg/sec".format(rs))
# We manually stage the Andor detector below. See there for why....
# Stage everything else here in the usual way.
@stage_decorator([ic3] + motors)
@bpp.monitor_during_decorator([zps.pi_r])
@run_decorator(md=_md)
def fly_inner_scan():
# set filters
for flt in filters:
yield from mv(flt, 1)
yield from mv(flt, 1)
# yield from mv(Andor.cam.num_images, chunk_size, timeout=10) ## commented out by XH
# Manually stage the Andor. This creates a Resource document that
# contains the path to the HDF5 file where the detector writes. It also
# encodes the so-called 'frame_per_point' which here is what this plan
# calls chunk_size. The chunk_size CANNOT BE CHANGED later in the scan
# unless we unstage and re-stage the detector and generate a new
# Resource document.
# This approach imposes some unfortunate overhead (closing the HDF5
# file, opening a new one, going through all the steps to set the Area
# Detector's filepath PV, etc.). A better approach has been sketched
# in https://github.com/bluesky/area-detector-handlers/pull/11. It
# allows a single HDF5 file to contain multiple chunk_sizes.
yield from bps.stage(Andor)
yield from bps.sleep(1)
yield from mv(Andor.cam.num_images, chunk_size, timeout=10) ## added by XH
# open shutter, tomo_images
yield from _open_shutter(simu=simu)
print("\nshutter opened, taking tomo images...")
yield from mv(zps.pi_r, current_rot_angle + offset_angle)
status = yield from abs_set(zps.pi_r, target_rot_angle, wait=False)
yield from bps.sleep(2)
while not status.done:
yield from trigger_and_read(list(detectors) + motors)
# bkg images
print("\nTaking background images...")
yield from _set_rotation_speed(rs=rot_back_velo)
yield from mv(Andor.cam.num_images, 20)
# Now that the new chunk_size has been set (20) create a new Resource
# document by unstage and re-staging the detector.
yield from bps.unstage(Andor)
yield from bps.stage(Andor)
yield from bps.sleep(1)
yield from _take_bkg_image(
motor_x_out,
motor_y_out,
motor_z_out,
motor_r_out,
detectors,
motors,
num_bkg=1,
simu=False,
traditional_sequence_flag=rot_first_flag,
)
# dark images
yield from _close_shutter(simu=simu)
print("\nshutter closed, taking dark images...")
yield from _take_dark_image(detectors, motors, num_dark=1, simu=simu)
yield from bps.unstage(Andor)
        # restore filters
yield from _move_sample_in(
motor_x_ini,
motor_y_ini,
motor_z_ini,
motor_r_ini,
trans_first_flag=rot_first_flag,
)
for flt in filters:
yield from mv(flt, 0)
yield from fly_inner_scan()
yield from mv(Andor.cam.image_mode, 1)
print("scan finished")
txt = get_scan_parameter(print_flag=0)
insert_text(txt)
print(txt)
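# Hedged usage example for fly_scan2 (values are illustrative only):
# RE(fly_scan2(exposure_time=0.05, start_angle=-90, relative_rot_angle=180,
#              period=0.06, chunk_size=20, out_x=None, out_y=2000, out_z=None,
#              out_r=None, rs=3, note='sample_A_tomo', binning=[2, 2]))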
def grid_z_scan(
zstart=-0.03,
zstop=0.03,
zsteps=5,
gmesh=[[-5, 0, 5], [-5, 0, 5]],
out_x=-100,
out_y=-100,
chunk_size=10,
exposure_time=0.1,
note="",
md=None,
simu=False,
):
"""
scan the zone-plate to find best focus
use as:
    grid_z_scan(zstart=-0.03, zstop=0.03, zsteps=5, gmesh=[[-5, 0, 5], [-5, 0, 5]], out_x=-100, out_y=-100, chunk_size=10, exposure_time=0.1, note='', md=None)
Input:
---------
zstart: float, relative starting position of zp_z
    zstop: float, relative stop position of zp_z
zsteps: int, number of zstep between [zstart, zstop]
out_x: float, relative amount to move sample out for zps.sx
out_y: float, relative amount to move sample out for zps.sy
chunk_size: int, number of images per each subscan (for Andor camera)
exposure_time: float, exposure time for each image
note: str, experiment notes
"""
detectors = [Andor]
motor = zp.z
z_ini = motor.position # zp.z intial position
z_start = z_ini + zstart
z_stop = z_ini + zstop
zp_x_ini = zp.x.position
zp_y_ini = zp.y.position
# detectors = [Andor]
y_ini = zps.sy.position # sample y position (initial)
    y_out = y_ini + out_y if out_y is not None else y_ini  # sample y position (out-position)
x_ini = zps.sx.position
x_out = x_ini + out_x if not (out_x is None) else x_ini
yield from mv(Andor.cam.acquire, 0)
yield from mv(Andor.cam.image_mode, 0)
yield from mv(Andor.cam.num_images, chunk_size)
yield from mv(Andor.cam.acquire_time, exposure_time)
period_cor = max(exposure_time+0.01, 0.05)
yield from mv(Andor.cam.acquire_period, period_cor)
_md = {
"detectors": [det.name for det in detectors],
"motors": [motor.name],
"XEng": XEng.position,
"plan_args": {
"zstart": zstart,
"zstop": zstop,
"zsteps": zsteps,
"gmesh": gmesh,
"out_x": out_x,
"out_y": out_y,
"chunk_size": chunk_size,
"exposure_time": exposure_time,
"note": note if note else "None",
},
"plan_name": "grid_z_scan",
"plan_pattern": "linspace",
"plan_pattern_module": "numpy",
"hints": {},
"operator": "FXI",
"motor_pos": wh_pos(print_on_screen=0),
}
_md.update(md or {})
my_var = np.linspace(z_start, z_stop, zsteps)
try:
dimensions = [(motor.hints["fields"], "primary")]
except (AttributeError, KeyError):
pass
else:
_md["hints"].setdefault("dimensions", dimensions)
@stage_decorator(list(detectors) + [motor])
@run_decorator(md=_md)
def inner_scan():
yield from _open_shutter(simu=simu)
for xx in gmesh[0]:
for yy in gmesh[1]:
yield from mv(zp.x, zp_x_ini+xx, zp.y, zp_y_ini+yy, wait=True)
#yield from mv(zp.y, zp_y_ini+yy, wait=True)
yield from bps.sleep(1)
for x in my_var:
yield from mv(motor, x)
yield from trigger_and_read(list(detectors) + [motor], name='primary')
                # background images
yield from mv(zps.sx, x_out, zps.sy, y_out, wait=True)
yield from bps.sleep(1)
yield from trigger_and_read(list(detectors) + [motor], name='flat')
yield from mv(zps.sx, x_ini, zps.sy, y_ini, wait=True)
yield from bps.sleep(1)
#yield from mv(zps.sy, y_ini, wait=True)
yield from _close_shutter(simu=simu)
yield from bps.sleep(1)
yield from trigger_and_read(list(detectors) + [motor], name='dark')
yield from mv(zps.sx, x_ini)
yield from mv(zps.sy, y_ini)
yield from mv(zp.z, z_ini)
yield from mv(zp.x, zp_x_ini, zp.y, zp_y_ini, wait=True)
yield from mv(Andor.cam.image_mode, 1)
uid = yield from inner_scan()
yield from mv(Andor.cam.image_mode, 1)
yield from _close_shutter(simu=simu)
txt = get_scan_parameter()
insert_text(txt)
print(txt)
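# Hedged usage example (focus search over a 3 x 3 zone-plate grid; values illustrative):
# RE(grid_z_scan(zstart=-0.03, zstop=0.03, zsteps=5,
#                gmesh=[[-5, 0, 5], [-5, 0, 5]],
#                out_x=-100, out_y=-100, chunk_size=10,
#                exposure_time=0.1, note='zp_focus_check'))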
def dummy_scan( exposure_time=0.1,
start_angle = None,
relative_rot_angle=180,
period=0.15,
out_x=None,
out_y=2000,
out_z=None,
out_r=None,
rs=1,
note="",
simu=False,
relative_move_flag=1,
rot_first_flag=1,
filters=[],
rot_back_velo=30,
repeat=1):
motor_x_ini = zps.sx.position
motor_y_ini = zps.sy.position
motor_z_ini = zps.sz.position
motor_r_ini = zps.pi_r.position
if not (start_angle is None):
yield from mv(zps.pi_r, start_angle)
if relative_move_flag:
motor_x_out = motor_x_ini + out_x if not (out_x is None) else motor_x_ini
motor_y_out = motor_y_ini + out_y if not (out_y is None) else motor_y_ini
motor_z_out = motor_z_ini + out_z if not (out_z is None) else motor_z_ini
motor_r_out = motor_r_ini + out_r if not (out_r is None) else motor_r_ini
else:
motor_x_out = out_x if not (out_x is None) else motor_x_ini
motor_y_out = out_y if not (out_y is None) else motor_y_ini
motor_z_out = out_z if not (out_z is None) else motor_z_ini
motor_r_out = out_r if not (out_r is None) else motor_r_ini
motors = [zps.sx, zps.sy, zps.sz, zps.pi_r]
detectors = [Andor, ic3]
offset_angle = -2 * rs
current_rot_angle = zps.pi_r.position
target_rot_angle = current_rot_angle + relative_rot_angle
_md={'dummy scan':'dummy scan'}
yield from mv(Andor.cam.acquire, 0)
yield from _set_andor_param(
exposure_time=exposure_time, period=period
)
yield from mv(Andor.cam.image_mode, 1)
yield from mv(Andor.cam.acquire, 1)
@stage_decorator(motors)
@bpp.monitor_during_decorator([zps.pi_r])
@run_decorator(md=_md)
def fly_inner_scan():
# open shutter, tomo_images
yield from _open_shutter(simu=simu)
print("\nshutter opened, taking tomo images...")
yield from _set_rotation_speed(rs=rs)
yield from mv(zps.pi_r, current_rot_angle + offset_angle)
status = yield from abs_set(zps.pi_r, target_rot_angle, wait=False)
while not status.done:
yield from bps.sleep(1)
yield from _set_rotation_speed(rs=30)
print("set rotation speed: {} deg/sec".format(rs))
status = yield from abs_set(zps.pi_r, current_rot_angle + offset_angle, wait=False)
while not status.done:
yield from bps.sleep(1)
yield from abs_set(zps.sx, motor_x_out, wait=True)
yield from abs_set(zps.sy, motor_y_out, wait=True)
yield from abs_set(zps.sz, motor_z_out, wait=True)
yield from abs_set(zps.pi_r, motor_r_out, wait=True)
yield from abs_set(zps.sx, motor_x_ini, wait=True)
yield from abs_set(zps.sy, motor_y_ini, wait=True)
yield from abs_set(zps.sz, motor_z_ini, wait=True)
yield from abs_set(zps.pi_r, motor_r_ini, wait=True)
for ii in range(repeat):
yield from fly_inner_scan()
print('{}th scan finished'.format(ii))
yield from _set_rotation_speed(rs=rot_back_velo)
print("dummy scan finished")
def radiographic_record(exp_t=0.1, period=0.1, t_span=10, stop=True,
out_x=None, out_y=None, out_z=None, out_r=None,
filters=[], md={}, note="", simu=False,
rot_first_flag=1, relative_move_flag=1):
motor_x_ini = zps.sx.position
motor_y_ini = zps.sy.position
motor_z_ini = zps.sz.position
motor_r_ini = zps.pi_r.position
if relative_move_flag:
motor_x_out = motor_x_ini + out_x if not (out_x is None) else motor_x_ini
motor_y_out = motor_y_ini + out_y if not (out_y is None) else motor_y_ini
motor_z_out = motor_z_ini + out_z if not (out_z is None) else motor_z_ini
motor_r_out = motor_r_ini + out_r if not (out_r is None) else motor_r_ini
else:
motor_x_out = out_x if not (out_x is None) else motor_x_ini
motor_y_out = out_y if not (out_y is None) else motor_y_ini
motor_z_out = out_z if not (out_z is None) else motor_z_ini
motor_r_out = out_r if not (out_r is None) else motor_r_ini
motors = [zps.sx, zps.sy, zps.sz, zps.pi_r]
detectors = [Andor, ic3]
_md = {
"detectors": ["Andor"],
# "motors": [mot.name for mot in motors],
"XEng": XEng.position,
"ion_chamber": ic3.name,
"plan_args": {
"exposure_time": exp_t,
"period": period,
"out_x": out_x,
"out_y": out_y,
"out_z": out_z,
"out_r": out_r,
"time_span": t_span,
"filters": [filt.name for filt in filters] if filters else "None",
"note": note if note else "None",
"zone_plate": ZONE_PLATE,
},
"plan_name": "radiographic_record",
"num_bkg_images": 20,
"num_dark_images": 20,
"plan_pattern": "linspace",
"plan_pattern_module": "numpy",
"hints": {},
"operator": "FXI",
"note": note if note else "None",
"zone_plate": ZONE_PLATE
}
_md.update(md or {})
yield from mv(Andor.cam.acquire, 0)
yield from _set_andor_param(
exposure_time=exp_t, period=period
)
yield from mv(Andor.cam.image_mode, 0)
@stage_decorator(list(detectors))
# @bpp.monitor_during_decorator([Andor.cam.num_images_counter])
@run_decorator(md=_md)
def rad_record_inner():
yield from _open_shutter(simu=simu)
for flt in filters:
yield from mv(flt, 1)
yield from mv(flt, 1)
yield from bps.sleep(1)
yield from mv(Andor.cam.num_images, int(t_span/period))
yield from trigger_and_read([Andor])
yield from mv(zps.sx, motor_x_out,
zps.sy, motor_y_out,
zps.sz, motor_z_out,
zps.pi_r, motor_r_out)
yield from mv(Andor.cam.num_images,20)
yield from trigger_and_read([Andor])
yield from _close_shutter(simu=simu)
yield from mv(zps.sx, motor_x_ini,
zps.sy, motor_y_ini,
zps.sz, motor_z_ini,
zps.pi_r, motor_r_ini)
yield from trigger_and_read([Andor])
yield from mv(Andor.cam.image_mode, 1)
for flt in filters:
yield from mv(flt, 0)
yield from rad_record_inner()
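# Hedged usage example (records ~t_span/period frames, then flat and dark images;
# values are illustrative only):
# RE(radiographic_record(exp_t=0.1, period=0.1, t_span=10,
#                        out_x=None, out_y=2000, out_z=None, out_r=None,
#                        note='radiograph', simu=False))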
# def multi_pos_2D_and_3D_xanes(elements=['Ni'], sam_in_pos_list_2D=[[[0, 0, 0, 0],]], sam_out_pos_list_2D=[[[0, 0, 0, 0],]], sam_in_pos_list_3D=[[[0, 0, 0, 0],]], sam_out_pos_list_3D=[[[0, 0, 0, 0],]], exposure_time=[0.05], relative_rot_angle=182, relative_move_flag=False, rs=1, note=''):
# sam_in_pos_list_2D = np.asarray(sam_in_pos_list_2D)
# sam_out_pos_list_2D = np.asarray(sam_out_pos_list_2D)
# sam_in_pos_list_3D = np.asarray(sam_in_pos_list_3D)
# sam_out_pos_list_3D = np.asarray(sam_out_pos_list_3D)
# exposure_time = np.asarray(exposure_time)
# if exposure_time.shape[0] == 1:
# exposure_time = np.ones(len(elements))*exposure_time[0]
# elif len(elements) != exposure_time.shape[0]:
# # to do in bs manner
# pass
#
# eng_list = []
# for ii in elements:
# eng_list.append(list(np.genfromtxt('/NSLS2/xf18id1/SW/xanes_ref/'+ii+'/eng_list_'+ii+'_xanes_standard.txt')))
#
# for ii in range(sam_in_pos_list_2D.shape[0]):
# for jj in range(len(elements)):
# x_list = [sam_in_pos_list_2D[ii, :, 0]]
# y_list = [sam_in_pos_list_2D[ii, :, 1]]
# z_list = [sam_in_pos_list_2D[ii, :, 2]]
# r_list = [sam_in_pos_list_2D[ii, :, 3]]
# out_x = sam_out_pos_list_2D[ii, :, 0]
# out_y = sam_out_pos_list_2D[ii, :, 1]
# out_z = sam_out_pos_list_2D[ii, :, 2]
# out_r = sam_out_pos_list_2D[ii, :, 3]
# yield from multipos_2D_xanes_scan2(eng_list[jj], x_list, y_list, z_list, r_list,
# out_x=out_x, out_y=out_y, out_z=out_z, out_r=out_r,
# exposure_time=exposure_time[jj], chunk_size=5,
# simu=False, relative_move_flag=relative_move_flag, note=note, md=None, sleep_time=0, repeat_num=1)
#
# for ii in range(sam_in_pos_list_3D.shape[0]):
# for jj in range(len(elements)):
# x_list = [sam_in_pos_list_3D[ii, :, 0]]
# y_list = [sam_in_pos_list_3D[ii, :, 1]]
# z_list = [sam_in_pos_list_3D[ii, :, 2]]
# r_list = [sam_in_pos_list_3D[ii, :, 3]]
# out_x = sam_out_pos_list_3D[ii, :, 0]
# out_y = sam_out_pos_list_3D[ii, :, 1]
# out_z = sam_out_pos_list_3D[ii, :, 2]
# out_r = sam_out_pos_list_3D[ii, :, 3]
# yield from multi_pos_3D_xanes(eng_list[jj], x_list, y_list, z_list, r_list,
# exposure_time=exposure_time[jj], relative_rot_angle=relative_rot_angle, rs=rs,
# out_x=out_x, out_y=out_y, out_z=out_z, out_r=out_r, note=note, simu=False,
# relative_move_flag=relative_move_flag, traditional_sequence_flag=1, sleep_time=0, repeat=1)
# def multi_pos_2D_xanes_and_3D_tomo(elements=['Ni'], sam_in_pos_list_2D=[[[0, 0, 0, 0]]], sam_out_pos_list_2D=[[[0, 0, 0, 0]]], sam_in_pos_list_3D=[[[0, 0, 0, 0]]], sam_out_pos_list_3D=[[[0, 0, 0, 0]]],
# exposure_time_2D=[0.05], exposure_time_3D=[0.05], relative_rot_angle=182, rs=1, eng_3D=[8.4], note='', relative_move_flag=False):
# sam_in_pos_list_2D = np.asarray(sam_in_pos_list_2D)
# sam_out_pos_list_2D = np.asarray(sam_out_pos_list_2D)
# sam_in_pos_list_3D = np.asarray(sam_in_pos_list_3D)
# sam_out_pos_list_3D = np.asarray(sam_out_pos_list_3D)
# exposure_time_2D = np.asarray(exposure_time_2D)
# exposure_time_3D = np.asarray(exposure_time_3D)
# if exposure_time_2D.shape[0] == 1:
# exposure_time_2D = np.ones(len(elements))*exposure_time_2D[0]
# elif len(elements) != exposure_time_2D.shape[0]:
# # to do in bs manner
# pass
#
# if exposure_time_3D.shape[0] == 1:
# exposure_time_3D = np.ones(len(elements))*exposure_time_3D[0]
# elif len(elements) != exposure_time_3D.shape[0]:
# # to do in bs manner
# pass
#
# eng_list = []
# for ii in elements:
# eng_list.append(list(np.genfromtxt('/NSLS2/xf18id1/SW/xanes_ref/'+ii+'/eng_list_'+ii+'_xanes_standard.txt')))
#
# for ii in range(sam_in_pos_list_2D.shape[0]):
# for jj in range(len(elements)):
# x_list = sam_in_pos_list_2D[ii, :, 0]
# y_list = sam_in_pos_list_2D[ii, :, 1]
# z_list = sam_in_pos_list_2D[ii, :, 2]
# r_list = sam_in_pos_list_2D[ii, :, 3]
# out_x = sam_out_pos_list_2D[ii, 0]
# out_y = sam_out_pos_list_2D[ii, 1]
# out_z = sam_out_pos_list_2D[ii, 2]
# out_r = sam_out_pos_list_2D[ii, 3]
# print(x_list)
# print(y_list)
# print(z_list)
# print(r_list)
# print(out_x)
# print(out_y)
# print(out_z)
# print(out_r)
# yield from multipos_2D_xanes_scan2(eng_list[jj], x_list, y_list, z_list, r_list,
# out_x=out_x, out_y=out_y, out_z=out_z, out_r=out_r,
# exposure_time=exposure_time_2D[jj], chunk_size=5,
# simu=False, relative_move_flag=relative_move_flag, note=note, md=None, sleep_time=0, repeat_num=1)
#
# for ii in range(sam_in_pos_list_3D.shape[0]):
# for jj in range(len(elements)):
# x_list = sam_in_pos_list_3D[ii, :, 0]
# y_list = sam_in_pos_list_3D[ii, :, 1]
# z_list = sam_in_pos_list_3D[ii, :, 2]
# r_list = sam_in_pos_list_3D[ii, :, 3]
# out_x = sam_out_pos_list_3D[ii, 0]
# out_y = sam_out_pos_list_3D[ii, 1]
# out_z = sam_out_pos_list_3D[ii, 2]
# out_r = sam_out_pos_list_3D[ii, 3]
# yield from multi_pos_xanes_3D(eng_3D, x_list, y_list, z_list, r_list,
# exposure_time=exposure_time_3D[jj], relative_rot_angle=relative_rot_angle, rs=rs,
# out_x=out_x, out_y=out_y, out_z=out_z, out_r=out_r, note=note, simu=False,
# relative_move_flag=relative_move_flag, traditional_sequence_flag=1, sleep_time=0, repeat=1)
############ old routine: 2D routine works but 3D routine has some bugs -- start
# def multi_pos_2D_and_3D_xanes(elements=['Ni_short'], filters=[[1, 2, 3]], sam_in_pos_list_2D=[[[0, 0, 0, 0]]], sam_out_pos_list_2D=[[[0, 0, 0, 0]]], sam_in_pos_list_3D=[[[0, 0, 0, 0]]], sam_out_pos_list_3D=[[[0, 0, 0, 0]]],
# exposure_time_2D=[0.05], exposure_time_3D=[0.05], relative_rot_angle=182, rs=1, sleep_time=0, repeat_num=1, note='', relative_move_flag=0, simu=False):
# """
# pos_list layer structure: 1st layer -> energy
# 2nd layer -> multiple positions at the given energy
# 3rd layer -> individual postion in the multiple poistion list
# """
# for kk in range(repeat_num):
# sam_in_pos_list_2D = np.asarray(sam_in_pos_list_2D)
# sam_out_pos_list_2D = np.asarray(sam_out_pos_list_2D)
# sam_in_pos_list_3D = np.asarray(sam_in_pos_list_3D)
# sam_out_pos_list_3D = np.asarray(sam_out_pos_list_3D)
# exposure_time_2D = np.asarray(exposure_time_2D)
# exposure_time_3D = np.asarray(exposure_time_3D)
# if exposure_time_2D.shape[0] == 1:
# exposure_time_2D = np.ones(len(elements))*exposure_time_2D[0]
# elif len(elements) != exposure_time_2D.shape[0]:
# # to do in bs manner
# pass
#
# if exposure_time_3D.shape[0] == 1:
# exposure_time_3D = np.ones(len(elements))*exposure_time_3D[0]
# elif len(elements) != exposure_time_3D.shape[0]:
# # to do in bs manner
# pass
#
# eng_list = []
# for ii in elements:
# if ii.split('_')[1] == 'wl':
# eng_list.append(list(np.genfromtxt('/NSLS2/xf18id1/SW/xanes_ref/'+ii.split('_')[0]+'/eng_list_'+ii.split('_')[0]+'_s_xanes_standard_21pnt.txt')))
# elif ii.split('_')[1] == '101':
# eng_list.append(list(np.genfromtxt('/NSLS2/xf18id1/SW/xanes_ref/'+ii+'/eng_list_'+ii+'_xanes_standard_101pnt.txt')))
# elif ii.split('_')[1] == '63':
# eng_list.append(list(np.genfromtxt('/NSLS2/xf18id1/SW/xanes_ref/'+ii+'/eng_list_'+ii+'_xanes_standard_63pnt.txt')))
#
# eng_list = np.array(eng_list)
#
# if sam_in_pos_list_2D.size != 0:
# for ii in range(sam_in_pos_list_2D.shape[0]):
# for jj in range(len(elements)):
# if filters[jj]:
# select_filters(filters[jj])
## yield from _close_shutter(simu=simu)
## yield from mv(filter1, 0)
## yield from mv(filter2, 0)
## yield from mv(filter3, 0)
## yield from mv(filter4, 0)
## for flt in filters[jj]:
## if flt == 'filter1':
## yield from mv(filter1, 1)
## elif flt == 'filter2':
## yield from mv(filter2, 1)
## elif flt == 'filter3':
## yield from mv(filter3, 1)
## elif flt == 'filter4':
## yield from mv(filter4, 1)
# x_list = sam_in_pos_list_2D[ii, :, 0]
# y_list = sam_in_pos_list_2D[ii, :, 1]
# z_list = sam_in_pos_list_2D[ii, :, 2]
# r_list = sam_in_pos_list_2D[ii, :, 3]
# out_x = sam_out_pos_list_2D[ii, :, 0]
# out_y = sam_out_pos_list_2D[ii, :, 1]
# out_z = sam_out_pos_list_2D[ii, :, 2]
# out_r = sam_out_pos_list_2D[ii, :, 3]
# print(x_list)
# print(y_list)
# print(z_list)
# print(r_list)
# print(out_x)
# print(out_y)
# print(out_z)
# print(out_r)
# yield from multipos_2D_xanes_scan2(eng_list[jj], x_list, y_list, z_list, r_list,
# out_x=out_x, out_y=out_y, out_z=out_z, out_r=out_r,
# exposure_time=exposure_time_2D[jj], chunk_size=5,
# simu=simu, relative_move_flag=relative_move_flag, note=note, md=None, sleep_time=0, repeat_num=1)
#
# if sam_in_pos_list_3D.size != 0:
# for ii in range(sam_in_pos_list_3D.shape[0]):
# for jj in range(len(elements)):
# if filters[jj]:
# select_filters(filters[jj])
## yield from _close_shutter(simu=simu)
## yield from mv(filter1, 0)
## yield from mv(filter2, 0)
## yield from mv(filter3, 0)
## yield from mv(filter4, 0)
## for flt in filters[jj]:
## if flt == 'filter1':
## yield from mv(filter1, 1)
## elif flt == 'filter2':
## yield from mv(filter2, 1)
## elif flt == 'filter3':
## yield from mv(filter3, 1)
## elif flt == 'filter4':
## yield from mv(filter4, 1)
# x_list = sam_in_pos_list_3D[ii, :, 0]
# y_list = sam_in_pos_list_3D[ii, :, 1]
# z_list = sam_in_pos_list_3D[ii, :, 2]
# r_list = sam_in_pos_list_3D[ii, :, 3]
# out_x = sam_out_pos_list_3D[ii, :, 0]
# out_y = sam_out_pos_list_3D[ii, :, 1]
# out_z = sam_out_pos_list_3D[ii, :, 2]
# out_r = sam_out_pos_list_3D[ii, :, 3]
# print(x_list, out_x, out_y, out_z, out_r)
# yield from multi_pos_xanes_3D(eng_list[jj], x_list, y_list, z_list, r_list,
# exposure_time=exposure_time_3D[jj], relative_rot_angle=relative_rot_angle, rs=rs,
# out_x=out_x, out_y=out_y, out_z=out_z, out_r=out_r, note=note, simu=simu,
# relative_move_flag=relative_move_flag, traditional_sequence_flag=1, sleep_time=0, repeat=1)
# if kk != (repeat_num-1):
# print(f'We are in multi_pos_2D_and_3D_xanes cycle # {kk}; we are going to sleep for {sleep_time} seconds ...')
# yield from bps.sleep(sleep_time)
############ old routine: 2D routine works but 3D routine has some bugs -- end
def multi_pos_2D_and_3D_xanes(
elements=["Ni_wl"],
filters={"Ni_filters": [1, 2, 3]},
sam_in_pos_list_2D={"Ni_2D_in_pos_list": [[0, 0, 0, 0]]},
sam_out_pos_list_2D={"Ni_2D_out_pos_list": [[0, 0, 0, 0]]},
sam_in_pos_list_3D={"Ni_3D_in_pos_list": [[0, 0, 0, 0]]},
sam_out_pos_list_3D={"Ni_3D_out_pos_list": [[0, 0, 0, 0]]},
exposure_time_2D={"Ni_2D_exp": 0.05},
exposure_time_3D={"Ni_3D_exp": 0.05},
relative_rot_angle=185,
rs=1,
sleep_time=0,
repeat_num=1,
note="",
relative_move_flag=0,
simu=False):
xanes2D = {}
xanes3D = {}
for kk in range(repeat_num):
for elem in elements:
            ### if there is a filter combination defined for the element
for key, item in sam_in_pos_list_2D.items():
if elem.split("_")[0] == key.split("_")[0]:
xanes2D[elem+'_2D'] = {}
xanes2D[elem+'_2D']['eng'] = elem
xanes2D[elem+'_2D']['in_pos'] = item
xanes2D[elem+'_2D']['in_pos_defined'] = True
                    for key, item in filters.items():
                        if elem.split("_")[0] == key.split("_")[0]:
                            xanes2D[elem+'_2D']['filter'] = item
                            break
                    else:
                        xanes2D[elem+'_2D']['filter'] = []
for key, item in sam_out_pos_list_2D.items():
if elem.split("_")[0] == key.split("_")[0]:
xanes2D[elem+'_2D']['out_pos'] = item
xanes2D[elem+'_2D']['out_pos_defined'] = True
for key, item in exposure_time_2D.items():
if elem.split("_")[0] == key.split("_")[0]:
xanes2D[elem+'_2D']['exposure'] = item
xanes2D[elem+'_2D']['exposure_defined'] = True
if not (xanes2D[elem+'_2D']['in_pos_defined'] &
xanes2D[elem+'_2D']['out_pos_defined'] &
xanes2D[elem+'_2D']['exposure_defined']):
print(elem+' 2D scan setup is not correct. Quit.')
sys.exit()
for elem in elements:
            ### if there is a filter combination defined for the element
for key, item in sam_in_pos_list_3D.items():
if elem.split("_")[0] == key.split("_")[0]:
xanes3D[elem+'_3D'] = {}
xanes3D[elem+'_3D']['eng'] = elem
xanes3D[elem+'_3D']['in_pos'] = item
xanes3D[elem+'_3D']['in_pos_defined'] = True
                    for key, item in filters.items():
                        if elem.split("_")[0] == key.split("_")[0]:
                            xanes3D[elem+'_3D']['filter'] = item
                            break
                    else:
                        xanes3D[elem+'_3D']['filter'] = []
for key, item in sam_out_pos_list_3D.items():
if elem.split("_")[0] == key.split("_")[0]:
xanes3D[elem+'_3D']['out_pos'] = item
xanes3D[elem+'_3D']['out_pos_defined'] = True
for key, item in exposure_time_3D.items():
if elem.split("_")[0] == key.split("_")[0]:
xanes3D[elem+'_3D']['exposure'] = item
xanes3D[elem+'_3D']['exposure_defined'] = True
if not (xanes3D[elem+'_3D']['in_pos_defined'] &
xanes3D[elem+'_3D']['out_pos_defined'] &
xanes3D[elem+'_3D']['exposure_defined']):
print(elem+' 3D scan setup is not correct. Quit.')
sys.exit()
        for elem2D in xanes2D.values():
x_list_2D = []
y_list_2D = []
z_list_2D = []
r_list_2D = []
out_x_2D = []
out_y_2D = []
out_z_2D = []
out_r_2D = []
for inpos in elem2D['in_pos']:
x_list_2D.append(inpos[0])
y_list_2D.append(inpos[1])
z_list_2D.append(inpos[2])
r_list_2D.append(inpos[3])
for outpos in elem2D['out_pos']:
out_x_2D.append(outpos[0])
out_y_2D.append(outpos[1])
out_z_2D.append(outpos[2])
out_r_2D.append(outpos[3])
if len(x_list_2D) != len(out_x_2D):
print('x_list_2D and out_x_2D are not equal in length. Quit.')
sys.exit()
select_filters(elem2D['filter'])
if elem2D['eng'].split("_")[-1] == "wl":
eng_list = np.genfromtxt(
"/NSLS2/xf18id1/SW/xanes_ref/"
+ elem2D['eng'].split("_")[0]
+ "/eng_list_"
+ elem2D['eng'].split("_")[0]
+ "_s_xanes_standard_21pnt.txt")
            elif elem2D['eng'].split("_")[-1] == "101":
                eng_list = np.genfromtxt(
                    "/NSLS2/xf18id1/SW/xanes_ref/"
                    + elem2D['eng'].split("_")[0]
                    + "/eng_list_"
                    + elem2D['eng'].split("_")[0]
                    + "_xanes_standard_101pnt.txt")
            elif elem2D['eng'].split("_")[-1] == "63":
                eng_list = np.genfromtxt(
                    "/NSLS2/xf18id1/SW/xanes_ref/"
                    + elem2D['eng'].split("_")[0]
                    + "/eng_list_"
                    + elem2D['eng'].split("_")[0]
                    + "_xanes_standard_63pnt.txt")
yield from multipos_2D_xanes_scan2(
eng_list,
x_list_2D,
y_list_2D,
z_list_2D,
r_list_2D,
out_x=out_x_2D,
out_y=out_y_2D,
out_z=out_z_2D,
out_r=out_r_2D,
exposure_time=elem2D['exposure'],
chunk_size=5,
simu=simu,
relative_move_flag=relative_move_flag,
note=note,
md=None,
sleep_time=0,
repeat_num=1)
        for elem3D in xanes3D.values():
x_list_3D = []
y_list_3D = []
z_list_3D = []
r_list_3D = []
out_x_3D = []
out_y_3D = []
out_z_3D = []
out_r_3D = []
for inpos in elem3D['in_pos']:
x_list_3D.append(inpos[0])
y_list_3D.append(inpos[1])
z_list_3D.append(inpos[2])
r_list_3D.append(inpos[3])
for outpos in elem3D['out_pos']:
out_x_3D.append(outpos[0])
out_y_3D.append(outpos[1])
out_z_3D.append(outpos[2])
out_r_3D.append(outpos[3])
if len(x_list_3D) != len(out_x_3D):
print('x_list_3D and out_x_3D are not equal in length. Quit.')
sys.exit()
select_filters(elem3D['filter'])
if elem3D['eng'].split("_")[-1] == "wl":
eng_list = np.genfromtxt(
"/NSLS2/xf18id1/SW/xanes_ref/"
+ elem3D['eng'].split("_")[0]
+ "/eng_list_"
+ elem3D['eng'].split("_")[0]
+ "_s_xanes_standard_21pnt.txt")
            elif elem3D['eng'].split("_")[-1] == "101":
                eng_list = np.genfromtxt(
                    "/NSLS2/xf18id1/SW/xanes_ref/"
                    + elem3D['eng'].split("_")[0]
                    + "/eng_list_"
                    + elem3D['eng'].split("_")[0]
                    + "_xanes_standard_101pnt.txt")
            elif elem3D['eng'].split("_")[-1] == "63":
                eng_list = np.genfromtxt(
                    "/NSLS2/xf18id1/SW/xanes_ref/"
                    + elem3D['eng'].split("_")[0]
                    + "/eng_list_"
                    + elem3D['eng'].split("_")[0]
                    + "_xanes_standard_63pnt.txt")
yield from multi_pos_xanes_3D(
eng_list,
x_list_3D,
y_list_3D,
z_list_3D,
r_list_3D,
                exposure_time=elem3D['exposure'],
relative_rot_angle=relative_rot_angle,
rs=rs,
out_x=out_x_3D,
out_y=out_y_3D,
out_z=out_z_3D,
out_r=out_r_3D,
note=note,
simu=simu,
relative_move_flag=relative_move_flag,
traditional_sequence_flag=1,
sleep_time=0,
repeat=1)
# find = False
# defined = False
# for flt_elem in filters.keys():
# if elem.split("_")[0] == flt_elem.split("_")[0]:
# find = True
# if find is False:
# print("There is not filters defined for ", elem, "!")
# sys.exit(1)
#
# ### if there are 2D_sam_in and 2D_sam_out positions defined for the element
# find = False
# for in_elem in sam_in_pos_list_2D.keys():
# if elem.split("_")[0] == in_elem.split("_")[0]:
# find = True
# if find:
# find = False
# for out_elem in sam_out_pos_list_2D.keys():
# if elem.split("_")[0] == out_elem.split("_")[0]:
# find = True
# if find is False:
# print(
# elem, "2D_in_pos_list and", elem, "2D_in_pos_list dont match!"
# )
# sys.exit(1)
# if find:
# find = False
# for exp_elem in exposure_time_2D.keys():
# print(1, elem.split("_"), exp_elem.split("_"), find)
# if elem.split("_")[0] == exp_elem.split("_")[0]:
# find = True
# if find is False:
# print(2, elem.split("_"), exp_elem.split("_"))
# print("There is not exposure_time_2D defined for", elem)
# sys.exit(1)
# if find:
# defined = True
#
# ### if there are 3D_sam_in and 3D_sam_out positions defined for the element
# find = False
# for in_elem in sam_in_pos_list_3D.keys():
# if elem.split("_")[0] == in_elem.split("_")[0]:
# find = True
# if find:
# find = False
# for out_elem in sam_out_pos_list_3D.keys():
# if elem.split("_")[0] == out_elem.split("_")[0]:
# find = True
# if find is False:
# print(
# elem, "3D_in_pos_list and", elem, "3D_in_pos_list dont match!"
# )
# sys.exit(1)
# if find:
# find = False
# for exp_elem in exposure_time_3D.keys():
# if elem.split("_")[0] == exp_elem.split("_")[0]:
# find = True
# if find is False:
# print("There is not exposure_time_3D defined for", elem)
# sys.exit(1)
# if find:
# defined = True
#
# if not defined:
# print("There is neither 2D nor 3D position list defined for", elem)
# sys.exit()
#
# for elem in elements:
# select_filters(filters[elem.split("_")[0] + "_filters"])
#
# if ii.split("_")[1] == "wl":
# eng_list = np.genfromtxt(
# "/NSLS2/xf18id1/SW/xanes_ref/"
# + ii.split("_")[0]
# + "/eng_list_"
# + ii.split("_")[0]
# + "_s_xanes_standard_21pnt.txt"
# )
# elif ii.split("_")[1] == "101":
# eng_list = np.genfromtxt(
# "/NSLS2/xf18id1/SW/xanes_ref/"
# + ii
# + "/eng_list_"
# + ii
# + "_xanes_standard_101pnt.txt"
# )
# elif ii.split("_")[1] == "63":
# eng_list = np.genfromtxt(
# "/NSLS2/xf18id1/SW/xanes_ref/"
# + ii
# + "/eng_list_"
# + ii
# + "_xanes_standard_63pnt.txt"
# )
#
# if sam_in_pos_list_2D[elem.split("_")[0] + "_2D_in_pos_list"]:
# x_list_2D = np.asarray(
# sam_in_pos_list_2D[elem.split("_")[0] + "_2D_in_pos_list"]
# )[0, :]
# y_list_2D = np.asarray(
# sam_in_pos_list_2D[elem.split("_")[0] + "_2D_in_pos_list"]
# )[1, :]
# z_list_2D = np.asarray(
# sam_in_pos_list_2D[elem.split("_")[0] + "_2D_in_pos_list"]
# )[2, :]
# r_list_2D = np.asarray(
# sam_in_pos_list_2D[elem.split("_")[0] + "_2D_in_pos_list"]
# )[3, :]
# if sam_out_pos_list_2D[elem.split("_")[0] + "_2D_out_pos_list"]:
# out_x_2D = np.asarray(
# sam_out_pos_list_2D[elem.split("_")[0] + "_2D_out_pos_list"]
# )[0, :]
# out_y_2D = np.asarray(
# sam_out_pos_list_2D[elem.split("_")[0] + "_2D_out_pos_list"]
# )[1, :]
# out_z_2D = np.asarray(
# sam_out_pos_list_2D[elem.split("_")[0] + "_2D_out_pos_list"]
# )[2, :]
# out_r_2D = np.asarray(
# sam_out_pos_list_2D[elem.split("_")[0] + "_2D_out_pos_list"]
# )[3, :]
# else:
# print(elem, "_2D_out_pos_list is not defined!")
# sys.exit(1)
#
# if exposure_time_2D[elem.split("_")[0] + "_2D_exp"]:
# exp_2D = exposure_time_2D[elem.split("_")[0] + "_2D_exp"]
# else:
# print(elem, "_2D_exp is not defined!")
# sys.exit(1)
#
# yield from multipos_2D_xanes_scan2(
# eng_list,
# x_list_2D,
# y_list_2D,
# z_list_2D,
# r_list_2D,
# out_x=out_x_2D,
# out_y=out_y_2D,
# out_z=out_z_2D,
# out_r=out_r_2D,
# exposure_time=exp_2D,
# chunk_size=5,
# simu=simu,
# relative_move_flag=relative_move_flag,
# note=note,
# md=None,
# sleep_time=0,
# repeat_num=1,
# )
#
# if sam_in_pos_list_3D[elem.split("_")[0] + "_3D_in_pos_list"]:
# x_list_3D = np.asarray(
# sam_in_pos_list_3D[elem.split("_")[0] + "_3D_in_pos_list"]
# )[0, :]
# y_list_3D = np.asarray(
# sam_in_pos_list_3D[elem.split("_")[0] + "_3D_in_pos_list"]
# )[1, :]
# z_list_3D = np.asarray(
# sam_in_pos_list_3D[elem.split("_")[0] + "_3D_in_pos_list"]
# )[2, :]
# r_list_3D = np.asarray(
# sam_in_pos_list_3D[elem.split("_")[0] + "_3D_in_pos_list"]
# )[3, :]
# if sam_out_pos_list_3D[elem.split("_")[0] + "_3D_out_pos_list"]:
# out_x_3D = np.asarray(
# sam_out_pos_list_3D[elem.split("_")[0] + "_3D_out_pos_list"]
# )[0, :]
# out_y_3D = np.asarray(
# sam_out_pos_list_3D[elem.split("_")[0] + "_3D_out_pos_list"]
# )[1, :]
# out_z_3D = np.asarray(
# sam_out_pos_list_3D[elem.split("_")[0] + "_3D_out_pos_list"]
# )[2, :]
# out_r_3D = np.asarray(
# sam_out_pos_list_3D[elem.split("_")[0] + "_3D_out_pos_list"]
# )[3, :]
# else:
# print(elem, "_3D_out_pos_list is not defined!")
# sys.exit(1)
# if exposure_time_3D[elem.split("_")[0] + "_3D_exp"]:
# exp_3D = exposure_time_3D[elem.split("_")[0] + "_3D_exp"]
# else:
# print(elem, "_3D_exp is not defined!")
# sys.exit(1)
#
# yield from multi_pos_xanes_3D(
# eng_list,
# x_list_3D,
# y_list_3D,
# z_list_3D,
# r_list_3D,
# exposure_time=exp_3D,
# relative_rot_angle=relative_rot_angle,
# rs=rs,
# out_x=out_x_3D,
# out_y=out_y_3D,
# out_z=out_z_3D,
# out_r=out_r_3D,
# note=note,
# simu=simu,
# relative_move_flag=relative_move_flag,
# traditional_sequence_flag=1,
# sleep_time=0,
# repeat=1,
# )
#
# if kk != (repeat_num - 1):
# print(
# f"We are in multi_pos_2D_and_3D_xanes cycle # {kk}; we are going to sleep for {sleep_time} seconds ..."
# )
# yield from bps.sleep(sleep_time)
def multi_pos_2D_xanes_and_3D_tomo(
elements=["Ni"],
sam_in_pos_list_2D=[[[0, 0, 0, 0]]],
sam_out_pos_list_2D=[[[0, 0, 0, 0]]],
sam_in_pos_list_3D=[[[0, 0, 0, 0]]],
sam_out_pos_list_3D=[[[0, 0, 0, 0]]],
exposure_time_2D=[0.05],
exposure_time_3D=[0.05],
relative_rot_angle=0,
rs=1,
eng_3D=[10, 60],
note="",
relative_move_flag=0,
simu=False,
):
sam_in_pos_list_2D = np.asarray(sam_in_pos_list_2D)
sam_out_pos_list_2D = np.asarray(sam_out_pos_list_2D)
sam_in_pos_list_3D = np.asarray(sam_in_pos_list_3D)
sam_out_pos_list_3D = np.asarray(sam_out_pos_list_3D)
exposure_time_2D = np.asarray(exposure_time_2D)
exposure_time_3D = np.asarray(exposure_time_3D)
if exposure_time_2D.shape[0] == 1:
exposure_time_2D = np.ones(len(elements)) * exposure_time_2D[0]
elif len(elements) != exposure_time_2D.shape[0]:
# to do in bs manner
pass
if exposure_time_3D.shape[0] == 1:
exposure_time_3D = np.ones(len(elements)) * exposure_time_3D[0]
elif len(elements) != exposure_time_3D.shape[0]:
# to do in bs manner
pass
eng_list = []
for ii in elements:
eng_list.append(
list(
np.genfromtxt(
"/NSLS2/xf18id1/SW/xanes_ref/"
+ ii
+ "/eng_list_"
+ ii
+ "_xanes_standard.txt"
)
)
)
eng_list = np.array(eng_list)
if sam_in_pos_list_2D.size != 0:
for ii in range(sam_in_pos_list_2D.shape[0]):
for jj in range(len(elements)):
x_list = sam_in_pos_list_2D[ii, :, 0]
y_list = sam_in_pos_list_2D[ii, :, 1]
z_list = sam_in_pos_list_2D[ii, :, 2]
r_list = sam_in_pos_list_2D[ii, :, 3]
out_x = sam_out_pos_list_2D[ii, :, 0]
out_y = sam_out_pos_list_2D[ii, :, 1]
out_z = sam_out_pos_list_2D[ii, :, 2]
out_r = sam_out_pos_list_2D[ii, :, 3]
print(x_list)
print(y_list)
print(z_list)
print(r_list)
print(out_x)
print(out_y)
print(out_z)
print(out_r)
yield from multipos_2D_xanes_scan2(
eng_list[jj],
x_list,
y_list,
z_list,
r_list,
out_x=out_x,
out_y=out_y,
out_z=out_z,
out_r=out_r,
exposure_time=exposure_time_2D[jj],
chunk_size=5,
simu=simu,
relative_move_flag=relative_move_flag,
note=note,
md=None,
sleep_time=0,
repeat_num=1,
)
if sam_in_pos_list_3D.size != 0:
for ii in range(sam_in_pos_list_3D.shape[0]):
for jj in range(len(elements)):
x_list = sam_in_pos_list_3D[ii, :, 0]
y_list = sam_in_pos_list_3D[ii, :, 1]
z_list = sam_in_pos_list_3D[ii, :, 2]
r_list = sam_in_pos_list_3D[ii, :, 3]
out_x = sam_out_pos_list_3D[ii, :, 0]
out_y = sam_out_pos_list_3D[ii, :, 1]
out_z = sam_out_pos_list_3D[ii, :, 2]
out_r = sam_out_pos_list_3D[ii, :, 3]
yield from multi_pos_xanes_3D(
eng_list[jj, eng_3D],
x_list,
y_list,
z_list,
r_list,
exposure_time=exposure_time_3D[jj],
relative_rot_angle=relative_rot_angle,
rs=rs,
out_x=out_x,
out_y=out_y,
out_z=out_z,
out_r=out_r,
note=note,
simu=simu,
relative_move_flag=relative_move_flag,
traditional_sequence_flag=1,
sleep_time=0,
repeat=1,
)
def zps_motor_scan_with_Andor(
motors,
starts,
ends,
num_steps,
out_x=100,
out_y=0,
out_z=0,
out_r=0,
exposure_time=None,
period=None,
chunk_size=1,
note="",
relative_move_flag=1,
simu=False,
rot_first_flag=0,
md=None,
):
global ZONE_PLATE
detectors = [Andor, ic3]
# if len(out_x) != len(motors):
# out_x = [out_x[0]] * len(motors)
#
# if len(out_y) != len(motors):
# out_y = [out_y[0]] * len(motors)
#
# if len(out_z) != len(motors):
# out_z = [out_z[0]] * len(motors)
#
# if len(out_r) != len(motors):
# out_r = [out_r[0]] * len(motors)
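    # The helper below configures the Andor camera: it stops any running acquisition,
    # sets image_mode to 0, takes `chunk_size` frames per acquisition, and uses
    # `exposure_time` for both acquire_time and acquire_period.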
def _set_andor_param():
yield from mv(Andor.cam.acquire, 0)
yield from mv(Andor.cam.image_mode, 0)
yield from mv(Andor.cam.num_images, chunk_size)
yield from mv(Andor.cam.acquire_time, exposure_time)
yield from mv(Andor.cam.acquire_period, exposure_time)
if exposure_time is not None:
yield from _set_andor_param()
mot_ini = []
mot_start = []
mot_end = []
for start, end, motor in zip(starts, ends, motors):
mot_ini.append(getattr(motor, "position"))
mot_start.append(getattr(motor, "position") + start)
mot_end.append(getattr(motor, "position") + end)
mot_num_step = np.int_(num_steps)
#
#
# motor_out = []
# if relative_move_flag:
# for motor in motors:
# motor_out.append(motor_ini + out)
motor_x_ini = zps.sx.position
motor_y_ini = zps.sy.position
motor_z_ini = zps.sz.position
motor_r_ini = zps.pi_r.position
if relative_move_flag:
motor_x_out = motor_x_ini + out_x if out_x else motor_x_ini
motor_y_out = motor_y_ini + out_y if out_y else motor_y_ini
motor_z_out = motor_z_ini + out_z if out_z else motor_z_ini
motor_r_out = motor_r_ini + out_r if out_r else motor_r_ini
else:
motor_x_out = out_x if out_x else motor_x_ini
motor_y_out = out_y if out_y else motor_y_ini
motor_z_out = out_z if out_z else motor_z_ini
motor_r_out = out_r if out_r else motor_r_ini
print("hello1")
_md = {
"detectors": [det.name for det in detectors],
"motors": [mot.name for mot in motors],
"num_bkg_images": 5,
"num_dark_images": 5,
"mot_start": starts,
"motor_end": ends,
"motor_num_step": mot_num_step,
"out_x": out_x,
"out_y": out_y,
"out_z": out_z,
"out_r": out_r,
"exposure_time": exposure_time,
"chunk_size": chunk_size,
"XEng": XEng.position,
"plan_args": {
"mot_start": mot_start,
"mot_end": mot_end,
"mot_num_step": mot_num_step,
"exposure_time": exposure_time,
"chunk_size": chunk_size,
"out_x": out_x,
"out_y": out_y,
"out_z": out_z,
"out_r": out_r,
"note": note if note else "None",
"relative_move_flag": relative_move_flag,
"rot_first_flag": rot_first_flag,
"note": note if note else "None",
"zone_plate": ZONE_PLATE,
},
"plan_name": "zps_motor_scan_with_Andor",
"hints": {},
"operator": "FXI",
"zone_plate": ZONE_PLATE,
"note": note if note else "None",
#'motor_pos': wh_pos(print_on_screen=0),
}
_md.update(md or {})
try:
dimensions = [(motors.hints["fields"], "primary")]
except (AttributeError, KeyError):
pass
else:
_md["hints"].setdefault("dimensions", dimensions)
@stage_decorator(list(detectors) + motors)
@run_decorator(md=_md)
def zps_motor_scan_inner():
# take dark image
print("take 5 dark image")
yield from _take_dark_image(detectors, motors, num_dark=5)
print("open shutter ...")
yield from _open_shutter(simu)
print("taking mosaic image ...")
if len(motors) == 1:
mot_pos = np.linspace(
mot_start[0], mot_end[0], mot_num_step[0], endpoint=False
)
elif len(motors) == 2:
mot_pos_coor1, mot_pos_coor2 = np.meshgrid(
np.linspace(mot_start[0], mot_end[0], mot_num_step[0], endpoint=False),
np.linspace(mot_start[1], mot_end[1], mot_num_step[1], endpoint=False),
)
mot_pos = np.array([mot_pos_coor1.flatten(), mot_pos_coor2.flatten()])
elif len(motors) == 3:
mot_pos_coor1, mot_pos_coor2, mot_pos_coor3 = np.meshgrid(
np.linspace(mot_start[0], mot_end[0], mot_num_step[0], endpoint=False),
np.linspace(mot_start[1], mot_end[1], mot_num_step[1], endpoint=False),
np.linspace(mot_start[2], mot_end[2], mot_num_step[2], endpoint=False),
)
mot_pos = np.array(
[
mot_pos_coor1.flatten(),
mot_pos_coor2.flatten(),
mot_pos_coor3.flatten(),
]
)
elif len(motors) == 4:
mot_pos_coor1, mot_pos_coor2, mot_pos_coor3, mot_pos_coor4 = np.meshgrid(
np.linspace(mot_start[0], mot_end[0], mot_num_step[0], endpoint=False),
np.linspace(mot_start[1], mot_end[1], mot_num_step[1], endpoint=False),
np.linspace(mot_start[2], mot_end[2], mot_num_step[2], endpoint=False),
np.linspace(mot_start[3], mot_end[3], mot_num_step[3], endpoint=False),
)
mot_pos = np.array(
[
mot_pos_coor1.flatten(),
mot_pos_coor2.flatten(),
mot_pos_coor3.flatten(),
mot_pos_coor4.flatten(),
]
)
for jj in range(mot_pos.shape[1]):
# yield from mv(motors, mot_pos[:, jj])
for ii in range(len(motors)):
yield from mv(motors[ii], mot_pos[ii, jj])
yield from _take_image(detectors, motors, 1)
print("moving sample out to take 5 background image")
yield from _take_bkg_image(
motor_x_out,
motor_y_out,
motor_z_out,
motor_r_out,
detectors,
motors,
num_bkg=5,
simu=simu,
traditional_sequence_flag=rot_first_flag,
)
# move sample in
yield from _move_sample_in(
motor_x_ini,
motor_y_ini,
motor_z_ini,
motor_r_ini,
repeat=1,
trans_first_flag=1 - rot_first_flag,
)
print("closing shutter")
yield from _close_shutter(simu)
yield from zps_motor_scan_inner()
yield from mv(Andor.cam.image_mode, 1)
print("scan finished")
txt = get_scan_parameter()
insert_text(txt)
print(txt)
def diff_tomo(
sam_in_pos_list=[[0, 0, 0, 0],],
sam_out_pos_list=[[0, 0, 0, 0],],
exposure=[0.05],
period=[0.05],
relative_rot_angle=182,
rs=1,
eng=None,
note="",
filters=[],
relative_move_flag=0,
md=None,
):
sam_in_pos_list = np.array(sam_in_pos_list)
sam_out_pos_list = np.array(sam_out_pos_list)
if eng is None:
print("Please specify two energies as a list for differential tomo scans.")
return
if len(exposure) != sam_in_pos_list.shape[0]:
exposure = np.ones(sam_in_pos_list.shape[0]) * exposure[0]
if len(period) != sam_in_pos_list.shape[0]:
period = np.ones(sam_in_pos_list.shape[0]) * period[0]
for jj in range(sam_in_pos_list.shape[0]):
for ii in range(len(eng)):
yield from move_zp_ccd(
eng[ii], move_flag=1, info_flag=1, move_clens_flag=0, move_det_flag=0
)
yield from mv(
zps.sx,
sam_in_pos_list[jj, 0],
zps.sy,
sam_in_pos_list[jj, 1],
zps.sz,
sam_in_pos_list[jj, 2],
)
yield from mv(zps.pi_r, sam_in_pos_list[jj, 3])
yield from fly_scan(
exposure_time=exposure[jj],
relative_rot_angle=relative_rot_angle,
period=period[jj],
chunk_size=20,
out_x=sam_out_pos_list[jj, 0],
out_y=sam_out_pos_list[jj, 1],
out_z=sam_out_pos_list[jj, 2],
out_r=sam_out_pos_list[jj, 3],
rs=rs,
note=note,
simu=False,
relative_move_flag=relative_move_flag,
traditional_sequence_flag=1,
filters=filters,
md=md,
)
def damon_scan(
eng_list1,
eng_list2,
x_list,
y_list,
z_list,
r_list,
exposure_time1=10.0,
exposure_time2=10.0,
chunk_size1=1,
chunk_size2=1,
out_x=None,
out_y=None,
out_z=None,
out_r=None,
iters=10,
sleep_time=1,
note="",
):
export_pdf(1)
insert_text('start "damon_scan"')
x_list = np.array(x_list)
y_list = np.array(y_list)
z_list = np.array(z_list)
for n in range(iters):
print(f"iteration # {n+1} / {iters}")
"""
yield from move_zp_ccd(6.5)
for i in range(4):
yield from mv(filter2, 1)
yield from mv(filter4, 1)
yield from xanes_scan2() # for Mn
yield from move_zp_ccd(9)
for i in range(4):
yield from mv(filter2, 1)
yield from mv(filter4, 1)
yield from xanes_scan2() # for Cu
"""
yield from move_zp_ccd(6.5, move_flag=1, move_clens_flag=1, move_det_flag=0)
# for i in range(4):
# yield from mv(filter1, 0)
# yield from mv(filter2, 0)
# yield from mv(filter3, 0)
# yield from mv(filter4, 0)
# yield from mv(ssa.v_gap, 1)
# yield from multipos_2D_xanes_scan2(eng_list1, x_list, y_list, z_list, r_list, out_x, out_y, out_z, out_r, repeat_num=1, exposure_time=exposure_time1, sleep_time=0, chunk_size=chunk_size1, relative_move_flag=1, note=note)
# once move energy above 8.86 keV, we have a sample shift of -40(x) and -20(y),
# the sample at focus will not be at rotation center, but it is ok if doing 2D XANES
yield from move_zp_ccd(
eng_list2[0], move_flag=1, move_clens_flag=1, move_det_flag=0
)
for i in range(4):
yield from mv(filter1, 0)
yield from mv(filter2, 0)
yield from mv(filter3, 1)
yield from mv(filter4, 1)
yield from mv(ssa.v_gap, 0.2)
yield from multipos_2D_xanes_scan2(
eng_list2,
x_list,
y_list,
z_list,
r_list,
out_x,
out_y,
out_z,
out_r,
repeat_num=1,
exposure_time=exposure_time2,
sleep_time=0,
chunk_size=chunk_size2,
relative_move_flag=1,
note=note,
)
print(f"sleep for {sleep_time} sec")
yield from bps.sleep(sleep_time)
print(f"finished scan, now moving back to {eng_list1[0]} keV")
yield from mv(zps.sx, x_list[0], zps.sy, y_list[0], zps.sz, z_list[0])
yield from move_zp_ccd(
eng_list1[0], move_flag=1, move_clens_flag=1, move_det_flag=0
)
insert_text('finish "damon scan"')
def user_fly_scan(
exposure_time=0.1,
relative_rot_angle=180,
period=0.15,
chunk_size=20,
out_x=None,
out_y=2000,
out_z=None,
out_r=None,
rs=1,
note="",
simu=False,
relative_move_flag=1,
traditional_sequence_flag=1,
filters=[],
md=None,
):
"""
Inputs:
-------
exposure_time: float, in unit of sec
relative_rot_angle: float,
total rotation angles start from current rotary stage (zps.pi_r) position
period: float, in unit of sec
period of taking images, "period" should >= "exposure_time"
chunk_size: int, default setting is 20
number of images taken for each trigger of Andor camera
    out_x: float, default is None
        relative movement of sample in "x" direction using zps.sx to move out sample (in unit of um)
        NOTE: BE CAUTIOUS THAT IT WILL ROTATE THE SAMPLE BY "out_r" FIRST, AND THEN MOVE X, Y, Z
    out_y: float, default is 2000
        relative movement of sample in "y" direction using zps.sy to move out sample (in unit of um)
        NOTE: BE CAUTIOUS THAT IT WILL ROTATE THE SAMPLE BY "out_r" FIRST, AND THEN MOVE X, Y, Z
    out_z: float, default is None
        relative movement of sample in "z" direction using zps.sz to move out sample (in unit of um)
        NOTE: BE CAUTIOUS THAT IT WILL ROTATE THE SAMPLE BY "out_r" FIRST, AND THEN MOVE X, Y, Z
    out_r: float, default is None
        relative movement of sample by rotating "out_r" degrees, using zps.pi_r to move out sample
        NOTE: BE CAUTIOUS THAT IT WILL ROTATE THE SAMPLE BY "out_r" FIRST, AND THEN MOVE X, Y, Z
rs: float, default is 1
rotation speed in unit of deg/sec
note: string
adding note to the scan
simu: Bool, default is False
True: will simulate closing/open shutter without really closing/opening
False: will really close/open shutter
"""
global ZONE_PLATE
motor_x_ini = zps.sx.position
motor_y_ini = zps.sy.position
motor_z_ini = zps.sz.position
motor_r_ini = zps.pi_r.position
if relative_move_flag:
motor_x_out = motor_x_ini + out_x if out_x else motor_x_ini
motor_y_out = motor_y_ini + out_y if out_y else motor_y_ini
motor_z_out = motor_z_ini + out_z if out_z else motor_z_ini
motor_r_out = motor_r_ini + out_r if out_r else motor_r_ini
else:
motor_x_out = out_x if out_x else motor_x_ini
motor_y_out = out_y if out_y else motor_y_ini
motor_z_out = out_z if out_z else motor_z_ini
motor_r_out = out_r if out_r else motor_r_ini
motor = [zps.sx, zps.sy, zps.sz, zps.pi_r]
detectors = [Andor, ic3]
offset_angle = -0.5 * rs
current_rot_angle = zps.pi_r.position
target_rot_angle = current_rot_angle + relative_rot_angle
_md = {
"detectors": ["Andor"],
"motors": [mot.name for mot in motor],
"XEng": XEng.position,
"ion_chamber": ic3.name,
"plan_args": {
"exposure_time": exposure_time,
"relative_rot_angle": relative_rot_angle,
"period": period,
"chunk_size": chunk_size,
"out_x": out_x,
"out_y": out_y,
"out_z": out_z,
"out_r": out_r,
"rs": rs,
"relative_move_flag": relative_move_flag,
"traditional_sequence_flag": traditional_sequence_flag,
"filters": [filt.name for filt in filters] if filters else "None",
"note": note if note else "None",
"zone_plate": ZONE_PLATE,
},
"plan_name": "fly_scan",
"num_bkg_images": 20,
"num_dark_images": 20,
"chunk_size": chunk_size,
"plan_pattern": "linspace",
"plan_pattern_module": "numpy",
"hints": {},
"operator": "FXI",
"note": note if note else "None",
"zone_plate": ZONE_PLATE,
#'motor_pos': wh_pos(print_on_screen=0),
}
_md.update(md or {})
try:
dimensions = [(zps.pi_r.hints["fields"], "primary")]
except (AttributeError, KeyError):
pass
else:
_md["hints"].setdefault("dimensions", dimensions)
# yield from _set_andor_param(exposure_time=exposure_time, period=period, chunk_size=chunk_size)
yield from _set_rotation_speed(rs=rs)
print("set rotation speed: {} deg/sec".format(rs))
@stage_decorator(list(detectors) + motor)
@bpp.monitor_during_decorator([zps.pi_r])
@run_decorator(md=_md)
def fly_inner_scan():
        # close shutter, take dark images: number = chunk_size (e.g. 20)
print("\nshutter closed, taking dark images...")
yield from _set_andor_param(
exposure_time=exposure_time, period=period, chunk_size=20
)
yield from _take_dark_image(detectors, motor, num_dark=1, simu=simu)
yield from bps.sleep(1)
yield from _set_andor_param(
exposure_time=exposure_time, period=period, chunk_size=chunk_size
)
# open shutter, tomo_images
yield from _open_shutter(simu=simu)
print("\nshutter opened, taking tomo images...")
yield from mv(zps.pi_r, current_rot_angle + offset_angle)
status = yield from abs_set(zps.pi_r, target_rot_angle, wait=False)
yield from bps.sleep(1)
while not status.done:
yield from trigger_and_read(list(detectors) + motor)
# bkg images
print("\nTaking background images...")
yield from _set_rotation_speed(rs=30)
# yield from abs_set(zps.pi_r.velocity, rs)
for flt in filters:
yield from mv(flt, 1)
yield from mv(flt, 1)
yield from bps.sleep(1)
yield from _set_andor_param(
exposure_time=exposure_time, period=period, chunk_size=20
)
yield from _take_bkg_image(
motor_x_out,
motor_y_out,
motor_z_out,
motor_r_out,
detectors,
motor,
num_bkg=1,
simu=False,
traditional_sequence_flag=traditional_sequence_flag,
)
yield from _close_shutter(simu=simu)
yield from _move_sample_in(
motor_x_ini,
motor_y_ini,
motor_z_ini,
motor_r_ini,
trans_first_flag=traditional_sequence_flag,
)
for flt in filters:
yield from mv(flt, 0)
uid = yield from fly_inner_scan()
yield from mv(Andor.cam.image_mode, 1)
print("scan finished")
txt = get_scan_parameter(print_flag=0)
insert_text(txt)
print(txt)
return uid
def user_fly_only(
exposure_time=0.1,
end_rot_angle=180,
period=0.15,
chunk_size=20,
rs=1,
note="",
simu=False,
dark_scan_id=0,
bkg_scan_id=0,
md=None,
):
global ZONE_PLATE
motor_x_ini = zps.sx.position
motor_y_ini = zps.sy.position
motor_z_ini = zps.sz.position
motor_r_ini = zps.pi_r.position
motor = [zps.sx, zps.sy, zps.sz, zps.pi_r]
detectors = [Andor, ic3]
# offset_angle = 0 #-0.5 * rs * np.sign(relative_rot_angle)
current_rot_angle = zps.pi_r.position
target_rot_angle = end_rot_angle
_md = {
"detectors": ["Andor"],
"motors": [mot.name for mot in motor],
"XEng": XEng.position,
"ion_chamber": ic3.name,
"plan_args": {
"exposure_time": exposure_time,
"end_rot_angle": end_rot_angle,
"period": period,
"chunk_size": chunk_size,
"rs": rs,
"note": note if note else "None",
"zone_plate": ZONE_PLATE,
"dark_scan_id": dark_scan_id,
"bkg_scan_id": bkg_scan_id,
},
"plan_name": "user_fly_only",
"chunk_size": chunk_size,
"plan_pattern": "linspace",
"plan_pattern_module": "numpy",
"hints": {},
"operator": "FXI",
"note": note if note else "None",
"zone_plate": ZONE_PLATE,
#'motor_pos': wh_pos(print_on_screen=0),
}
_md.update(md or {})
try:
dimensions = [(zps.pi_r.hints["fields"], "primary")]
except (AttributeError, KeyError):
pass
else:
_md["hints"].setdefault("dimensions", dimensions)
yield from _set_andor_param(
exposure_time=exposure_time, period=period, chunk_size=chunk_size
)
yield from _set_rotation_speed(rs=rs)
print("set rotation speed: {} deg/sec".format(rs))
@stage_decorator(list(detectors) + motor)
@bpp.monitor_during_decorator([zps.pi_r])
@run_decorator(md=_md)
def fly_inner_scan():
yield from _open_shutter(simu=simu)
status = yield from abs_set(zps.pi_r, target_rot_angle, wait=False)
while not status.done:
yield from trigger_and_read(list(detectors) + motor)
uid = yield from fly_inner_scan()
yield from mv(Andor.cam.image_mode, 1)
print("scan finished")
# yield from _set_rotation_speed(rs=30)
txt = get_scan_parameter(print_flag=0)
insert_text(txt)
print(txt)
return uid
def user_dark_only(exposure_time=0.1, chunk_size=20, note="", simu=False, md=None):
"""
Take dark field images.
Inputs:
-------
exposure_time: float, in unit of sec
chunk_size: int, default setting is 20
number of images taken for each trigger of Andor camera
note: string
adding note to the scan
simu: Bool, default is False
True: will simulate closing/open shutter without really closing/opening
False: will really close/open shutter
"""
global ZONE_PLATE
    period = exposure_time  # default the acquire period to the exposure time for dark images
detectors = [Andor, ic3]
motor = []
_md = {
"detectors": ["Andor"],
"XEng": XEng.position,
"ion_chamber": ic3.name,
"plan_args": {
"exposure_time": exposure_time,
"chunk_size": chunk_size,
"note": note if note else "None",
"zone_plate": ZONE_PLATE,
},
"plan_name": "user_dark_only",
"chunk_size": chunk_size,
"plan_pattern": "linspace",
"plan_pattern_module": "numpy",
"hints": {},
"operator": "FXI",
"note": note if note else "None",
"zone_plate": ZONE_PLATE,
#'motor_pos': wh_pos(print_on_screen=0),
}
_md.update(md or {})
try:
dimensions = [(zps.pi_r.hints["fields"], "primary")]
except (AttributeError, KeyError):
pass
else:
_md["hints"].setdefault("dimensions", dimensions)
yield from _set_andor_param(
exposure_time=exposure_time, period=period, chunk_size=chunk_size
)
@stage_decorator(list(detectors) + motor)
@run_decorator(md=_md)
def inner_scan():
yield from _set_andor_param(
exposure_time=exposure_time, period=period, chunk_size=chunk_size
)
yield from _take_dark_image(detectors, motor, num_dark=1, simu=simu)
uid = yield from inner_scan()
yield from mv(Andor.cam.image_mode, 1)
print("dark finished")
txt = get_scan_parameter(print_flag=0)
insert_text(txt)
print(txt)
return uid
def user_bkg_only(
exposure_time=0.1,
chunk_size=20,
out_x=None,
out_y=2000,
out_z=None,
out_r=None,
note="",
simu=False,
relative_move_flag=1,
traditional_sequence_flag=1,
md=None,
):
"""
Move sample out of the way and take background (aka flat) images.
Inputs:
-------
exposure_time: float, in unit of sec
chunk_size: int, default setting is 20
number of images taken for each trigger of Andor camera
    out_x: float, default is None
        relative movement of sample in "x" direction using zps.sx to move out sample (in unit of um)
        NOTE: BE CAUTIOUS THAT IT WILL ROTATE THE SAMPLE BY "out_r" FIRST, AND THEN MOVE X, Y, Z
    out_y: float, default is 2000
        relative movement of sample in "y" direction using zps.sy to move out sample (in unit of um)
        NOTE: BE CAUTIOUS THAT IT WILL ROTATE THE SAMPLE BY "out_r" FIRST, AND THEN MOVE X, Y, Z
    out_z: float, default is None
        relative movement of sample in "z" direction using zps.sz to move out sample (in unit of um)
        NOTE: BE CAUTIOUS THAT IT WILL ROTATE THE SAMPLE BY "out_r" FIRST, AND THEN MOVE X, Y, Z
    out_r: float, default is None
        relative movement of sample by rotating "out_r" degrees, using zps.pi_r to move out sample
        NOTE: BE CAUTIOUS THAT IT WILL ROTATE THE SAMPLE BY "out_r" FIRST, AND THEN MOVE X, Y, Z
note: string
adding note to the scan
simu: Bool, default is False
True: will simulate closing/open shutter without really closing/opening
False: will really close/open shutter
"""
global ZONE_PLATE
period = exposure_time # default to exposure time for backgrounds
motor_x_ini = zps.sx.position
motor_y_ini = zps.sy.position
motor_z_ini = zps.sz.position
motor_r_ini = zps.pi_r.position
if relative_move_flag:
motor_x_out = motor_x_ini + out_x if out_x else motor_x_ini
motor_y_out = motor_y_ini + out_y if out_y else motor_y_ini
motor_z_out = motor_z_ini + out_z if out_z else motor_z_ini
motor_r_out = motor_r_ini + out_r if out_r else motor_r_ini
else:
motor_x_out = out_x if out_x else motor_x_ini
motor_y_out = out_y if out_y else motor_y_ini
motor_z_out = out_z if out_z else motor_z_ini
motor_r_out = out_r if out_r else motor_r_ini
motor = [zps.sx, zps.sy, zps.sz, zps.pi_r]
detectors = [Andor, ic3]
current_rot_angle = zps.pi_r.position
_md = {
"detectors": ["Andor"],
"motors": [mot.name for mot in motor],
"XEng": XEng.position,
"ion_chamber": ic3.name,
"plan_args": {
"exposure_time": exposure_time,
"chunk_size": chunk_size,
"out_x": out_x,
"out_y": out_y,
"out_z": out_z,
"out_r": out_r,
"relative_move_flag": relative_move_flag,
"traditional_sequence_flag": traditional_sequence_flag,
"note": note if note else "None",
"zone_plate": ZONE_PLATE,
},
"plan_name": "user_bkg_only",
"chunk_size": chunk_size,
"plan_pattern": "linspace",
"plan_pattern_module": "numpy",
"hints": {},
"operator": "FXI",
"note": note if note else "None",
"zone_plate": ZONE_PLATE,
#'motor_pos': wh_pos(print_on_screen=0),
}
_md.update(md or {})
try:
dimensions = [(zps.pi_r.hints["fields"], "primary")]
except (AttributeError, KeyError):
pass
else:
_md["hints"].setdefault("dimensions", dimensions)
# yield from _set_andor_param(exposure_time=exposure_time, period=period, chunk_size=chunk_size)
@stage_decorator(list(detectors) + motor)
@bpp.monitor_during_decorator([zps.pi_r])
@run_decorator(md=_md)
def fly_inner_scan():
yield from _open_shutter(simu=simu)
# bkg images
print("\nTaking background images...")
yield from _set_rotation_speed(rs=30)
yield from _take_bkg_image(
motor_x_out,
motor_y_out,
motor_z_out,
motor_r_out,
detectors,
motor,
num_bkg=1,
simu=False,
traditional_sequence_flag=traditional_sequence_flag,
)
yield from _move_sample_in(
motor_x_ini,
motor_y_ini,
motor_z_ini,
motor_r_ini,
trans_first_flag=traditional_sequence_flag,
)
uid = yield from fly_inner_scan()
yield from mv(Andor.cam.image_mode, 1)
print("bkg finished")
txt = get_scan_parameter(print_flag=0)
insert_text(txt)
print(txt)
return uid
def user_multiple_fly_scans(
xyz_list,
bkg_every_x_scans=10,
exposure_time=0.1,
angle=70,
period=0.15,
chunk_size=20,
out_x=None,
out_y=None,
out_z=None,
out_r=None,
rs=1,
note="",
simu=False,
relative_move_flag=0,
traditional_sequence_flag=1,
md=None,
):
# first take dark field
dark_scan_id = yield from user_dark_only(exposure_time, chunk_size, note, simu, md)
# open shutter for rest of data taking
yield from _open_shutter(simu=simu)
print("\nshutter opened")
bkg_index = 0
bkg_scan_id = None
for i, pos in enumerate(xyz_list):
x, y, z = pos
if i == 0 or bkg_index + bkg_every_x_scans <= i:
# take background
bkg_scan_id = yield from user_bkg_only(
exposure_time,
chunk_size,
out_x,
out_y,
out_z,
out_r,
note,
simu,
relative_move_flag,
traditional_sequence_flag,
md,
)
bkg_index = i
# mv x, y, z, r position
yield from mv(zps.sx, x, zps.sy, y, zps.sz, z, zps.pi_r, angle)
# take tomo
angle *= -1 # rocker scan, switch angle back and forth
while True:
try:
scan_id = yield from user_fly_only(
exposure_time,
angle,
period,
chunk_size,
rs,
note,
simu,
dark_scan_id,
bkg_scan_id,
md,
)
break
except Exception as e:
print(e)
print("Finished scans %s - %s" % (dark_scan_id, scan_id))
def user_mosaic_gen(x_start, x_stop, x_step, y_start, y_stop, y_step, z_pos):
xyz_list = []
for y in range(y_start, y_stop + y_step, y_step):
for x in range(x_start, x_stop + x_step, x_step):
xyz_list.append((x, y, z_pos))
return xyz_list
def user_hex_mosaic_xyz(
x_start, x_stop, x_step, x_offset, y_start, y_stop, y_step, z_pos
):
xyz_list = []
    # apply the x_offset every other row
apply_offset = False
for y in range(y_start, y_stop + y_step, y_step):
if apply_offset:
offset = x_offset
else:
offset = 0
apply_offset = not apply_offset
for x in range(x_start, x_stop + x_step, x_step):
xyz_list.append((x + offset, y, z_pos))
return xyz_list
def v4_z_offset(xyz_list):
# offset is only dependent on y
new_xyz_list = []
for x, y, z in xyz_list:
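        # linear interpolation: z = 50 at y = -3873 and z = 56 at y = -2873,
        # overriding whatever z value came in with the point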
z = 50 + (56 - 50) * (-3873 - y) / (-1000)
new_xyz_list.append((x, y, z))
return new_xyz_list
def point_inside_polygon(x, y, poly):
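    # Ray-casting (crossing-number) test: count how many polygon edges a horizontal
    # ray from (x, y) crosses; an odd count means the point is inside the polygon.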
n = len(poly)
inside = False
p1x, p1y = poly[0]
for i in range(n + 1):
p2x, p2y = poly[i % n]
if y > min(p1y, p2y):
if y <= max(p1y, p2y):
if x <= max(p1x, p2x):
if p1y != p2y:
xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
if p1x == p2x or x <= xinters:
inside = not inside
p1x, p1y = p2x, p2y
return inside
def trim_points_to_polygon(xyz_list, poly):
xyz_list_out = []
for x, y, z in xyz_list:
if point_inside_polygon(x, y, poly):
xyz_list_out.append((x, y, z))
return xyz_list_out
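# A hedged sketch of how the mosaic helpers above can be chained together; every
# coordinate, polygon vertex, and scan parameter below is a made-up placeholder,
# not a beamline-validated value, and `RE` is assumed to be the bluesky RunEngine:
#   xyz_list = user_hex_mosaic_xyz(-100, 100, 50, 25, -3900, -3800, 50, 0)
#   xyz_list = v4_z_offset(xyz_list)
#   xyz_list = trim_points_to_polygon(xyz_list, [(-100, -3900), (100, -3900), (0, -3800)])
#   RE(user_multiple_fly_scans(xyz_list, bkg_every_x_scans=10, exposure_time=0.1, angle=70))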
def ming_scan():
x_list = [-892, -899, -865, -843, -782]
y_list = [384, 437, 431, 427, 807]
z_list = [-718, -726, -719, -715, -700]
r_list = [0, 0, 0, 0, 0]
out_x = -443
out_y = 425
out_z = -727
out_r = 1
# yield from multipos_2D_xanes_scan2(Ni_eng_list_short, x_list, y_list, z_list, r_list, out_x, out_y, out_z, out_r, repeat_num=1, exposure_time=0.04, sleep_time=1, chunk_size=5, simu=False, relative_move_flag=0, note='P95_NMC_Ag_700_0.2C_20cy'))
for i in range(4, 6):
txt = f"start 3D xanes at pos {i}"
insert_text(txt)
print(txt)
yield from mv(
zps.sx, x_list[i], zps.sy, y_list[i], zps.sz, z_list[i], zps.pi_r, -80
)
yield from xanes_3D(
eng_list=Ni_eng_list_in_situ,
exposure_time=0.03,
relative_rot_angle=160,
period=0.03,
out_x=out_x,
out_y=out_y,
out_z=out_z,
out_r=out_r,
rs=3,
simu=False,
relative_move_flag=0,
traditional_sequence_flag=1,
note=f"p95_700_Ag_0.2C_20cy_pos_{i+1}",
)
def ming_scan2():
yield from move_zp_ccd(6.5)
yield from bps.sleep(5)
yield from mv(zps.sx, -782, zps.sy, 807, zps.sz, -700, zps.pi_r, -80)
yield from xanes_3D(
eng_list=Mn_eng_list_in_situ,
exposure_time=0.025,
relative_rot_angle=160,
period=0.025,
out_x=out_x,
out_y=out_y,
out_z=out_z,
out_r=out_r,
rs=6,
simu=False,
relative_move_flag=0,
traditional_sequence_flag=1,
note=f"p95_700_Ag_0.2C_20cy_pos_5_Mn",
)
print("start Co xanes")
yield from move_zp_ccd(7.8)
yield from mv(zps.sx, -782, zps.sy, 807, zps.sz, -700, zps.pi_r, -80)
yield from bps.sleep(5)
yield from xanes_3D(
eng_list=Co_eng_list_in_situ,
exposure_time=0.02,
relative_rot_angle=160,
period=0.025,
out_x=out_x,
out_y=out_y,
out_z=out_z,
out_r=out_r,
rs=6,
simu=False,
relative_move_flag=0,
traditional_sequence_flag=1,
note=f"p95_700_Ag_0.2C_20cy_pos_5_Co",
)
print("start Mn xanes")
yield from move_zp_ccd(8.2)
yield from mv(zps.sx, -782, zps.sy, 807, zps.sz, -700, zps.pi_r, -80)
yield from bps.sleep(5)
yield from xanes_3D(
eng_list=Ni_eng_list_in_situ,
exposure_time=0.015,
relative_rot_angle=160,
period=0.025,
out_x=out_x,
out_y=out_y,
out_z=out_z,
out_r=out_r,
rs=6,
simu=False,
relative_move_flag=0,
traditional_sequence_flag=1,
note=f"p95_700_Ag_0.2C_20cy_pos_5_Ni",
)
def ming_scan3():
x_3D = [395, 513]
y_3D = [1067, 756]
z_3D = [-496, -508]
r_3D = [-80, -80]
yield from move_zp_ccd(8.2)
yield from mv(zps.sx, x_3D[0], zps.sy, y_3D[0], zps.sz, z_3D[0], zps.pi_r, -80)
yield from xanes_3D(
eng_list=Ni_eng_list_in_situ,
exposure_time=0.02,
relative_rot_angle=160,
period=0.02,
out_x=2000,
out_y=None,
out_z=None,
out_r=1,
rs=6,
simu=False,
relative_move_flag=0,
traditional_sequence_flag=1,
note=f"p95_600_Ag_pristine_pos1",
)
yield from mv(zps.sx, x_3D[1], zps.sy, y_3D[1], zps.sz, z_3D[1], zps.pi_r, -80)
yield from xanes_3D(
eng_list=Ni_eng_list_in_situ,
exposure_time=0.02,
relative_rot_angle=160,
period=0.02,
out_x=2000,
out_y=None,
out_z=None,
out_r=1,
rs=6,
simu=False,
relative_move_flag=0,
traditional_sequence_flag=1,
note=f"p95_600_Ag_pristine_pos2",
)
def qingchao_scan(
eng_list,
x_list1,
y_list1,
z_list1,
r_list1,
x_list2,
y_list2,
z_list2,
r_list2,
sleep_time=0,
num=1,
):
for i in range(num):
print(f"repeat # {i}")
for j in range(5):
yield from mv(filter3, 1, filter4, 1)
yield from multipos_2D_xanes_scan2(
eng_list,
x_list=x_list1,
y_list=y_list1,
z_list=z_list1,
r_list=r_list1,
out_x=out_x,
out_y=out_y,
out_z=out_z,
out_r=out_r,
repeat_num=1,
exposure_time=0.1,
sleep_time=sleep_time,
chunk_size=5,
relative_move_flag=True,
note="622_filter3+4",
)
for j in range(5):
yield from mv(filter3, 0, filter4, 1)
yield from multipos_2D_xanes_scan2(
eng_list,
x_list=x_list2,
y_list=y_list2,
z_list=z_list2,
r_list=r_list2,
out_x=out_x,
out_y=out_y,
out_z=out_z,
out_r=out_r,
repeat_num=1,
exposure_time=0.1,
sleep_time=sleep_time,
chunk_size=5,
relative_move_flag=True,
note="622_filter4",
)
print(f"slepp for {sleep_time} sec ...")
yield from bps.sleep(sleep_time)
def ming():
for i in range(2):
yield from multipos_2D_xanes_scan2(Ni_list_2D,x_list,y_list,z_list,r_list,out_x=None,out_y=None,out_z=950,out_r=-90,exposure_time=0.1,repeat_num=3,sleep_time=600,relative_move_flag=0,chunk_size=5,simu=False,note='N83_insitu_pristine_filter_2+3+4')
yield from movpos(2, x_list, y_list, z_list, r_list)
yield from mv(zps.pi_r, -70)
yield from xanes_3D(Ni_list_3D, exposure_time=0.1, relative_rot_angle=140, period=0.1, out_x=None, out_y=None, out_z=2500, out_r=-20, rs=3, simu=False, relative_move_flag=1, note='N83_pos2')
yield from mv(zps.pi_r, 0)
yield from multipos_2D_xanes_scan2(Ni_list_2D,x_list,y_list,z_list,r_list,out_x=None,out_y=None,out_z=950,out_r=-90,exposure_time=0.1,repeat_num=3,sleep_time=600,relative_move_flag=0,chunk_size=5,simu=False,note='N83_insitu_pristine_filter_2+3+4')
yield from movpos(4, x_list, y_list, z_list, r_list)
yield from mv(zps.pi_r, -70)
yield from xanes_3D(Ni_list_3D, exposure_time=0.1, relative_rot_angle=145, period=0.1, out_x=None, out_y=None, out_z=2500, out_r=-20, rs=3, simu=False, relative_move_flag=1, note='N83_pos4')
yield from mv(zps.pi_r, 0)
insert_text('take xanes of full_eng_list')
for i in range(1):
yield from multipos_2D_xanes_scan2(Ni_eng_list_63pnt,x_list,y_list,z_list,r_list,out_x=None,out_y=None,out_z=950,out_r=-90,exposure_time=0.1,repeat_num=3,sleep_time=600,relative_move_flag=0,chunk_size=5,simu=False,note='N83_insitu_pristine_filter_2+3+4')
for j in range(4):
insert_text(f'taking 3D xanes at pos{j}\n')
yield from movpos(j, x_list, y_list, z_list, r_list)
yield from mv(zps.pi_r, -70)
yield from xanes_3D(Ni_list_3D, exposure_time=0.1, relative_rot_angle=140, period=0.1, out_x=None, out_y=None, out_z=2500, out_r=-20, rs=3, simu=False, relative_move_flag=1, note=f'N83_pos{j}')
yield from mv(zps.pi_r, 0)
for i in range(4):
yield from multipos_2D_xanes_scan2(Ni_list_2D,x_list,y_list,z_list,r_list,out_x=None,out_y=None,out_z=950,out_r=-90,exposure_time=0.1,repeat_num=3,sleep_time=600,relative_move_flag=0,chunk_size=5,simu=False,note='N83_insitu_pristine_filter_2+3+4')
yield from movpos(2, x_list, y_list, z_list, r_list)
yield from mv(zps.pi_r, -70)
yield from xanes_3D(Ni_list_3D, exposure_time=0.1, relative_rot_angle=140, period=0.1, out_x=None, out_y=None, out_z=2500, out_r=-20, rs=3, simu=False, relative_move_flag=1, note='N83_pos2')
yield from mv(zps.pi_r, 0)
yield from multipos_2D_xanes_scan2(Ni_list_2D,x_list,y_list,z_list,r_list,out_x=None,out_y=None,out_z=950,out_r=-90,exposure_time=0.1,repeat_num=3,sleep_time=600,relative_move_flag=0,chunk_size=5,simu=False,note='N83_insitu_pristine_filter_2+3+4')
yield from movpos(4, x_list, y_list, z_list, r_list)
yield from mv(zps.pi_r, -70)
yield from xanes_3D(Ni_list_3D, exposure_time=0.1, relative_rot_angle=145, period=0.1, out_x=None, out_y=None, out_z=2500, out_r=-20, rs=3, simu=False, relative_move_flag=1, note='N83_pos4')
yield from mv(zps.pi_r, 0)
def scan_change_expo_time(x_range, y_range, t1, t2, out_x=None, out_y=None, out_z=None, out_r=None, img_sizeX=2560, img_sizeY=2160, pxl=20, relative_move_flag=1, note='', simu=False, sleep_time=0, md=None):
    '''
    Take a mosaic of images over an (x, y) grid of sample positions, acquiring each
    position at two exposure times (t1 and t2). Dark and background images are also
    taken at both exposure times.
    '''
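    # A minimal usage sketch (assumes the bluesky RunEngine `RE`; grid indices and
    # exposure times below are illustrative):
    #   RE(scan_change_expo_time([-1, 1], [-1, 1], t1=0.05, t2=0.5, out_x=2000))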
motor_x_ini = zps.sx.position
motor_y_ini = zps.sy.position
motor_z_ini = zps.sz.position
motor_r_ini = zps.pi_r.position
detectors = [Andor, ic3]
if relative_move_flag:
motor_x_out = motor_x_ini + out_x if out_x else motor_x_ini
motor_y_out = motor_y_ini + out_y if out_y else motor_y_ini
motor_z_out = motor_z_ini + out_z if out_z else motor_z_ini
motor_r_out = motor_r_ini + out_r if out_r else motor_r_ini
else:
motor_x_out = out_x if out_x else motor_x_ini
motor_y_out = out_y if out_y else motor_y_ini
motor_z_out = out_z if out_z else motor_z_ini
motor_r_out = out_r if out_r else motor_r_ini
motor_eng = XEng
motor = [motor_eng, zps.sx, zps.sy, zps.sz, zps.pi_r]
_md = {
"detectors": [det.name for det in detectors],
"x_ray_energy": XEng.position,
"plan_args": {
"x_range": x_range,
"y_range": y_range,
"t1": t1,
"t2": t2,
"out_x": out_x,
"out_y": out_y,
"out_z": out_z,
"out_r": out_r,
"img_sizeX": img_sizeX,
"img_sizeY": img_sizeY,
"pxl": pxl,
"relative_move_flag": relative_move_flag,
"note": note if note else "None",
"sleep_time": sleep_time,
},
"plan_name": "scan_change_expo_time",
"hints": {},
"operator": "FXI",
"zone_plate": ZONE_PLATE,
}
_md.update(md or {})
try:
dimensions = [(motor.hints["fields"], "primary")]
except (AttributeError, KeyError):
pass
else:
_md["hints"].setdefault("dimensions", dimensions)
@stage_decorator(list(detectors) + motor)
@run_decorator(md=_md)
def inner():
# take dark image
print(f"take 5 dark image with exposure = {t1}")
yield from _set_andor_param(exposure_time=t1, period=t1, chunk_size=1)
yield from _take_dark_image(detectors, motor, num_dark=5, simu=simu)
print(f"take 5 dark image with exposure = {t2}")
yield from _set_andor_param(exposure_time=t2, period=t2, chunk_size=1)
yield from _take_dark_image(detectors, motor, num_dark=5, simu=simu)
print("open shutter ...")
yield from _open_shutter(simu)
for ii in np.arange(x_range[0], x_range[1] + 1):
for jj in np.arange(y_range[0], y_range[1] + 1):
yield from mv(zps.sx, motor_x_ini + ii * img_sizeX * pxl * 1.0 / 1000)
yield from mv(zps.sy, motor_y_ini + jj * img_sizeY * pxl * 1.0 / 1000)
yield from bps.sleep(0.1)
print(f'set exposure time = {t1}')
yield from _set_andor_param(exposure_time=t1, period=t1, chunk_size=1)
yield from bps.sleep(sleep_time)
yield from _take_image(detectors, motor, 1)
print(f'set exposure time = {t2}')
yield from _set_andor_param(exposure_time=t2, period=t2, chunk_size=1)
yield from bps.sleep(sleep_time)
yield from _take_image(detectors, motor, 1)
print(f'take bkg image with exposure time = {t1}')
yield from _set_andor_param(exposure_time=t1, period=t1, chunk_size=1)
yield from bps.sleep(sleep_time)
yield from _take_bkg_image(motor_x_out, motor_y_out, motor_z_out, motor_r_out,
detectors, motor, num_bkg=5, simu=simu)
print(f'take bkg image with exposure time = {t2}')
yield from _set_andor_param(exposure_time=t2, period=t2, chunk_size=1)
yield from bps.sleep(sleep_time)
yield from _take_bkg_image(motor_x_out, motor_y_out, motor_z_out, motor_r_out,
detectors, motor, num_bkg=5, simu=simu)
yield from _move_sample_in(motor_x_ini, motor_y_ini, motor_z_ini, motor_r_ini,
repeat=1, trans_first_flag=0)
print("closing shutter")
yield from _close_shutter(simu)
yield from inner()
txt = get_scan_parameter()
insert_text(txt)
print(txt)
|
py | b40235f547699ff78e86cbdae7c22bfca3ec61e0 | # -*- coding: utf-8 -*-
"""
This will generate the .pot and .mo files for the application domain and
languages defined below.
The .po and .mo files are placed as per convention in
"__packagename__/locale/lang/LC_MESSAGES"
The .pot file is placed in the locale folder.
This script or something similar should be added to your build process.
The actual translation work is normally done using a tool like poEdit or
similar, which allows you to generate a particular language catalog from the
.pot file or to merge new translations from the .pot into an existing language
catalog.
"""
import subprocess
import sys
from pathlib import Path
import wx
__packagename__ = "youtube_dl_gui"
# language domain
langDomain = __packagename__
# languages you want to support
supLang = {
"en_US": wx.LANGUAGE_ENGLISH,
"es_CU": wx.LANGUAGE_SPANISH,
}
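# To support an additional language, add it to supLang above and provide a matching
# .po catalog under "<package>/locale/<lang>/LC_MESSAGES", e.g. (assuming wx ships
# a constant for that language):
#   "fr_FR": wx.LANGUAGE_FRENCH,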
# we remove English, since the source-code strings are already in English
supportedLang = [lang for lang in supLang if lang != "en_US"]
PATH = Path(__file__).resolve().parent
appFolder = PATH.parent.joinpath(__packagename__)
# setup some stuff to get at Python I18N tools/utilities
pyPath = Path(sys.executable).resolve()
pyExe = pyPath.name
pyFolder = pyPath.parent
pyToolsFolder = pyFolder.joinpath("Tools")
pyI18nFolder = pyToolsFolder.joinpath("i18n")
pyGettext = pyI18nFolder.joinpath("pygettext.py")
pyMsgfmt = pyI18nFolder.joinpath("msgfmt.py")
outFolder = appFolder.joinpath("locale")
# build command for pygettext
gtOptions = f"-a -d {langDomain} -o {langDomain}.pot -p {outFolder} {appFolder}"
tCmd = f"{pyExe} {pyGettext} {gtOptions}"
print("Generating the .pot file")
print(f"cmd: {tCmd}")
rCode = subprocess.call(tCmd)
if rCode != 0:
sys.exit(f"return code: {rCode}")
print("")
for tLang in supportedLang:
# build command for msgfmt
langDir = appFolder.joinpath(f"locale/{tLang}/LC_MESSAGES")
poFile = langDir.joinpath(langDomain).with_suffix(".po")
tCmd = f"{pyExe} {pyMsgfmt} {poFile}"
print("Generating the .mo file")
print(f"cmd: {tCmd}")
rCode = subprocess.call(tCmd)
if rCode != 0:
sys.exit(f"return code: {rCode}")
|
py | b40236a9e6bd087f897b1119c2ae5426bb6595e4 | import pandas as pd
from pandas import DataFrame
from shift_detector.precalculations.precalculation import Precalculation
from shift_detector.utils.column_management import detect_column_types, ColumnType, \
CATEGORICAL_MAX_RELATIVE_CARDINALITY, column_names
from shift_detector.utils.custom_print import lprint
from shift_detector.utils.data_io import shared_column_names
from shift_detector.utils.errors import InsufficientDataError
MIN_DATA_SIZE = int(CATEGORICAL_MAX_RELATIVE_CARDINALITY * 100)
class Store:
def __init__(self,
df1: DataFrame,
df2: DataFrame,
log_print=False,
custom_column_types={}):
self.verify_min_data_size(min([len(df1), len(df2)]))
self.shared_columns = shared_column_names(df1, df2)
self.df1 = df1[self.shared_columns]
self.df2 = df2[self.shared_columns]
self.log_print = log_print
if not isinstance(custom_column_types, dict):
raise TypeError("column_types is not a dictionary."
"Received: {}".format(custom_column_types.__class__.__name__))
if any([not column for column in custom_column_types.keys()]):
raise TypeError("Not all keys of column_types are of type string."
"Received: {}".format(list(custom_column_types.keys())))
if any([not column_type for column_type in custom_column_types.values()]):
raise TypeError("Not all values of column_types are of type ColumnType."
"Received: {}".format(list(custom_column_types.values())))
self.type_to_columns = detect_column_types(self.df1, self.df2, self.shared_columns)
self.__apply_column_types(custom_column_types)
lprint("Numerical columns: {}".format(", ".join(column_names(self.column_names(ColumnType.numerical)))),
self.log_print)
lprint("Categorical columns: {}".format(", ".join(column_names(self.column_names(ColumnType.categorical)))),
self.log_print)
lprint("Text columns: {}".format(", ".join(column_names(self.column_names(ColumnType.text)))), self.log_print)
self.splitted_dfs = {column_type: (self.df1[columns], self.df2[columns])
for column_type, columns in self.type_to_columns.items()}
self.preprocessings = {}
def __getitem__(self, needed_preprocessing) -> DataFrame:
if isinstance(needed_preprocessing, ColumnType):
return self.splitted_dfs[needed_preprocessing]
if not isinstance(needed_preprocessing, Precalculation):
raise TypeError("Needed Preprocessing must be of type Precalculation or ColumnType")
if needed_preprocessing in self.preprocessings:
lprint("- Use already executed {}".format(needed_preprocessing.__class__.__name__), self.log_print)
return self.preprocessings[needed_preprocessing]
lprint("- Executing {}".format(needed_preprocessing.__class__.__name__), self.log_print)
preprocessing = needed_preprocessing.process(self)
lprint("- Finished Precalculation", self.log_print)
self.preprocessings[needed_preprocessing] = preprocessing
return preprocessing
def column_names(self, *column_types):
if not column_types:
return self.shared_columns
if any([not isinstance(column_type, ColumnType) for column_type in column_types]):
raise TypeError("column_types should be empty or of type ColumnType.")
multi_columns = [self.type_to_columns[column_type] for column_type in column_types]
flattened = {column for columns in multi_columns for column in columns}
return list(flattened)
@staticmethod
def verify_min_data_size(size):
if size < MIN_DATA_SIZE:
raise InsufficientDataError(actual_size=size, expected_size=MIN_DATA_SIZE)
def __apply_column_types(self, custom_column_to_column_type):
column_to_column_type = {}
for column_type, columns in self.type_to_columns.items():
# iterate over columns for column_types
for column in columns:
# apply custom column type
custom_column_type = column_type
if column in custom_column_to_column_type:
custom_column_type = custom_column_to_column_type[column]
column_to_column_type[column] = custom_column_type
self.__set_column_type(column, custom_column_type)
new_column_type_to_columns = {
ColumnType.categorical: [],
ColumnType.numerical: [],
ColumnType.text: []
}
# revert back to old column structure
for column, column_type in column_to_column_type.items():
new_column_type_to_columns[column_type].append(column)
self.type_to_columns = new_column_type_to_columns
def __set_column_type(self, column, column_type):
if column_type == ColumnType.numerical:
try:
self.df1[column] = pd.to_numeric(self.df1[column]).astype(float)
self.df2[column] = pd.to_numeric(self.df2[column]).astype(float)
except Exception as e:
raise Exception("An error occurred during the conversion of column '{}' to the column type '{}'. "
"{}".format(column, column_type.name, str(e)))
elif column_type == ColumnType.categorical or column_type == ColumnType.text:
self.df1[column] = self.df1[column].astype(str)
self.df2[column] = self.df2[column].astype(str)
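# A hedged usage sketch (the DataFrames and column names below are illustrative and
# assume the data is large enough to pass the minimum-size check):
#   df1 = pd.DataFrame({"age": [20, 30] * 60, "city": ["A", "B"] * 60})
#   df2 = pd.DataFrame({"age": [25, 35] * 60, "city": ["A", "C"] * 60})
#   store = Store(df1, df2, custom_column_types={"age": ColumnType.numerical})
#   df1_numerical, df2_numerical = store[ColumnType.numerical]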
|
bzl | b40238090f1e60ca0d95bca41d971c37332f1b34 | # Copyright 2018- The Pixie Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
load("@bazel_gazelle//:deps.bzl", "go_repository")
def pl_go_dependencies():
go_repository(
name = "co_honnef_go_tools",
importpath = "honnef.co/go/tools",
sum = "h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8=",
version = "v0.0.1-2020.1.4",
)
go_repository(
name = "com_github_agnivade_levenshtein",
importpath = "github.com/agnivade/levenshtein",
sum = "h1:3oJU7J3FGFmyhn8KHjmVaZCN5hxTr7GxgRue+sxIXdQ=",
version = "v1.0.1",
)
go_repository(
name = "com_github_ajg_form",
importpath = "github.com/ajg/form",
sum = "h1:t9c7v8JUKu/XxOGBU0yjNpaMloxGEJhUkqFRq0ibGeU=",
version = "v1.5.1",
)
go_repository(
name = "com_github_alcortesm_tgz",
importpath = "github.com/alcortesm/tgz",
sum = "h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=",
version = "v0.0.0-20161220082320-9c5fe88206d7",
)
go_repository(
name = "com_github_alecthomas_assert",
importpath = "github.com/alecthomas/assert",
sum = "h1:smF2tmSOzy2Mm+0dGI2AIUHY+w0BUc+4tn40djz7+6U=",
version = "v0.0.0-20170929043011-405dbfeb8e38",
)
go_repository(
name = "com_github_alecthomas_chroma",
importpath = "github.com/alecthomas/chroma",
sum = "h1:G1i02OhUbRi2nJxcNkwJaY/J1gHXj9tt72qN6ZouLFQ=",
version = "v0.7.1",
)
go_repository(
name = "com_github_alecthomas_colour",
importpath = "github.com/alecthomas/colour",
sum = "h1:JHZL0hZKJ1VENNfmXvHbgYlbUOvpzYzvy2aZU5gXVeo=",
version = "v0.0.0-20160524082231-60882d9e2721",
)
go_repository(
name = "com_github_alecthomas_go_thrift",
importpath = "github.com/alecthomas/go-thrift",
sum = "h1:gKv6LPDhF/G3cNribA+kZtNPiPpKabZGLhcJuEtp3ig=",
version = "v0.0.0-20170109061633-7914173639b2",
)
go_repository(
name = "com_github_alecthomas_kong",
importpath = "github.com/alecthomas/kong",
sum = "h1:V1tLBhyQBC4rsbXbcOvm3GBaytJSwRNX69fp1WJxbqQ=",
version = "v0.2.1",
)
go_repository(
name = "com_github_alecthomas_participle",
importpath = "github.com/alecthomas/participle",
sum = "h1:P2PJWzwrSpuCWXKnzqvw0b0phSfH1kJo4p2HvLynVsI=",
version = "v0.4.1",
)
go_repository(
name = "com_github_alecthomas_repr",
importpath = "github.com/alecthomas/repr",
sum = "h1:GDQdwm/gAcJcLAKQQZGOJ4knlw+7rfEQQcmwTbt4p5E=",
version = "v0.0.0-20181024024818-d37bc2a10ba1",
)
go_repository(
name = "com_github_alecthomas_template",
importpath = "github.com/alecthomas/template",
sum = "h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=",
version = "v0.0.0-20190718012654-fb15b899a751",
)
go_repository(
name = "com_github_alecthomas_units",
importpath = "github.com/alecthomas/units",
sum = "h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E=",
version = "v0.0.0-20190924025748-f65c72e2690d",
)
go_repository(
name = "com_github_alexflint_go_filemutex",
importpath = "github.com/alexflint/go-filemutex",
sum = "h1:AMzIhMUqU3jMrZiTuW0zkYeKlKDAFD+DG20IoO421/Y=",
version = "v0.0.0-20171022225611-72bdc8eae2ae",
)
go_repository(
name = "com_github_andreasbriese_bbloom",
importpath = "github.com/AndreasBriese/bbloom",
sum = "h1:HD8gA2tkByhMAwYaFAX9w2l7vxvBQ5NMoxDrkhqhtn4=",
version = "v0.0.0-20190306092124-e2d15f34fcf9",
)
go_repository(
name = "com_github_andreyvit_diff",
importpath = "github.com/andreyvit/diff",
sum = "h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=",
version = "v0.0.0-20170406064948-c7f18ee00883",
)
go_repository(
name = "com_github_andybalholm_cascadia",
importpath = "github.com/andybalholm/cascadia",
sum = "h1:BuuO6sSfQNFRu1LppgbD25Hr2vLYW25JvxHs5zzsLTo=",
version = "v1.1.0",
)
go_repository(
name = "com_github_anmitsu_go_shlex",
importpath = "github.com/anmitsu/go-shlex",
sum = "h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=",
version = "v0.0.0-20161002113705-648efa622239",
)
go_repository(
name = "com_github_antihax_optional",
importpath = "github.com/antihax/optional",
sum = "h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg=",
version = "v1.0.0",
)
go_repository(
name = "com_github_armon_circbuf",
importpath = "github.com/armon/circbuf",
sum = "h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA=",
version = "v0.0.0-20150827004946-bbbad097214e",
)
go_repository(
name = "com_github_armon_go_metrics",
importpath = "github.com/armon/go-metrics",
sum = "h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM=",
version = "v0.0.0-20190430140413-ec5e00d3c878",
)
go_repository(
name = "com_github_armon_go_radix",
importpath = "github.com/armon/go-radix",
sum = "h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to=",
version = "v0.0.0-20180808171621-7fddfc383310",
)
go_repository(
name = "com_github_armon_go_socks5",
importpath = "github.com/armon/go-socks5",
sum = "h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=",
version = "v0.0.0-20160902184237-e75332964ef5",
)
go_repository(
name = "com_github_asaskevich_govalidator",
importpath = "github.com/asaskevich/govalidator",
sum = "h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg=",
version = "v0.0.0-20200907205600-7a23bdc65eef",
)
go_repository(
name = "com_github_aws_aws_sdk_go",
importpath = "github.com/aws/aws-sdk-go",
sum = "h1:sscPpn/Ns3i0F4HPEWAVcwdIRaZZCuL7llJ2/60yPIk=",
version = "v1.34.28",
)
go_repository(
name = "com_github_aymerick_raymond",
importpath = "github.com/aymerick/raymond",
sum = "h1:Ppm0npCCsmuR9oQaBtRuZcmILVE74aXE+AmrJj8L2ns=",
version = "v2.0.3-0.20180322193309-b565731e1464+incompatible",
)
go_repository(
name = "com_github_azure_azure_sdk_for_go",
importpath = "github.com/Azure/azure-sdk-for-go",
sum = "h1:KnPIugL51v3N3WwvaSmZbxukD1WuWXOiE9fRdu32f2I=",
version = "v16.2.1+incompatible",
)
go_repository(
name = "com_github_azure_go_ansiterm",
importpath = "github.com/Azure/go-ansiterm",
replace = "github.com/Azure/go-ansiterm",
sum = "h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=",
version = "v0.0.0-20170929234023-d6e3b3328b78",
)
go_repository(
name = "com_github_azure_go_autorest",
importpath = "github.com/Azure/go-autorest",
replace = "github.com/Azure/go-autorest",
sum = "h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=",
version = "v14.2.0+incompatible",
)
go_repository(
name = "com_github_azure_go_autorest_autorest",
importpath = "github.com/Azure/go-autorest/autorest",
replace = "github.com/Azure/go-autorest/autorest",
sum = "h1:eVvIXUKiTgv++6YnWb42DUA1YL7qDugnKP0HljexdnQ=",
version = "v0.11.1",
)
go_repository(
name = "com_github_azure_go_autorest_autorest_adal",
importpath = "github.com/Azure/go-autorest/autorest/adal",
sum = "h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0=",
version = "v0.9.5",
)
go_repository(
name = "com_github_azure_go_autorest_autorest_date",
importpath = "github.com/Azure/go-autorest/autorest/date",
sum = "h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=",
version = "v0.3.0",
)
go_repository(
name = "com_github_azure_go_autorest_autorest_mocks",
importpath = "github.com/Azure/go-autorest/autorest/mocks",
sum = "h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=",
version = "v0.4.1",
)
go_repository(
name = "com_github_azure_go_autorest_logger",
importpath = "github.com/Azure/go-autorest/logger",
sum = "h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE=",
version = "v0.2.0",
)
go_repository(
name = "com_github_azure_go_autorest_tracing",
importpath = "github.com/Azure/go-autorest/tracing",
sum = "h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=",
version = "v0.6.0",
)
go_repository(
name = "com_github_badoux_checkmail",
importpath = "github.com/badoux/checkmail",
sum = "h1:kXfVkP8xPSJXzicomzjECcw6tv1Wl9h1lNenWBfNKdg=",
version = "v0.0.0-20181210160741-9661bd69e9ad",
)
go_repository(
name = "com_github_bazelbuild_rules_go",
importpath = "github.com/bazelbuild/rules_go",
sum = "h1:U1fJbrwVyl0A5wAqWzSQoRutZHolAgKnURcoKRala+8=",
version = "v0.22.4",
)
go_repository(
name = "com_github_beorn7_perks",
importpath = "github.com/beorn7/perks",
sum = "h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=",
version = "v1.0.1",
)
go_repository(
name = "com_github_bgentry_speakeasy",
importpath = "github.com/bgentry/speakeasy",
sum = "h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=",
version = "v0.1.0",
)
go_repository(
name = "com_github_bitly_go_simplejson",
importpath = "github.com/bitly/go-simplejson",
sum = "h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y=",
version = "v0.5.0",
)
go_repository(
name = "com_github_bits_and_blooms_bitset",
importpath = "github.com/bits-and-blooms/bitset",
sum = "h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjLyS07ChA=",
version = "v1.2.0",
)
go_repository(
name = "com_github_bketelsen_crypt",
importpath = "github.com/bketelsen/crypt",
sum = "h1:w/jqZtC9YD4DS/Vp9GhWfWcCpuAL58oTnLoI8vE9YHU=",
version = "v0.0.4",
)
go_repository(
name = "com_github_blang_semver",
importpath = "github.com/blang/semver",
sum = "h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=",
version = "v3.5.1+incompatible",
)
go_repository(
name = "com_github_bmatcuk_doublestar",
importpath = "github.com/bmatcuk/doublestar",
sum = "h1:oC24CykoSAB8zd7XgruHo33E0cHJf/WhQA/7BeXj+x0=",
version = "v1.2.2",
)
go_repository(
name = "com_github_bmizerany_assert",
importpath = "github.com/bmizerany/assert",
sum = "h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=",
version = "v0.0.0-20160611221934-b7ed37b82869",
)
go_repository(
name = "com_github_boltdb_bolt",
importpath = "github.com/boltdb/bolt",
sum = "h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=",
version = "v1.3.1",
)
go_repository(
name = "com_github_bshuster_repo_logrus_logstash_hook",
importpath = "github.com/bshuster-repo/logrus-logstash-hook",
sum = "h1:pgAtgj+A31JBVtEHu2uHuEx0n+2ukqUJnS2vVe5pQNA=",
version = "v0.4.1",
)
go_repository(
name = "com_github_buger_jsonparser",
importpath = "github.com/buger/jsonparser",
sum = "h1:y853v6rXx+zefEcjET3JuKAqvhj+FKflQijjeaSv2iA=",
version = "v0.0.0-20180808090653-f4dd9f5a6b44",
)
go_repository(
name = "com_github_bugsnag_bugsnag_go",
importpath = "github.com/bugsnag/bugsnag-go",
sum = "h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng=",
version = "v0.0.0-20141110184014-b1d153021fcd",
)
go_repository(
name = "com_github_bugsnag_osext",
importpath = "github.com/bugsnag/osext",
sum = "h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ=",
version = "v0.0.0-20130617224835-0dd3f918b21b",
)
go_repository(
name = "com_github_bugsnag_panicwrap",
importpath = "github.com/bugsnag/panicwrap",
sum = "h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o=",
version = "v0.0.0-20151223152923-e2c28503fcd0",
)
go_repository(
name = "com_github_burntsushi_toml",
importpath = "github.com/BurntSushi/toml",
sum = "h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=",
version = "v0.3.1",
)
go_repository(
name = "com_github_burntsushi_xgb",
importpath = "github.com/BurntSushi/xgb",
sum = "h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=",
version = "v0.0.0-20160522181843-27f122750802",
)
go_repository(
name = "com_github_cenkalti_backoff_v3",
importpath = "github.com/cenkalti/backoff/v3",
sum = "h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M=",
version = "v3.2.2",
)
go_repository(
name = "com_github_census_instrumentation_opencensus_proto",
importpath = "github.com/census-instrumentation/opencensus-proto",
sum = "h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=",
version = "v0.2.1",
)
go_repository(
name = "com_github_cespare_xxhash",
importpath = "github.com/cespare/xxhash",
sum = "h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=",
version = "v1.1.0",
)
go_repository(
name = "com_github_cespare_xxhash_v2",
importpath = "github.com/cespare/xxhash/v2",
sum = "h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=",
version = "v2.1.1",
)
go_repository(
name = "com_github_chai2010_gettext_go",
importpath = "github.com/chai2010/gettext-go",
sum = "h1:7aWHqerlJ41y6FOsEUvknqgXnGmJyJSbjhAWq5pO4F8=",
version = "v0.0.0-20160711120539-c6fed771bfd5",
)
go_repository(
name = "com_github_checkpoint_restore_go_criu_v4",
importpath = "github.com/checkpoint-restore/go-criu/v4",
sum = "h1:WW2B2uxx9KWF6bGlHqhm8Okiafwwx7Y2kcpn8lCpjgo=",
version = "v4.1.0",
)
go_repository(
name = "com_github_checkpoint_restore_go_criu_v5",
importpath = "github.com/checkpoint-restore/go-criu/v5",
sum = "h1:TW8f/UvntYoVDMN1K2HlT82qH1rb0sOjpGw3m6Ym+i4=",
version = "v5.0.0",
)
go_repository(
name = "com_github_chzyer_logex",
importpath = "github.com/chzyer/logex",
sum = "h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=",
version = "v1.1.10",
)
go_repository(
name = "com_github_chzyer_readline",
importpath = "github.com/chzyer/readline",
sum = "h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=",
version = "v0.0.0-20180603132655-2972be24d48e",
)
go_repository(
name = "com_github_chzyer_test",
importpath = "github.com/chzyer/test",
sum = "h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=",
version = "v0.0.0-20180213035817-a1ea475d72b1",
)
go_repository(
name = "com_github_cilium_ebpf",
importpath = "github.com/cilium/ebpf",
sum = "h1:iHsfF/t4aW4heW2YKfeHrVPGdtYTL4C4KocpM8KTSnI=",
version = "v0.6.2",
)
go_repository(
name = "com_github_circonus_labs_circonus_gometrics",
importpath = "github.com/circonus-labs/circonus-gometrics",
sum = "h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY=",
version = "v2.3.1+incompatible",
)
go_repository(
name = "com_github_circonus_labs_circonusllhist",
importpath = "github.com/circonus-labs/circonusllhist",
sum = "h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA=",
version = "v0.1.3",
)
go_repository(
name = "com_github_cloudykit_fastprinter",
importpath = "github.com/CloudyKit/fastprinter",
sum = "h1:3SgJcK9l5uPdBC/X17wanyJAMxM33+4ZhEIV96MIH8U=",
version = "v0.0.0-20170127035650-74b38d55f37a",
)
go_repository(
name = "com_github_cloudykit_jet",
importpath = "github.com/CloudyKit/jet",
sum = "h1:rZgFj+Gtf3NMi/U5FvCvhzaxzW/TaPYgUYx3bAPz9DE=",
version = "v2.1.3-0.20180809161101-62edd43e4f88+incompatible",
)
go_repository(
name = "com_github_cncf_udpa_go",
importpath = "github.com/cncf/udpa/go",
sum = "h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M=",
version = "v0.0.0-20201120205902-5459f2c99403",
)
go_repository(
name = "com_github_cockroachdb_apd",
importpath = "github.com/cockroachdb/apd",
sum = "h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=",
version = "v1.1.0",
)
go_repository(
name = "com_github_cockroachdb_datadriven",
importpath = "github.com/cockroachdb/datadriven",
sum = "h1:uhZrAfEayBecH2w2tZmhe20HJ7hDvrrA4x2Bg9YdZKM=",
version = "v1.0.0",
)
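# Note (added for clarity): build_file_proto_mode = "disable" on the rule below is a
# Gazelle attribute that skips go_proto_library generation for this repository's
# .proto files, so the checked-in, pre-generated .pb.go sources are built instead.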
go_repository(
name = "com_github_cockroachdb_errors",
build_file_proto_mode = "disable",
importpath = "github.com/cockroachdb/errors",
sum = "h1:A5+txlVZfOqFBDa4mGz2bUWSp0aHElvHX2bKkdbQu+Y=",
version = "v1.8.1",
)
go_repository(
name = "com_github_cockroachdb_logtags",
importpath = "github.com/cockroachdb/logtags",
sum = "h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY=",
version = "v0.0.0-20190617123548-eb05cc24525f",
)
go_repository(
name = "com_github_cockroachdb_pebble",
importpath = "github.com/cockroachdb/pebble",
sum = "h1:7e89t8ISz2n6jc1PzGuLbnsYcaaVmGR13fwx1yJi5a8=",
version = "v0.0.0-20210120202502-6110b03a8a85",
)
go_repository(
name = "com_github_cockroachdb_redact",
importpath = "github.com/cockroachdb/redact",
sum = "h1:8QG/764wK+vmEYoOlfobpe12EQcS81ukx/a4hdVMxNw=",
version = "v1.0.8",
)
go_repository(
name = "com_github_cockroachdb_sentry_go",
importpath = "github.com/cockroachdb/sentry-go",
sum = "h1:IKgmqgMQlVJIZj19CdocBeSfSaiCbEBZGKODaixqtHM=",
version = "v0.6.1-cockroachdb.2",
)
go_repository(
name = "com_github_codahale_hdrhistogram",
importpath = "github.com/codahale/hdrhistogram",
sum = "h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w=",
version = "v0.0.0-20161010025455-3a0bb77429bd",
)
go_repository(
name = "com_github_codegangsta_inject",
importpath = "github.com/codegangsta/inject",
sum = "h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q=",
version = "v0.0.0-20150114235600-33e0aa1cb7c0",
)
go_repository(
name = "com_github_containerd_aufs",
importpath = "github.com/containerd/aufs",
sum = "h1:2oeJiwX5HstO7shSrPZjrohJZLzK36wvpdmzDRkL/LY=",
version = "v1.0.0",
)
go_repository(
name = "com_github_containerd_btrfs",
importpath = "github.com/containerd/btrfs",
sum = "h1:osn1exbzdub9L5SouXO5swW4ea/xVdJZ3wokxN5GrnA=",
version = "v1.0.0",
)
go_repository(
name = "com_github_containerd_cgroups",
importpath = "github.com/containerd/cgroups",
sum = "h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ=",
version = "v1.0.1",
)
go_repository(
name = "com_github_containerd_console",
importpath = "github.com/containerd/console",
sum = "h1:Pi6D+aZXM+oUw1czuKgH5IJ+y0jhYcwBJfx5/Ghn9dE=",
version = "v1.0.2",
)
go_repository(
name = "com_github_containerd_containerd",
importpath = "github.com/containerd/containerd",
sum = "h1:q1gxsZsGZ8ddVe98yO6pR21b5xQSMiR61lD0W96pgQo=",
version = "v1.5.5",
)
go_repository(
name = "com_github_containerd_continuity",
importpath = "github.com/containerd/continuity",
sum = "h1:UFRRY5JemiAhPZrr/uE0n8fMTLcZsUvySPr1+D7pgr8=",
version = "v0.1.0",
)
go_repository(
name = "com_github_containerd_fifo",
importpath = "github.com/containerd/fifo",
sum = "h1:6PirWBr9/L7GDamKr+XM0IeUFXu5mf3M/BPpH9gaLBU=",
version = "v1.0.0",
)
go_repository(
name = "com_github_containerd_go_cni",
importpath = "github.com/containerd/go-cni",
sum = "h1:YbJAhpTevL2v6u8JC1NhCYRwf+3Vzxcc5vGnYoJ7VeE=",
version = "v1.0.2",
)
go_repository(
name = "com_github_containerd_go_runc",
importpath = "github.com/containerd/go-runc",
sum = "h1:oU+lLv1ULm5taqgV/CJivypVODI4SUz1znWjv3nNYS0=",
version = "v1.0.0",
)
go_repository(
name = "com_github_containerd_imgcrypt",
importpath = "github.com/containerd/imgcrypt",
sum = "h1:LBwiTfoUsdiEGAR1TpvxE+Gzt7469oVu87iR3mv3Byc=",
version = "v1.1.1",
)
go_repository(
name = "com_github_containerd_nri",
importpath = "github.com/containerd/nri",
sum = "h1:6QioHRlThlKh2RkRTR4kIT3PKAcrLo3gIWnjkM4dQmQ=",
version = "v0.1.0",
)
go_repository(
name = "com_github_containerd_ttrpc",
importpath = "github.com/containerd/ttrpc",
sum = "h1:2/O3oTZN36q2xRolk0a2WWGgh7/Vf/liElg5hFYLX9U=",
version = "v1.0.2",
)
go_repository(
name = "com_github_containerd_typeurl",
importpath = "github.com/containerd/typeurl",
sum = "h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY=",
version = "v1.0.2",
)
go_repository(
name = "com_github_containerd_zfs",
importpath = "github.com/containerd/zfs",
sum = "h1:cXLJbx+4Jj7rNsTiqVfm6i+RNLx6FFA2fMmDlEf+Wm8=",
version = "v1.0.0",
)
go_repository(
name = "com_github_containernetworking_cni",
importpath = "github.com/containernetworking/cni",
sum = "h1:7zpDnQ3T3s4ucOuJ/ZCLrYBxzkg0AELFfII3Epo9TmI=",
version = "v0.8.1",
)
go_repository(
name = "com_github_containernetworking_plugins",
importpath = "github.com/containernetworking/plugins",
sum = "h1:FD1tADPls2EEi3flPc2OegIY1M9pUa9r2Quag7HMLV8=",
version = "v0.9.1",
)
go_repository(
name = "com_github_containers_ocicrypt",
importpath = "github.com/containers/ocicrypt",
sum = "h1:prL8l9w3ntVqXvNH1CiNn5ENjcCnr38JqpSyvKKB4GI=",
version = "v1.1.1",
)
go_repository(
name = "com_github_coreos_go_iptables",
importpath = "github.com/coreos/go-iptables",
sum = "h1:mw6SAibtHKZcNzAsOxjoHIG0gy5YFHhypWSSNc6EjbQ=",
version = "v0.5.0",
)
go_repository(
name = "com_github_coreos_go_oidc",
importpath = "github.com/coreos/go-oidc",
sum = "h1:sdJrfw8akMnCuUlaZU3tE/uYXFgfqom8DBE9so9EBsM=",
version = "v2.1.0+incompatible",
)
go_repository(
name = "com_github_coreos_go_semver",
importpath = "github.com/coreos/go-semver",
sum = "h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=",
version = "v0.3.0",
)
go_repository(
name = "com_github_coreos_go_systemd",
importpath = "github.com/coreos/go-systemd",
sum = "h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=",
version = "v0.0.0-20190321100706-95778dfbb74e",
)
go_repository(
name = "com_github_coreos_go_systemd_v22",
importpath = "github.com/coreos/go-systemd/v22",
sum = "h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=",
version = "v22.3.2",
)
go_repository(
name = "com_github_coreos_pkg",
importpath = "github.com/coreos/pkg",
sum = "h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=",
version = "v0.0.0-20180928190104-399ea9e2e55f",
)
go_repository(
name = "com_github_cosiner_argv",
importpath = "github.com/cosiner/argv",
sum = "h1:BVDiEL32lwHukgJKP87btEPenzrrHUjajs/8yzaqcXg=",
version = "v0.1.0",
)
go_repository(
name = "com_github_cpuguy83_go_md2man",
importpath = "github.com/cpuguy83/go-md2man",
sum = "h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=",
version = "v1.0.10",
)
go_repository(
name = "com_github_cpuguy83_go_md2man_v2",
importpath = "github.com/cpuguy83/go-md2man/v2",
sum = "h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=",
version = "v2.0.0",
)
go_repository(
name = "com_github_creack_pty",
importpath = "github.com/creack/pty",
sum = "h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w=",
version = "v1.1.9",
)
go_repository(
name = "com_github_cyphar_filepath_securejoin",
importpath = "github.com/cyphar/filepath-securejoin",
sum = "h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=",
version = "v0.2.2",
)
go_repository(
name = "com_github_d2g_dhcp4",
importpath = "github.com/d2g/dhcp4",
sum = "h1:Xo2rK1pzOm0jO6abTPIQwbAmqBIOj132otexc1mmzFc=",
version = "v0.0.0-20170904100407-a1d1b6c41b1c",
)
go_repository(
name = "com_github_d2g_dhcp4client",
importpath = "github.com/d2g/dhcp4client",
sum = "h1:suYBsYZIkSlUMEz4TAYCczKf62IA2UWC+O8+KtdOhCo=",
version = "v1.0.0",
)
go_repository(
name = "com_github_d2g_dhcp4server",
importpath = "github.com/d2g/dhcp4server",
sum = "h1:+CpLbZIeUn94m02LdEKPcgErLJ347NUwxPKs5u8ieiY=",
version = "v0.0.0-20181031114812-7d4a0a7f59a5",
)
go_repository(
name = "com_github_d2g_hardwareaddr",
importpath = "github.com/d2g/hardwareaddr",
sum = "h1:itqmmf1PFpC4n5JW+j4BU7X4MTfVurhYRTjODoPb2Y8=",
version = "v0.0.0-20190221164911-e7d9fbe030e4",
)
go_repository(
name = "com_github_danwakefield_fnmatch",
importpath = "github.com/danwakefield/fnmatch",
sum = "h1:y5HC9v93H5EPKqaS1UYVg1uYah5Xf51mBfIoWehClUQ=",
version = "v0.0.0-20160403171240-cbb64ac3d964",
)
go_repository(
name = "com_github_data_dog_go_sqlmock",
importpath = "github.com/DATA-DOG/go-sqlmock",
sum = "h1:CWUqKXe0s8A2z6qCgkP4Kru7wC11YoAnoupUKFDnH08=",
version = "v1.3.3",
)
go_repository(
name = "com_github_datadog_datadog_go",
importpath = "github.com/DataDog/datadog-go",
sum = "h1:V5BKkxACZLjzHjSgBbr2gvLA2Ae49yhc6CSY7MLy5k4=",
version = "v2.2.0+incompatible",
)
go_repository(
name = "com_github_datadog_zstd",
importpath = "github.com/DataDog/zstd",
sum = "h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM=",
version = "v1.4.1",
)
go_repository(
name = "com_github_davecgh_go_spew",
importpath = "github.com/davecgh/go-spew",
sum = "h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=",
version = "v1.1.1",
)
go_repository(
name = "com_github_daviddengcn_go_colortext",
importpath = "github.com/daviddengcn/go-colortext",
sum = "h1:uVsMphB1eRx7xB1njzL3fuMdWRN8HtVzoUOItHMwv5c=",
version = "v0.0.0-20160507010035-511bcaf42ccd",
)
go_repository(
name = "com_github_decred_dcrd_chaincfg_chainhash",
importpath = "github.com/decred/dcrd/chaincfg/chainhash",
sum = "h1:rt5Vlq/jM3ZawwiacWjPa+smINyLRN07EO0cNBV6DGU=",
version = "v1.0.2",
)
go_repository(
name = "com_github_decred_dcrd_crypto_blake256",
importpath = "github.com/decred/dcrd/crypto/blake256",
sum = "h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=",
version = "v1.0.0",
)
go_repository(
name = "com_github_decred_dcrd_dcrec_secp256k1_v3",
importpath = "github.com/decred/dcrd/dcrec/secp256k1/v3",
sum = "h1:sgNeV1VRMDzs6rzyPpxyM0jp317hnwiq58Filgag2xw=",
version = "v3.0.0",
)
go_repository(
name = "com_github_denverdino_aliyungo",
importpath = "github.com/denverdino/aliyungo",
sum = "h1:p6poVbjHDkKa+wtC8frBMwQtT3BmqGYBjzMwJ63tuR4=",
version = "v0.0.0-20190125010748-a747050bb1ba",
)
go_repository(
name = "com_github_dgraph_io_badger",
importpath = "github.com/dgraph-io/badger",
sum = "h1:DshxFxZWXUcO0xX476VJC07Xsr6ZCBVRHKZ93Oh7Evo=",
version = "v1.6.0",
)
go_repository(
name = "com_github_dgraph_io_badger_v3",
build_file_proto_mode = "disable",
importpath = "github.com/dgraph-io/badger/v3",
sum = "h1:Hmyof0WMEF/QtutX5SQHzIMnJQxb/IrSzhjckV2SD6g=",
version = "v3.2011.1",
)
go_repository(
name = "com_github_dgraph_io_ristretto",
importpath = "github.com/dgraph-io/ristretto",
sum = "h1:eQYOG6A4td1tht0NdJB9Ls6DsXRGb2Ft6X9REU/MbbE=",
version = "v0.0.4-0.20210122082011-bb5d392ed82d",
)
go_repository(
name = "com_github_dgrijalva_jwt_go",
importpath = "github.com/dgrijalva/jwt-go",
sum = "h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=",
version = "v3.2.0+incompatible",
)
go_repository(
name = "com_github_dgrijalva_jwt_go_v4",
importpath = "github.com/dgrijalva/jwt-go/v4",
sum = "h1:CaO/zOnF8VvUfEbhRatPcwKVWamvbYd8tQGRWacE9kU=",
version = "v4.0.0-preview1",
)
go_repository(
name = "com_github_dgryski_go_farm",
importpath = "github.com/dgryski/go-farm",
sum = "h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=",
version = "v0.0.0-20190423205320-6a90982ecee2",
)
go_repository(
name = "com_github_dlclark_regexp2",
importpath = "github.com/dlclark/regexp2",
sum = "h1:CqB4MjHw0MFCDj+PHHjiESmHX+N7t0tJzKvC6M97BRg=",
version = "v1.1.6",
)
go_repository(
name = "com_github_dnaeon_go_vcr",
importpath = "github.com/dnaeon/go-vcr",
sum = "h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=",
version = "v1.0.1",
)
go_repository(
name = "com_github_docker_distribution",
importpath = "github.com/docker/distribution",
sum = "h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=",
version = "v2.7.1+incompatible",
)
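# Note (added for clarity): the `replace` attribute mirrors a go.mod replace directive.
# The module below is still imported as github.com/docker/docker, but it is fetched
# from github.com/moby/moby at the pinned version.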
go_repository(
name = "com_github_docker_docker",
importpath = "github.com/docker/docker",
replace = "github.com/moby/moby",
sum = "h1:X1Kfy/GrYL4UMcxWrIZCw4saZsIbd+W/++w6HA6STb8=",
version = "v20.10.5+incompatible",
)
go_repository(
name = "com_github_docker_go_connections",
importpath = "github.com/docker/go-connections",
sum = "h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=",
version = "v0.4.0",
)
go_repository(
name = "com_github_docker_go_events",
importpath = "github.com/docker/go-events",
sum = "h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=",
version = "v0.0.0-20190806004212-e31b211e4f1c",
)
go_repository(
name = "com_github_docker_go_metrics",
importpath = "github.com/docker/go-metrics",
sum = "h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=",
version = "v0.0.1",
)
go_repository(
name = "com_github_docker_go_units",
importpath = "github.com/docker/go-units",
sum = "h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=",
version = "v0.4.0",
)
go_repository(
name = "com_github_docker_libtrust",
importpath = "github.com/docker/libtrust",
sum = "h1:ZClxb8laGDf5arXfYcAtECDFgAgHklGI8CxgjHnXKJ4=",
version = "v0.0.0-20150114040149-fa567046d9b1",
)
go_repository(
name = "com_github_docker_spdystream",
importpath = "github.com/docker/spdystream",
sum = "h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg=",
version = "v0.0.0-20160310174837-449fdfce4d96",
)
go_repository(
name = "com_github_docopt_docopt_go",
importpath = "github.com/docopt/docopt-go",
sum = "h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ=",
version = "v0.0.0-20180111231733-ee0de3bc6815",
)
go_repository(
name = "com_github_dustin_go_humanize",
importpath = "github.com/dustin/go-humanize",
sum = "h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=",
version = "v1.0.0",
)
go_repository(
name = "com_github_eknkc_amber",
importpath = "github.com/eknkc/amber",
sum = "h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o=",
version = "v0.0.0-20171010120322-cdade1c07385",
)
go_repository(
name = "com_github_elazarl_goproxy",
importpath = "github.com/elazarl/goproxy",
sum = "h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=",
version = "v0.0.0-20180725130230-947c36da3153",
)
go_repository(
name = "com_github_emicklei_dot",
importpath = "github.com/emicklei/dot",
sum = "h1:bkzvwgIhhw/cuxxnJy5/5+ZL3GnhFxFfv0eolHtWE2w=",
version = "v0.10.1",
)
go_repository(
name = "com_github_emicklei_go_restful",
importpath = "github.com/emicklei/go-restful",
sum = "h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=",
version = "v2.9.5+incompatible",
)
go_repository(
name = "com_github_emirpasic_gods",
importpath = "github.com/emirpasic/gods",
sum = "h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=",
version = "v1.12.0",
)
go_repository(
name = "com_github_envoyproxy_go_control_plane",
importpath = "github.com/envoyproxy/go-control-plane",
sum = "h1:EmNYJhPYy0pOFjCx2PrgtaBXmee0iUX9hLlxE1xHOJE=",
version = "v0.9.9-0.20201210154907-fd9021fe5dad",
)
go_repository(
name = "com_github_envoyproxy_protoc_gen_validate",
importpath = "github.com/envoyproxy/protoc-gen-validate",
sum = "h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=",
version = "v0.1.0",
)
go_repository(
name = "com_github_etcd_io_bbolt",
importpath = "github.com/etcd-io/bbolt",
sum = "h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM=",
version = "v1.3.3",
)
go_repository(
name = "com_github_evanphx_json_patch",
importpath = "github.com/evanphx/json-patch",
sum = "h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses=",
version = "v4.9.0+incompatible",
)
go_repository(
name = "com_github_evilsuperstars_go_cidrman",
importpath = "github.com/EvilSuperstars/go-cidrman",
sum = "h1:D9u6wYxZ2bPjDwYYq25y+n6ZmOKj/TsAMGSl4xL1yQI=",
version = "v0.0.0-20190607145828-28e79e32899a",
)
go_repository(
name = "com_github_exponent_io_jsonpath",
importpath = "github.com/exponent-io/jsonpath",
sum = "h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM=",
version = "v0.0.0-20151013193312-d6023ce2651d",
)
go_repository(
name = "com_github_fasthttp_contrib_websocket",
importpath = "github.com/fasthttp-contrib/websocket",
sum = "h1:DddqAaWDpywytcG8w/qoQ5sAN8X12d3Z3koB0C3Rxsc=",
version = "v0.0.0-20160511215533-1f3b11f56072",
)
go_repository(
name = "com_github_fatih_camelcase",
importpath = "github.com/fatih/camelcase",
sum = "h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8=",
version = "v1.0.0",
)
go_repository(
name = "com_github_fatih_color",
importpath = "github.com/fatih/color",
sum = "h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg=",
version = "v1.10.0",
)
go_repository(
name = "com_github_fatih_structs",
importpath = "github.com/fatih/structs",
sum = "h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=",
version = "v1.1.0",
)
go_repository(
name = "com_github_felixge_httpsnoop",
importpath = "github.com/felixge/httpsnoop",
sum = "h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ=",
version = "v1.0.1",
)
go_repository(
name = "com_github_flosch_pongo2",
importpath = "github.com/flosch/pongo2",
sum = "h1:GY1+t5Dr9OKADM64SYnQjw/w99HMYvQ0A8/JoUkxVmc=",
version = "v0.0.0-20190707114632-bbf5a6c351f4",
)
go_repository(
name = "com_github_flynn_go_shlex",
importpath = "github.com/flynn/go-shlex",
sum = "h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=",
version = "v0.0.0-20150515145356-3f9db97f8568",
)
go_repository(
name = "com_github_form3tech_oss_jwt_go",
importpath = "github.com/form3tech-oss/jwt-go",
sum = "h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=",
version = "v3.2.2+incompatible",
)
go_repository(
name = "com_github_fortytw2_leaktest",
importpath = "github.com/fortytw2/leaktest",
sum = "h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=",
version = "v1.3.0",
)
go_repository(
name = "com_github_frankban_quicktest",
importpath = "github.com/frankban/quicktest",
sum = "h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY=",
version = "v1.11.3",
)
go_repository(
name = "com_github_fsnotify_fsnotify",
importpath = "github.com/fsnotify/fsnotify",
sum = "h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=",
version = "v1.4.9",
)
go_repository(
name = "com_github_fullsailor_pkcs7",
importpath = "github.com/fullsailor/pkcs7",
sum = "h1:RDBNVkRviHZtvDvId8XSGPu3rmpmSe+wKRcEWNgsfWU=",
version = "v0.0.0-20190404230743-d7302db945fa",
)
go_repository(
name = "com_github_fvbommel_sortorder",
importpath = "github.com/fvbommel/sortorder",
sum = "h1:dSnXLt4mJYH25uDDGa3biZNQsozaUWDSWeKJ0qqFfzE=",
version = "v1.0.1",
)
go_repository(
name = "com_github_garyburd_redigo",
importpath = "github.com/garyburd/redigo",
sum = "h1:LofdAjjjqCSXMwLGgOgnE+rdPuvX9DxCqaHwKy7i/ko=",
version = "v0.0.0-20150301180006-535138d7bcd7",
)
go_repository(
name = "com_github_gavv_httpexpect",
importpath = "github.com/gavv/httpexpect",
sum = "h1:1X9kcRshkSKEjNJJxX9Y9mQ5BRfbxU5kORdjhlA1yX8=",
version = "v2.0.0+incompatible",
)
go_repository(
name = "com_github_gdamore_encoding",
importpath = "github.com/gdamore/encoding",
sum = "h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko=",
version = "v1.0.0",
)
go_repository(
name = "com_github_gdamore_tcell",
importpath = "github.com/gdamore/tcell",
sum = "h1:r35w0JBADPZCVQijYebl6YMWWtHRqVEGt7kL2eBADRM=",
version = "v1.3.0",
)
go_repository(
name = "com_github_getsentry_sentry_go",
importpath = "github.com/getsentry/sentry-go",
sum = "h1:MIPe7ScHADsrK2vznqmhksIUFxq7m0JfTh+ZIMkI+VQ=",
version = "v0.5.1",
)
go_repository(
name = "com_github_ghemawat_stream",
importpath = "github.com/ghemawat/stream",
sum = "h1:r5GgOLGbza2wVHRzK7aAj6lWZjfbAwiu/RDCVOKjRyM=",
version = "v0.0.0-20171120220530-696b145b53b9",
)
go_repository(
name = "com_github_ghodss_yaml",
importpath = "github.com/ghodss/yaml",
sum = "h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=",
version = "v1.0.0",
)
go_repository(
name = "com_github_gin_contrib_sse",
importpath = "github.com/gin-contrib/sse",
sum = "h1:t8FVkw33L+wilf2QiWkw0UV77qRpcH/JHPKGpKa2E8g=",
version = "v0.0.0-20190301062529-5545eab6dad3",
)
go_repository(
name = "com_github_gin_gonic_gin",
importpath = "github.com/gin-gonic/gin",
sum = "h1:3tMoCCfM7ppqsR0ptz/wi1impNpT7/9wQtMZ8lr1mCQ=",
version = "v1.4.0",
)
go_repository(
name = "com_github_gliderlabs_ssh",
importpath = "github.com/gliderlabs/ssh",
sum = "h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=",
version = "v0.2.2",
)
go_repository(
name = "com_github_globalsign_mgo",
importpath = "github.com/globalsign/mgo",
sum = "h1:DujepqpGd1hyOd7aW59XpK7Qymp8iy83xq74fLr21is=",
version = "v0.0.0-20181015135952-eeefdecb41b8",
)
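# Note (added for clarity): the "keep" directive below tells Gazelle / `update-repos`
# not to rewrite or remove the rule that follows when repositories are regenerated.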
# keep
go_repository(
name = "com_github_go_bindata_go_bindata",
importpath = "github.com/go-bindata/go-bindata",
sum = "h1:5vjJMVhowQdPzjE1LdxyFF7YFTXg5IgGVW4gBr5IbvE=",
version = "v3.1.2+incompatible",
)
go_repository(
name = "com_github_go_check_check",
importpath = "github.com/go-check/check",
sum = "h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI=",
version = "v0.0.0-20180628173108-788fd7840127",
)
go_repository(
name = "com_github_go_delve_delve",
importpath = "github.com/go-delve/delve",
sum = "h1:gQsRvFdR0BGk19NROQZsAv6iG4w5QIZoJlxJeEUBb0c=",
version = "v1.5.0",
)
go_repository(
name = "com_github_go_errors_errors",
importpath = "github.com/go-errors/errors",
sum = "h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=",
version = "v1.0.1",
)
go_repository(
name = "com_github_go_gl_glfw_v3_3_glfw",
importpath = "github.com/go-gl/glfw/v3.3/glfw",
sum = "h1:WtGNWLvXpe6ZudgnXrq0barxBImvnnJoMEhXAzcbM0I=",
version = "v0.0.0-20200222043503-6f7a984d4dc4",
)
go_repository(
name = "com_github_go_ini_ini",
importpath = "github.com/go-ini/ini",
sum = "h1:Mujh4R/dH6YL8bxuISne3xX2+qcQ9p0IxKAP6ExWoUo=",
version = "v1.25.4",
)
go_repository(
name = "com_github_go_kit_kit",
importpath = "github.com/go-kit/kit",
sum = "h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=",
version = "v0.9.0",
)
go_repository(
name = "com_github_go_kit_log",
importpath = "github.com/go-kit/log",
sum = "h1:DGJh0Sm43HbOeYDNnVZFl8BvcYVvjD5bqYJvp0REbwQ=",
version = "v0.1.0",
)
go_repository(
name = "com_github_go_logfmt_logfmt",
importpath = "github.com/go-logfmt/logfmt",
sum = "h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=",
version = "v0.5.0",
)
go_repository(
name = "com_github_go_logr_logr",
importpath = "github.com/go-logr/logr",
sum = "h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc=",
version = "v0.4.0",
)
go_repository(
name = "com_github_go_logr_zapr",
importpath = "github.com/go-logr/zapr",
sum = "h1:v6Ji8yBW77pva6NkJKQdHLAJKrIJKRHz0RXwPqCHSR4=",
version = "v0.2.0",
)
go_repository(
name = "com_github_go_martini_martini",
importpath = "github.com/go-martini/martini",
sum = "h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk=",
version = "v0.0.0-20170121215854-22fa46961aab",
)
go_repository(
name = "com_github_go_openapi_analysis",
importpath = "github.com/go-openapi/analysis",
replace = "github.com/go-openapi/analysis",
sum = "h1:8b2ZgKfKIUTVQpTb77MoRDIMEIwvDVw40o3aOXdfYzI=",
version = "v0.19.5",
)
go_repository(
name = "com_github_go_openapi_errors",
importpath = "github.com/go-openapi/errors",
sum = "h1:9SnKdGhiPZHF3ttwFMiCBEb8jQ4IDdrK+5+a0oTygA4=",
version = "v0.19.9",
)
go_repository(
name = "com_github_go_openapi_jsonpointer",
importpath = "github.com/go-openapi/jsonpointer",
sum = "h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=",
version = "v0.19.5",
)
go_repository(
name = "com_github_go_openapi_jsonreference",
importpath = "github.com/go-openapi/jsonreference",
sum = "h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o=",
version = "v0.19.3",
)
go_repository(
name = "com_github_go_openapi_loads",
importpath = "github.com/go-openapi/loads",
replace = "github.com/go-openapi/loads",
sum = "h1:wCOBNscACI8L93tt5tvB2zOMkJ098XCw3fP0BY2ybDA=",
version = "v0.19.0",
)
go_repository(
name = "com_github_go_openapi_runtime",
importpath = "github.com/go-openapi/runtime",
sum = "h1:K/6PoVNj5WJXUnMk+VEbELeXjtBkCS1UxTDa04tdXE0=",
version = "v0.19.26",
)
go_repository(
name = "com_github_go_openapi_spec",
importpath = "github.com/go-openapi/spec",
replace = "github.com/go-openapi/spec",
sum = "h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc=",
version = "v0.19.3",
)
go_repository(
name = "com_github_go_openapi_strfmt",
importpath = "github.com/go-openapi/strfmt",
replace = "github.com/go-openapi/strfmt",
sum = "h1:l2omNtmNbMc39IGptl9BuXBEKcZfS8zjrTsPKTiJiDM=",
version = "v0.20.0",
)
go_repository(
name = "com_github_go_openapi_swag",
importpath = "github.com/go-openapi/swag",
sum = "h1:233UVgMy1DlmCYYfOiFpta6e2urloh+sEs5id6lyzog=",
version = "v0.19.13",
)
go_repository(
name = "com_github_go_openapi_validate",
importpath = "github.com/go-openapi/validate",
sum = "h1:QGQ5CvK74E28t3DkegGweKR+auemUi5IdpMc4x3UW6s=",
version = "v0.20.1",
)
go_repository(
name = "com_github_go_sql_driver_mysql",
importpath = "github.com/go-sql-driver/mysql",
sum = "h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=",
version = "v1.5.0",
)
go_repository(
name = "com_github_go_stack_stack",
importpath = "github.com/go-stack/stack",
sum = "h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=",
version = "v1.8.0",
)
go_repository(
name = "com_github_gobuffalo_attrs",
importpath = "github.com/gobuffalo/attrs",
sum = "h1:hSkbZ9XSyjyBirMeqSqUrK+9HboWrweVlzRNqoBi2d4=",
version = "v0.0.0-20190224210810-a9411de4debd",
)
go_repository(
name = "com_github_gobuffalo_depgen",
importpath = "github.com/gobuffalo/depgen",
sum = "h1:31atYa/UW9V5q8vMJ+W6wd64OaaTHUrCUXER358zLM4=",
version = "v0.1.0",
)
go_repository(
name = "com_github_gobuffalo_envy",
importpath = "github.com/gobuffalo/envy",
sum = "h1:GlXgaiBkmrYMHco6t4j7SacKO4XUjvh5pwXh0f4uxXU=",
version = "v1.7.0",
)
go_repository(
name = "com_github_gobuffalo_flect",
importpath = "github.com/gobuffalo/flect",
sum = "h1:3GQ53z7E3o00C/yy7Ko8VXqQXoJGLkrTQCLTF1EjoXU=",
version = "v0.1.3",
)
go_repository(
name = "com_github_gobuffalo_genny",
importpath = "github.com/gobuffalo/genny",
sum = "h1:iQ0D6SpNXIxu52WESsD+KoQ7af2e3nCfnSBoSF/hKe0=",
version = "v0.1.1",
)
go_repository(
name = "com_github_gobuffalo_gitgen",
importpath = "github.com/gobuffalo/gitgen",
sum = "h1:mSVZ4vj4khv+oThUfS+SQU3UuFIZ5Zo6UNcvK8E8Mz8=",
version = "v0.0.0-20190315122116-cc086187d211",
)
go_repository(
name = "com_github_gobuffalo_gogen",
importpath = "github.com/gobuffalo/gogen",
sum = "h1:dLg+zb+uOyd/mKeQUYIbwbNmfRsr9hd/WtYWepmayhI=",
version = "v0.1.1",
)
go_repository(
name = "com_github_gobuffalo_logger",
importpath = "github.com/gobuffalo/logger",
sum = "h1:8thhT+kUJMTMy3HlX4+y9Da+BNJck+p109tqqKp7WDs=",
version = "v0.0.0-20190315122211-86e12af44bc2",
)
go_repository(
name = "com_github_gobuffalo_mapi",
importpath = "github.com/gobuffalo/mapi",
sum = "h1:fq9WcL1BYrm36SzK6+aAnZ8hcp+SrmnDyAxhNx8dvJk=",
version = "v1.0.2",
)
go_repository(
name = "com_github_gobuffalo_packd",
importpath = "github.com/gobuffalo/packd",
sum = "h1:4sGKOD8yaYJ+dek1FDkwcxCHA40M4kfKgFHx8N2kwbU=",
version = "v0.1.0",
)
go_repository(
name = "com_github_gobuffalo_packr_v2",
importpath = "github.com/gobuffalo/packr/v2",
sum = "h1:Ir9W9XIm9j7bhhkKE9cokvtTl1vBm62A/fene/ZCj6A=",
version = "v2.2.0",
)
go_repository(
name = "com_github_gobuffalo_syncx",
importpath = "github.com/gobuffalo/syncx",
sum = "h1:tpom+2CJmpzAWj5/VEHync2rJGi+epHNIeRSWjzGA+4=",
version = "v0.0.0-20190224160051-33c29581e754",
)
go_repository(
name = "com_github_gobwas_httphead",
importpath = "github.com/gobwas/httphead",
sum = "h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0=",
version = "v0.0.0-20180130184737-2c6c146eadee",
)
go_repository(
name = "com_github_gobwas_pool",
importpath = "github.com/gobwas/pool",
sum = "h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8=",
version = "v0.2.0",
)
go_repository(
name = "com_github_gobwas_ws",
importpath = "github.com/gobwas/ws",
sum = "h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo=",
version = "v1.0.2",
)
go_repository(
name = "com_github_goccy_go_json",
importpath = "github.com/goccy/go-json",
sum = "h1:B44qRUFwz/vxPKPISQ1KhvzRi9kZ28RAf6YtjriBZ5k=",
version = "v0.7.4",
)
go_repository(
name = "com_github_godbus_dbus",
importpath = "github.com/godbus/dbus",
sum = "h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8=",
version = "v0.0.0-20190422162347-ade71ed3457e",
)
go_repository(
name = "com_github_godbus_dbus_v5",
importpath = "github.com/godbus/dbus/v5",
sum = "h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=",
version = "v5.0.4",
)
go_repository(
name = "com_github_gofrs_uuid",
importpath = "github.com/gofrs/uuid",
sum = "h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=",
version = "v4.0.0+incompatible",
)
go_repository(
name = "com_github_gogo_googleapis",
importpath = "github.com/gogo/googleapis",
sum = "h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI=",
version = "v1.4.0",
)
go_repository(
name = "com_github_gogo_protobuf",
importpath = "github.com/gogo/protobuf",
sum = "h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=",
version = "v1.3.2",
)
go_repository(
name = "com_github_gogo_status",
importpath = "github.com/gogo/status",
sum = "h1:+eIkrewn5q6b30y+g/BJINVVdi2xH7je5MPJ3ZPK3JA=",
version = "v1.1.0",
)
go_repository(
name = "com_github_golang_glog",
importpath = "github.com/golang/glog",
sum = "h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=",
version = "v0.0.0-20160126235308-23def4e6c14b",
)
go_repository(
name = "com_github_golang_groupcache",
importpath = "github.com/golang/groupcache",
sum = "h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=",
version = "v0.0.0-20200121045136-8c9f03a8e57e",
)
go_repository(
name = "com_github_golang_migrate_migrate",
importpath = "github.com/golang-migrate/migrate",
sum = "h1:R7OzwvCJTCgwapPCiX6DyBiu2czIUMDCB118gFTKTUA=",
version = "v3.5.4+incompatible",
)
go_repository(
name = "com_github_golang_mock",
importpath = "github.com/golang/mock",
sum = "h1:jlYHihg//f7RRwuPfptm04yp4s7O6Kw8EZiVYIGcH0g=",
version = "v1.5.0",
)
go_repository(
name = "com_github_golang_protobuf",
importpath = "github.com/golang/protobuf",
replace = "github.com/golang/protobuf",
sum = "h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=",
version = "v1.5.2",
)
go_repository(
name = "com_github_golang_snappy",
importpath = "github.com/golang/snappy",
sum = "h1:gFVkHXmVAhEbxZVDln5V9GKrLaluNoFHDbrZwAWZgws=",
version = "v0.0.2-0.20190904063534-ff6b7dc882cf",
)
go_repository(
name = "com_github_golangplus_bytes",
importpath = "github.com/golangplus/bytes",
sum = "h1:7xqw01UYS+KCI25bMrPxwNYkSns2Db1ziQPpVq99FpE=",
version = "v0.0.0-20160111154220-45c989fe5450",
)
go_repository(
name = "com_github_golangplus_fmt",
importpath = "github.com/golangplus/fmt",
sum = "h1:f5gsjBiF9tRRVomCvrkGMMWI8W1f2OBFar2c5oakAP0=",
version = "v0.0.0-20150411045040-2a5d6d7d2995",
)
go_repository(
name = "com_github_golangplus_testing",
importpath = "github.com/golangplus/testing",
sum = "h1:KhcknUwkWHKZPbFy2P7jH5LKJ3La+0ZeknkkmrSgqb0=",
version = "v0.0.0-20180327235837-af21d9c3145e",
)
go_repository(
name = "com_github_gomodule_redigo",
importpath = "github.com/gomodule/redigo",
sum = "h1:y0Wmhvml7cGnzPa9nocn/fMraMH/lMDdeG+rkx4VgYY=",
version = "v1.7.1-0.20190724094224-574c33c3df38",
)
go_repository(
name = "com_github_google_btree",
importpath = "github.com/google/btree",
sum = "h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=",
version = "v1.0.0",
)
go_repository(
name = "com_github_google_flatbuffers",
importpath = "github.com/google/flatbuffers",
sum = "h1:/PtAHvnBY4Kqnx/xCQ3OIV9uYcSFGScBsWI3Oogeh6w=",
version = "v1.12.0",
)
go_repository(
name = "com_github_google_go_cmp",
importpath = "github.com/google/go-cmp",
replace = "github.com/google/go-cmp",
sum = "h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=",
version = "v0.5.5",
)
go_repository(
name = "com_github_google_go_dap",
importpath = "github.com/google/go-dap",
sum = "h1:whjIGQRumwbR40qRU7CEKuFLmePUUc2s4Nt9DoXXxWk=",
version = "v0.2.0",
)
go_repository(
name = "com_github_google_go_github_v32",
importpath = "github.com/google/go-github/v32",
sum = "h1:GWkQOdXqviCPx7Q7Fj+KyPoGm4SwHRh8rheoPhd27II=",
version = "v32.1.0",
)
go_repository(
name = "com_github_google_go_querystring",
importpath = "github.com/google/go-querystring",
sum = "h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=",
version = "v1.0.0",
)
go_repository(
name = "com_github_google_gofuzz",
importpath = "github.com/google/gofuzz",
sum = "h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=",
version = "v1.1.0",
)
go_repository(
name = "com_github_google_martian_v3",
importpath = "github.com/google/martian/v3",
sum = "h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60=",
version = "v3.1.0",
)
go_repository(
name = "com_github_google_pprof",
importpath = "github.com/google/pprof",
sum = "h1:zIaiqGYDQwa4HVx5wGRTXbx38Pqxjemn4BP98wpzpXo=",
version = "v0.0.0-20210226084205-cbba55b83ad5",
)
go_repository(
name = "com_github_google_renameio",
importpath = "github.com/google/renameio",
sum = "h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=",
version = "v0.1.0",
)
go_repository(
name = "com_github_google_uuid",
importpath = "github.com/google/uuid",
sum = "h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=",
version = "v1.2.0",
)
go_repository(
name = "com_github_googleapis_gax_go_v2",
importpath = "github.com/googleapis/gax-go/v2",
sum = "h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=",
version = "v2.0.5",
)
go_repository(
name = "com_github_googleapis_gnostic",
importpath = "github.com/googleapis/gnostic",
sum = "h1:A8Yhf6EtqTv9RMsU6MQTyrtV1TjWlR6xU9BsZIwuTCM=",
version = "v0.5.1",
)
go_repository(
name = "com_github_googleapis_google_cloud_go_testing",
importpath = "github.com/googleapis/google-cloud-go-testing",
sum = "h1:YBqybTXA//1pltKcwyntNQdgDw6AnA5oHZCXFOiZhoo=",
version = "v0.0.0-20191008195207-8e1d251e947d",
)
go_repository(
name = "com_github_gopherjs_gopherjs",
importpath = "github.com/gopherjs/gopherjs",
sum = "h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=",
version = "v0.0.0-20181017120253-0766667cb4d1",
)
go_repository(
name = "com_github_gorilla_handlers",
importpath = "github.com/gorilla/handlers",
sum = "h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4=",
version = "v1.5.1",
)
go_repository(
name = "com_github_gorilla_mux",
importpath = "github.com/gorilla/mux",
sum = "h1:zoNxOV7WjqXptQOVngLmcSQgXmgk4NMz1HibBchjl/I=",
version = "v1.7.2",
)
go_repository(
name = "com_github_gorilla_securecookie",
importpath = "github.com/gorilla/securecookie",
sum = "h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ=",
version = "v1.1.1",
)
go_repository(
name = "com_github_gorilla_sessions",
importpath = "github.com/gorilla/sessions",
sum = "h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI=",
version = "v1.2.1",
)
go_repository(
name = "com_github_gorilla_websocket",
importpath = "github.com/gorilla/websocket",
sum = "h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=",
version = "v1.4.2",
)
go_repository(
name = "com_github_graph_gophers_graphql_go",
importpath = "github.com/graph-gophers/graphql-go",
sum = "h1:wVVEPeC5IXelyaQ8UyWKugIyNIFOVF9Kn+gu/1/tXTE=",
version = "v1.1.0",
)
go_repository(
name = "com_github_gregjones_httpcache",
importpath = "github.com/gregjones/httpcache",
sum = "h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM=",
version = "v0.0.0-20180305231024-9cad4c3443a7",
)
go_repository(
name = "com_github_grpc_ecosystem_go_grpc_middleware",
importpath = "github.com/grpc-ecosystem/go-grpc-middleware",
sum = "h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=",
version = "v1.3.0",
)
go_repository(
name = "com_github_grpc_ecosystem_go_grpc_prometheus",
importpath = "github.com/grpc-ecosystem/go-grpc-prometheus",
sum = "h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=",
version = "v1.2.0",
)
go_repository(
name = "com_github_grpc_ecosystem_grpc_gateway",
importpath = "github.com/grpc-ecosystem/grpc-gateway",
sum = "h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=",
version = "v1.16.0",
)
go_repository(
name = "com_github_hashicorp_consul_api",
importpath = "github.com/hashicorp/consul/api",
sum = "h1:BNQPM9ytxj6jbjjdRPioQ94T6YXriSopn0i8COv6SRA=",
version = "v1.1.0",
)
go_repository(
name = "com_github_hashicorp_consul_sdk",
importpath = "github.com/hashicorp/consul/sdk",
sum = "h1:LnuDWGNsoajlhGyHJvuWW6FVqRl8JOTPqS6CPTsYjhY=",
version = "v0.1.1",
)
go_repository(
name = "com_github_hashicorp_errwrap",
importpath = "github.com/hashicorp/errwrap",
sum = "h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=",
version = "v1.0.0",
)
go_repository(
name = "com_github_hashicorp_go_cleanhttp",
importpath = "github.com/hashicorp/go-cleanhttp",
sum = "h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=",
version = "v0.5.1",
)
go_repository(
name = "com_github_hashicorp_go_hclog",
importpath = "github.com/hashicorp/go-hclog",
sum = "h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU=",
version = "v0.14.1",
)
go_repository(
name = "com_github_hashicorp_go_immutable_radix",
importpath = "github.com/hashicorp/go-immutable-radix",
sum = "h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=",
version = "v1.0.0",
)
go_repository(
name = "com_github_hashicorp_go_msgpack",
importpath = "github.com/hashicorp/go-msgpack",
sum = "h1:9byZdVjKTe5mce63pRVNP1L7UAmdHOTEMGehn6KvJWs=",
version = "v1.1.5",
)
go_repository(
name = "com_github_hashicorp_go_multierror",
importpath = "github.com/hashicorp/go-multierror",
sum = "h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=",
version = "v1.0.0",
)
go_repository(
name = "com_github_hashicorp_go_net",
importpath = "github.com/hashicorp/go.net",
sum = "h1:sNCoNyDEvN1xa+X0baata4RdcpKwcMS6DH+xwfqPgjw=",
version = "v0.0.1",
)
go_repository(
name = "com_github_hashicorp_go_retryablehttp",
importpath = "github.com/hashicorp/go-retryablehttp",
sum = "h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s=",
version = "v0.5.3",
)
go_repository(
name = "com_github_hashicorp_go_rootcerts",
importpath = "github.com/hashicorp/go-rootcerts",
sum = "h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI=",
version = "v1.0.0",
)
go_repository(
name = "com_github_hashicorp_go_sockaddr",
importpath = "github.com/hashicorp/go-sockaddr",
sum = "h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs=",
version = "v1.0.0",
)
go_repository(
name = "com_github_hashicorp_go_syslog",
importpath = "github.com/hashicorp/go-syslog",
sum = "h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE=",
version = "v1.0.0",
)
go_repository(
name = "com_github_hashicorp_go_uuid",
importpath = "github.com/hashicorp/go-uuid",
sum = "h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=",
version = "v1.0.1",
)
go_repository(
name = "com_github_hashicorp_go_version",
importpath = "github.com/hashicorp/go-version",
sum = "h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E=",
version = "v1.2.0",
)
go_repository(
name = "com_github_hashicorp_golang_lru",
importpath = "github.com/hashicorp/golang-lru",
sum = "h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=",
version = "v0.5.4",
)
go_repository(
name = "com_github_hashicorp_hcl",
importpath = "github.com/hashicorp/hcl",
sum = "h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=",
version = "v1.0.0",
)
go_repository(
name = "com_github_hashicorp_logutils",
importpath = "github.com/hashicorp/logutils",
sum = "h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=",
version = "v1.0.0",
)
go_repository(
name = "com_github_hashicorp_mdns",
importpath = "github.com/hashicorp/mdns",
sum = "h1:WhIgCr5a7AaVH6jPUwjtRuuE7/RDufnUvzIr48smyxs=",
version = "v1.0.0",
)
go_repository(
name = "com_github_hashicorp_memberlist",
importpath = "github.com/hashicorp/memberlist",
sum = "h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M=",
version = "v0.1.3",
)
go_repository(
name = "com_github_hashicorp_raft",
importpath = "github.com/hashicorp/raft",
sum = "h1:mHzHIrF0S91d3A7RPBvuqkgB4d/7oFJZyvf1Q4m7GA0=",
version = "v1.2.0",
)
go_repository(
name = "com_github_hashicorp_raft_boltdb",
importpath = "github.com/hashicorp/raft-boltdb",
sum = "h1:xykPFhrBAS2J0VBzVa5e80b5ZtYuNQtgXjN40qBZlD4=",
version = "v0.0.0-20171010151810-6e5ba93211ea",
)
go_repository(
name = "com_github_hashicorp_serf",
importpath = "github.com/hashicorp/serf",
sum = "h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0=",
version = "v0.8.2",
)
go_repository(
name = "com_github_hpcloud_tail",
importpath = "github.com/hpcloud/tail",
sum = "h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=",
version = "v1.0.0",
)
go_repository(
name = "com_github_huandu_xstrings",
importpath = "github.com/huandu/xstrings",
sum = "h1:4jgBlKK6tLKFvO8u5pmYjG91cqytmDCDvGh7ECVFfFs=",
version = "v1.3.1",
)
go_repository(
name = "com_github_hydrogen18_memlistener",
importpath = "github.com/hydrogen18/memlistener",
sum = "h1:EPRgaDqXpLFUJLXZdGLnBTy1l6CLiNAPnvn2l+kHit0=",
version = "v0.0.0-20141126152155-54553eb933fb",
)
go_repository(
name = "com_github_ianlancetaylor_cgosymbolizer",
importpath = "github.com/ianlancetaylor/cgosymbolizer",
sum = "h1:IpTHAzWv1pKDDWeJDY5VOHvqc2T9d3C8cPKEf2VPqHE=",
version = "v0.0.0-20200424224625-be1b05b0b279",
)
go_repository(
name = "com_github_ianlancetaylor_demangle",
importpath = "github.com/ianlancetaylor/demangle",
sum = "h1:mV02weKRL81bEnm8A0HT1/CAelMQDBuQIfLw8n+d6xI=",
version = "v0.0.0-20200824232613-28f6c0f3b639",
)
go_repository(
name = "com_github_imdario_mergo",
importpath = "github.com/imdario/mergo",
sum = "h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=",
version = "v0.3.11",
)
go_repository(
name = "com_github_imkira_go_interpol",
importpath = "github.com/imkira/go-interpol",
sum = "h1:KIiKr0VSG2CUW1hl1jpiyuzuJeKUUpC8iM1AIE7N1Vk=",
version = "v1.1.0",
)
go_repository(
name = "com_github_inconshreveable_go_update",
importpath = "github.com/inconshreveable/go-update",
sum = "h1:WfD7VjIE6z8dIvMsI4/s+1qr5EL+zoIGev1BQj1eoJ8=",
version = "v0.0.0-20160112193335-8152e7eb6ccf",
)
go_repository(
name = "com_github_inconshreveable_mousetrap",
importpath = "github.com/inconshreveable/mousetrap",
sum = "h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=",
version = "v1.0.0",
)
go_repository(
name = "com_github_iris_contrib_blackfriday",
importpath = "github.com/iris-contrib/blackfriday",
sum = "h1:o5sHQHHm0ToHUlAJSTjW9UWicjJSDDauOOQ2AHuIVp4=",
version = "v2.0.0+incompatible",
)
go_repository(
name = "com_github_iris_contrib_go_uuid",
importpath = "github.com/iris-contrib/go.uuid",
sum = "h1:XZubAYg61/JwnJNbZilGjf3b3pB80+OQg2qf6c8BfWE=",
version = "v2.0.0+incompatible",
)
go_repository(
name = "com_github_iris_contrib_i18n",
importpath = "github.com/iris-contrib/i18n",
sum = "h1:Kyp9KiXwsyZRTeoNjgVCrWks7D8ht9+kg6yCjh8K97o=",
version = "v0.0.0-20171121225848-987a633949d0",
)
go_repository(
name = "com_github_iris_contrib_schema",
importpath = "github.com/iris-contrib/schema",
sum = "h1:10g/WnoRR+U+XXHWKBHeNy/+tZmM2kcAVGLOsz+yaDA=",
version = "v0.0.1",
)
go_repository(
name = "com_github_j_keck_arping",
importpath = "github.com/j-keck/arping",
sum = "h1:742eGXur0715JMq73aD95/FU0XpVKXqNuTnEfXsLOYQ=",
version = "v0.0.0-20160618110441-2cf9dc699c56",
)
go_repository(
name = "com_github_jackc_fake",
importpath = "github.com/jackc/fake",
sum = "h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc=",
version = "v0.0.0-20150926172116-812a484cc733",
)
go_repository(
name = "com_github_jackc_pgx",
importpath = "github.com/jackc/pgx",
sum = "h1:BRJ4G3UPtvml5R1ey0biqqGuYUGayMYekm3woO75orY=",
version = "v3.5.0+incompatible",
)
go_repository(
name = "com_github_jbenet_go_context",
importpath = "github.com/jbenet/go-context",
sum = "h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=",
version = "v0.0.0-20150711004518-d14ea06fba99",
)
go_repository(
name = "com_github_jessevdk_go_flags",
importpath = "github.com/jessevdk/go-flags",
sum = "h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA=",
version = "v1.4.0",
)
go_repository(
name = "com_github_jmespath_go_jmespath",
importpath = "github.com/jmespath/go-jmespath",
sum = "h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=",
version = "v0.4.0",
)
go_repository(
name = "com_github_jmespath_go_jmespath_internal_testify",
importpath = "github.com/jmespath/go-jmespath/internal/testify",
sum = "h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=",
version = "v1.5.1",
)
go_repository(
name = "com_github_jmoiron_sqlx",
importpath = "github.com/jmoiron/sqlx",
sum = "h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA=",
version = "v1.2.0",
)
go_repository(
name = "com_github_joho_godotenv",
importpath = "github.com/joho/godotenv",
sum = "h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=",
version = "v1.3.0",
)
go_repository(
name = "com_github_joker_hpp",
importpath = "github.com/Joker/hpp",
sum = "h1:65+iuJYdRXv/XyN62C1uEmmOx3432rNG/rKlX6V7Kkc=",
version = "v1.0.0",
)
go_repository(
name = "com_github_joker_jade",
importpath = "github.com/Joker/jade",
sum = "h1:mreN1m/5VJ/Zc3b4pzj9qU6D9SRQ6Vm+3KfI328t3S8=",
version = "v1.0.1-0.20190614124447-d475f43051e7",
)
go_repository(
name = "com_github_jonboulle_clockwork",
importpath = "github.com/jonboulle/clockwork",
sum = "h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=",
version = "v0.1.0",
)
go_repository(
name = "com_github_josharian_intern",
importpath = "github.com/josharian/intern",
sum = "h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=",
version = "v1.0.0",
)
go_repository(
name = "com_github_jpillora_backoff",
importpath = "github.com/jpillora/backoff",
sum = "h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=",
version = "v1.0.0",
)
go_repository(
name = "com_github_json_iterator_go",
importpath = "github.com/json-iterator/go",
sum = "h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=",
version = "v1.1.11",
)
go_repository(
name = "com_github_jstemmer_go_junit_report",
importpath = "github.com/jstemmer/go-junit-report",
sum = "h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=",
version = "v0.9.1",
)
go_repository(
name = "com_github_jtolds_gls",
importpath = "github.com/jtolds/gls",
sum = "h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=",
version = "v4.20.0+incompatible",
)
go_repository(
name = "com_github_juju_errors",
importpath = "github.com/juju/errors",
sum = "h1:rhqTjzJlm7EbkELJDKMTU7udov+Se0xZkWmugr6zGok=",
version = "v0.0.0-20181118221551-089d3ea4e4d5",
)
go_repository(
name = "com_github_juju_loggo",
importpath = "github.com/juju/loggo",
sum = "h1:MK144iBQF9hTSwBW/9eJm034bVoG30IshVm688T2hi8=",
version = "v0.0.0-20180524022052-584905176618",
)
go_repository(
name = "com_github_juju_testing",
importpath = "github.com/juju/testing",
sum = "h1:WQM1NildKThwdP7qWrNAFGzp4ijNLw8RlgENkaI4MJs=",
version = "v0.0.0-20180920084828-472a3e8b2073",
)
go_repository(
name = "com_github_julienschmidt_httprouter",
importpath = "github.com/julienschmidt/httprouter",
sum = "h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=",
version = "v1.3.0",
)
go_repository(
name = "com_github_k0kubun_colorstring",
importpath = "github.com/k0kubun/colorstring",
sum = "h1:uC1QfSlInpQF+M0ao65imhwqKnz3Q2z/d8PWZRMQvDM=",
version = "v0.0.0-20150214042306-9440f1994b88",
)
go_repository(
name = "com_github_kardianos_osext",
importpath = "github.com/kardianos/osext",
sum = "h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=",
version = "v0.0.0-20190222173326-2bc1f35cddc0",
)
go_repository(
name = "com_github_karrick_godirwalk",
importpath = "github.com/karrick/godirwalk",
sum = "h1:lOpSw2vJP0y5eLBW906QwKsUK/fe/QDyoqM5rnnuPDY=",
version = "v1.10.3",
)
go_repository(
name = "com_github_kataras_golog",
importpath = "github.com/kataras/golog",
sum = "h1:J7Dl82843nbKQDrQM/abbNJZvQjS6PfmkkffhOTXEpM=",
version = "v0.0.9",
)
go_repository(
name = "com_github_kataras_iris_v12",
importpath = "github.com/kataras/iris/v12",
sum = "h1:Wo5S7GMWv5OAzJmvFTvss/C4TS1W0uo6LkDlSymT4rM=",
version = "v12.0.1",
)
go_repository(
name = "com_github_kataras_neffos",
importpath = "github.com/kataras/neffos",
sum = "h1:O06dvQlxjdWvzWbm2Bq+Si6psUhvSmEctAMk9Xujqms=",
version = "v0.0.10",
)
go_repository(
name = "com_github_kataras_pio",
importpath = "github.com/kataras/pio",
sum = "h1:V5Rs9ztEWdp58oayPq/ulmlqJJZeJP6pP79uP3qjcao=",
version = "v0.0.0-20190103105442-ea782b38602d",
)
go_repository(
name = "com_github_kevinburke_ssh_config",
importpath = "github.com/kevinburke/ssh_config",
sum = "h1:Coekwdh0v2wtGp9Gmz1Ze3eVRAWJMLokvN3QjdzCHLY=",
version = "v0.0.0-20190725054713-01f96b0aa0cd",
)
go_repository(
name = "com_github_kisielk_errcheck",
importpath = "github.com/kisielk/errcheck",
sum = "h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY=",
version = "v1.5.0",
)
go_repository(
name = "com_github_kisielk_gotool",
importpath = "github.com/kisielk/gotool",
sum = "h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=",
version = "v1.0.0",
)
go_repository(
name = "com_github_klauspost_compress",
importpath = "github.com/klauspost/compress",
sum = "h1:eSvu8Tmq6j2psUJqJrLcWH6K3w5Dwc+qipbaA6eVEN4=",
version = "v1.11.13",
)
go_repository(
name = "com_github_klauspost_cpuid",
importpath = "github.com/klauspost/cpuid",
sum = "h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w=",
version = "v1.2.1",
)
go_repository(
name = "com_github_konsorten_go_windows_terminal_sequences",
importpath = "github.com/konsorten/go-windows-terminal-sequences",
sum = "h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=",
version = "v1.0.3",
)
go_repository(
name = "com_github_kr_fs",
importpath = "github.com/kr/fs",
sum = "h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=",
version = "v0.1.0",
)
go_repository(
name = "com_github_kr_logfmt",
importpath = "github.com/kr/logfmt",
sum = "h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=",
version = "v0.0.0-20140226030751-b84e30acd515",
)
go_repository(
name = "com_github_kr_pretty",
importpath = "github.com/kr/pretty",
sum = "h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=",
version = "v0.2.1",
)
go_repository(
name = "com_github_kr_pty",
importpath = "github.com/kr/pty",
sum = "h1:AkaSdXYQOWeaO3neb8EM634ahkXXe3jYbVh/F9lq+GI=",
version = "v1.1.8",
)
go_repository(
name = "com_github_kr_text",
importpath = "github.com/kr/text",
sum = "h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=",
version = "v0.2.0",
)
go_repository(
name = "com_github_kylelemons_godebug",
importpath = "github.com/kylelemons/godebug",
sum = "h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=",
version = "v1.1.0",
)
go_repository(
name = "com_github_labstack_echo_v4",
importpath = "github.com/labstack/echo/v4",
sum = "h1:z0BZoArY4FqdpUEl+wlHp4hnr/oSR6MTmQmv8OHSoww=",
version = "v4.1.11",
)
go_repository(
name = "com_github_labstack_gommon",
importpath = "github.com/labstack/gommon",
sum = "h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0=",
version = "v0.3.0",
)
go_repository(
name = "com_github_lestrrat_go_backoff_v2",
importpath = "github.com/lestrrat-go/backoff/v2",
sum = "h1:i2SeK33aOFJlUNJZzf2IpXRBvqBBnaGXfY5Xaop/GsE=",
version = "v2.0.7",
)
go_repository(
name = "com_github_lestrrat_go_blackmagic",
importpath = "github.com/lestrrat-go/blackmagic",
sum = "h1:XzdxDbuQTz0RZZEmdU7cnQxUtFUzgCSPq8RCz4BxIi4=",
version = "v1.0.0",
)
go_repository(
name = "com_github_lestrrat_go_codegen",
importpath = "github.com/lestrrat-go/codegen",
sum = "h1:gnWFHKvL64TTSFRghShUybm9UvBxFFXvnniE06JTO3k=",
version = "v1.0.0",
)
go_repository(
name = "com_github_lestrrat_go_httpcc",
importpath = "github.com/lestrrat-go/httpcc",
sum = "h1:FszVC6cKfDvBKcJv646+lkh4GydQg2Z29scgUfkOpYc=",
version = "v1.0.0",
)
go_repository(
name = "com_github_lestrrat_go_iter",
importpath = "github.com/lestrrat-go/iter",
sum = "h1:q8faalr2dY6o8bV45uwrxq12bRa1ezKrB6oM9FUgN4A=",
version = "v1.0.1",
)
go_repository(
name = "com_github_lestrrat_go_jwx",
importpath = "github.com/lestrrat-go/jwx",
sum = "h1:EuVGI/hPUSRstxWpWjVcklOe1odJLVrFY9zt4k1pa30=",
version = "v1.2.4",
)
go_repository(
name = "com_github_lestrrat_go_option",
importpath = "github.com/lestrrat-go/option",
sum = "h1:WqAWL8kh8VcSoD6xjSH34/1m8yxluXQbDeKNfvFeEO4=",
version = "v1.0.0",
)
go_repository(
name = "com_github_lestrrat_go_pdebug_v3",
importpath = "github.com/lestrrat-go/pdebug/v3",
sum = "h1:3G5sX/aw/TbMTtVc9U7IHBWRZtMvwvBziF1e4HoQtv8=",
version = "v3.0.1",
)
go_repository(
name = "com_github_lib_pq",
importpath = "github.com/lib/pq",
sum = "h1:Zx5DJFEYQXio93kgXnQ09fXNiUKsqv4OUEu2UtGcB1E=",
version = "v1.10.0",
)
go_repository(
name = "com_github_liggitt_tabwriter",
importpath = "github.com/liggitt/tabwriter",
sum = "h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=",
version = "v0.0.0-20181228230101-89fcab3d43de",
)
go_repository(
name = "com_github_lithammer_dedent",
importpath = "github.com/lithammer/dedent",
sum = "h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY=",
version = "v1.1.0",
)
go_repository(
name = "com_github_lucasb_eyer_go_colorful",
importpath = "github.com/lucasb-eyer/go-colorful",
sum = "h1:QIbQXiugsb+q10B+MI+7DI1oQLdmnep86tWFlaaUAac=",
version = "v1.0.3",
)
go_repository(
name = "com_github_magiconair_properties",
importpath = "github.com/magiconair/properties",
sum = "h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=",
version = "v1.8.5",
)
go_repository(
name = "com_github_mailru_easyjson",
importpath = "github.com/mailru/easyjson",
sum = "h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=",
version = "v0.7.6",
)
go_repository(
name = "com_github_makenowjust_heredoc",
importpath = "github.com/MakeNowJust/heredoc",
sum = "h1:sjQovDkwrZp8u+gxLtPgKGjk5hCxuy2hrRejBTA9xFU=",
version = "v0.0.0-20170808103936-bb23615498cd",
)
go_repository(
name = "com_github_markbates_oncer",
importpath = "github.com/markbates/oncer",
sum = "h1:JgVTCPf0uBVcUSWpyXmGpgOc62nK5HWUBKAGc3Qqa5k=",
version = "v0.0.0-20181203154359-bf2de49a0be2",
)
go_repository(
name = "com_github_markbates_safe",
importpath = "github.com/markbates/safe",
sum = "h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI=",
version = "v1.0.1",
)
go_repository(
name = "com_github_marstr_guid",
importpath = "github.com/marstr/guid",
sum = "h1:/M4H/1G4avsieL6BbUwCOBzulmoeKVP5ux/3mQNnbyI=",
version = "v1.1.0",
)
go_repository(
name = "com_github_masterminds_goutils",
importpath = "github.com/Masterminds/goutils",
sum = "h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=",
version = "v1.1.1",
)
go_repository(
name = "com_github_masterminds_semver_v3",
importpath = "github.com/Masterminds/semver/v3",
sum = "h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=",
version = "v3.1.1",
)
go_repository(
name = "com_github_masterminds_sprig_v3",
importpath = "github.com/Masterminds/sprig/v3",
sum = "h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8=",
version = "v3.2.2",
)
go_repository(
name = "com_github_mattn_go_colorable",
importpath = "github.com/mattn/go-colorable",
sum = "h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=",
version = "v0.1.8",
)
go_repository(
name = "com_github_mattn_go_isatty",
importpath = "github.com/mattn/go-isatty",
sum = "h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=",
version = "v0.0.12",
)
go_repository(
name = "com_github_mattn_go_runewidth",
importpath = "github.com/mattn/go-runewidth",
sum = "h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=",
version = "v0.0.9",
)
go_repository(
name = "com_github_mattn_go_shellwords",
importpath = "github.com/mattn/go-shellwords",
sum = "h1:K/VxK7SZ+cvuPgFSLKi5QPI9Vr/ipOf4C1gN+ntueUk=",
version = "v1.0.3",
)
go_repository(
name = "com_github_mattn_go_sqlite3",
importpath = "github.com/mattn/go-sqlite3",
sum = "h1:pDRiWfl+++eC2FEFRy6jXmQlvp4Yh3z1MJKg4UeYM/4=",
version = "v1.9.0",
)
go_repository(
name = "com_github_mattn_goveralls",
importpath = "github.com/mattn/goveralls",
sum = "h1:7eJB6EqsPhRVxvwEXGnqdO2sJI0PTsrWoTMXEk9/OQc=",
version = "v0.0.2",
)
go_repository(
name = "com_github_matttproud_golang_protobuf_extensions",
importpath = "github.com/matttproud/golang_protobuf_extensions",
sum = "h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=",
version = "v1.0.2-0.20181231171920-c182affec369",
)
go_repository(
name = "com_github_mediocregopher_mediocre_go_lib",
importpath = "github.com/mediocregopher/mediocre-go-lib",
sum = "h1:3dQJqqDouawQgl3gBE1PNHKFkJYGEuFb1DbSlaxdosE=",
version = "v0.0.0-20181029021733-cb65787f37ed",
)
go_repository(
name = "com_github_mediocregopher_radix_v3",
importpath = "github.com/mediocregopher/radix/v3",
sum = "h1:oacPXPKHJg0hcngVVrdtTnfGJiS+PtwoQwTBZGFlV4k=",
version = "v3.3.0",
)
go_repository(
name = "com_github_microcosm_cc_bluemonday",
importpath = "github.com/microcosm-cc/bluemonday",
sum = "h1:5lPfLTTAvAbtS0VqT+94yOtFnGfUWYyx0+iToC3Os3s=",
version = "v1.0.2",
)
go_repository(
name = "com_github_microsoft_go_winio",
importpath = "github.com/Microsoft/go-winio",
sum = "h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w=",
version = "v0.4.17",
)
go_repository(
name = "com_github_microsoft_hcsshim",
importpath = "github.com/Microsoft/hcsshim",
sum = "h1:cYnKADiM1869gvBpos3YCteeT6sZLB48lB5dmMMs8Tg=",
version = "v0.8.18",
)
go_repository(
name = "com_github_microsoft_hcsshim_test",
importpath = "github.com/Microsoft/hcsshim/test",
sum = "h1:4FA+QBaydEHlwxg0lMN3rhwoDaQy6LKhVWR4qvq4BuA=",
version = "v0.0.0-20210227013316-43a75bb4edd3",
)
go_repository(
name = "com_github_miekg_dns",
importpath = "github.com/miekg/dns",
sum = "h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA=",
version = "v1.0.14",
)
go_repository(
name = "com_github_miekg_pkcs11",
importpath = "github.com/miekg/pkcs11",
sum = "h1:iMwmD7I5225wv84WxIG/bmxz9AXjWvTWIbM/TYHvWtw=",
version = "v1.0.3",
)
go_repository(
name = "com_github_minio_highwayhash",
importpath = "github.com/minio/highwayhash",
sum = "h1:dZ6IIu8Z14VlC0VpfKofAhCy74wu/Qb5gcn52yWoz/0=",
version = "v1.0.1",
)
go_repository(
name = "com_github_mistifyio_go_zfs",
importpath = "github.com/mistifyio/go-zfs",
sum = "h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk=",
version = "v2.1.2-0.20190413222219-f784269be439+incompatible",
)
go_repository(
name = "com_github_mitchellh_cli",
importpath = "github.com/mitchellh/cli",
sum = "h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y=",
version = "v1.0.0",
)
go_repository(
name = "com_github_mitchellh_copystructure",
importpath = "github.com/mitchellh/copystructure",
sum = "h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ=",
version = "v1.0.0",
)
go_repository(
name = "com_github_mitchellh_go_homedir",
importpath = "github.com/mitchellh/go-homedir",
sum = "h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=",
version = "v1.1.0",
)
go_repository(
name = "com_github_mitchellh_go_testing_interface",
importpath = "github.com/mitchellh/go-testing-interface",
sum = "h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=",
version = "v1.0.0",
)
go_repository(
name = "com_github_mitchellh_go_wordwrap",
importpath = "github.com/mitchellh/go-wordwrap",
sum = "h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=",
version = "v1.0.0",
)
go_repository(
name = "com_github_mitchellh_gox",
importpath = "github.com/mitchellh/gox",
sum = "h1:lfGJxY7ToLJQjHHwi0EX6uYBdK78egf954SQl13PQJc=",
version = "v0.4.0",
)
go_repository(
name = "com_github_mitchellh_iochan",
importpath = "github.com/mitchellh/iochan",
sum = "h1:C+X3KsSTLFVBr/tK1eYN/vs4rJcvsiLU338UhYPJWeY=",
version = "v1.0.0",
)
go_repository(
name = "com_github_mitchellh_mapstructure",
importpath = "github.com/mitchellh/mapstructure",
sum = "h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=",
version = "v1.4.1",
)
go_repository(
name = "com_github_mitchellh_osext",
importpath = "github.com/mitchellh/osext",
sum = "h1:2+myh5ml7lgEU/51gbeLHfKGNfgEQQIWrlbdaOsidbQ=",
version = "v0.0.0-20151018003038-5e2d6d41470f",
)
go_repository(
name = "com_github_mitchellh_reflectwalk",
importpath = "github.com/mitchellh/reflectwalk",
sum = "h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY=",
version = "v1.0.0",
)
go_repository(
name = "com_github_mmcloughlin_avo",
importpath = "github.com/mmcloughlin/avo",
sum = "h1:D4I34fbgczGrhrN4DzBCZXT3u/nMWJnGmviIjSzzXSw=",
version = "v0.0.0-20201105074841-5d2f697d268f",
)
go_repository(
name = "com_github_moby_locker",
importpath = "github.com/moby/locker",
sum = "h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=",
version = "v1.0.1",
)
go_repository(
name = "com_github_moby_sys_mountinfo",
importpath = "github.com/moby/sys/mountinfo",
sum = "h1:1O+1cHA1aujwEwwVMa2Xm2l+gIpUHyd3+D+d7LZh1kM=",
version = "v0.4.1",
)
go_repository(
name = "com_github_moby_sys_symlink",
importpath = "github.com/moby/sys/symlink",
sum = "h1:MTFZ74KtNI6qQQpuBxU+uKCim4WtOMokr03hCfJcazE=",
version = "v0.1.0",
)
go_repository(
name = "com_github_moby_term",
importpath = "github.com/moby/term",
sum = "h1:SPoLlS9qUUnXcIY4pvA4CTwYjk0Is5f4UPEkeESr53k=",
version = "v0.0.0-20200915141129-7f0af18e79f2",
)
go_repository(
name = "com_github_modern_go_concurrent",
importpath = "github.com/modern-go/concurrent",
sum = "h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=",
version = "v0.0.0-20180306012644-bacd9c7ef1dd",
)
go_repository(
name = "com_github_modern_go_reflect2",
importpath = "github.com/modern-go/reflect2",
sum = "h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=",
version = "v1.0.1",
)
go_repository(
name = "com_github_montanaflynn_stats",
importpath = "github.com/montanaflynn/stats",
sum = "h1:iruDEfMl2E6fbMZ9s0scYfZQ84/6SPL6zC8ACM2oIL0=",
version = "v0.0.0-20171201202039-1bf9dbcd8cbe",
)
go_repository(
name = "com_github_moul_http2curl",
importpath = "github.com/moul/http2curl",
sum = "h1:dRMWoAtb+ePxMlLkrCbAqh4TlPHXvoGUSQ323/9Zahs=",
version = "v1.0.0",
)
go_repository(
name = "com_github_mrunalp_fileutils",
importpath = "github.com/mrunalp/fileutils",
sum = "h1:NKzVxiH7eSk+OQ4M+ZYW1K6h27RUV3MI6NUTsHhU6Z4=",
version = "v0.5.0",
)
go_repository(
name = "com_github_munnerz_goautoneg",
importpath = "github.com/munnerz/goautoneg",
sum = "h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=",
version = "v0.0.0-20191010083416-a7dc8b61c822",
)
go_repository(
name = "com_github_mwitkow_go_conntrack",
importpath = "github.com/mwitkow/go-conntrack",
sum = "h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=",
version = "v0.0.0-20190716064945-2f068394615f",
)
go_repository(
name = "com_github_mxk_go_flowrate",
importpath = "github.com/mxk/go-flowrate",
sum = "h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=",
version = "v0.0.0-20140419014527-cca7078d478f",
)
go_repository(
name = "com_github_nats_io_jwt",
importpath = "github.com/nats-io/jwt",
sum = "h1:w3GMTO969dFg+UOKTmmyuu7IGdusK+7Ytlt//OYH/uU=",
version = "v1.2.2",
)
go_repository(
name = "com_github_nats_io_jwt_v2",
importpath = "github.com/nats-io/jwt/v2",
sum = "h1:SycklijeduR742i/1Y3nRhURYM7imDzZZ3+tuAQqhQA=",
version = "v2.0.1",
)
go_repository(
name = "com_github_nats_io_nats_go",
importpath = "github.com/nats-io/nats.go",
replace = "github.com/nats-io/nats.go",
sum = "h1:L8qnKaofSfNFbXg0C5F71LdjPRnmQwSsA4ukmkt1TvY=",
version = "v1.10.0",
)
go_repository(
name = "com_github_nats_io_nats_server_v2",
importpath = "github.com/nats-io/nats-server/v2",
replace = "github.com/nats-io/nats-server/v2",
sum = "h1:QNeFmJRBq+O2zF8EmsR/JSvtL2zXb3GwICloHgskYBU=",
version = "v2.2.0",
)
go_repository(
name = "com_github_nats_io_nats_streaming_server",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/nats-io/nats-streaming-server",
sum = "h1:jb/osnXmFJtKDS9DFghDjX82v1NT9IhaoR/r6s6toNg=",
version = "v0.21.1",
)
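# Note: build_file_generation = "on" above asks Gazelle to (re)generate BUILD files
# for the fetched repository, and build_file_proto_mode = "disable" keeps it from
# creating proto rules there, so the repo's checked-in .pb.go sources are compiled
# as ordinary Go files instead.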
go_repository(
name = "com_github_nats_io_nkeys",
importpath = "github.com/nats-io/nkeys",
sum = "h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8=",
version = "v0.3.0",
)
go_repository(
name = "com_github_nats_io_nuid",
importpath = "github.com/nats-io/nuid",
sum = "h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=",
version = "v1.0.1",
)
go_repository(
name = "com_github_nats_io_stan_go",
build_file_generation = "on",
build_file_proto_mode = "disable",
importpath = "github.com/nats-io/stan.go",
sum = "h1:XyemjL9vAeGHooHn5RQy+ngljd8AVSM2l65Jdnpv4rI=",
version = "v0.8.3",
)
go_repository(
name = "com_github_ncw_swift",
importpath = "github.com/ncw/swift",
sum = "h1:4DQRPj35Y41WogBxyhOXlrI37nzGlyEcsforeudyYPQ=",
version = "v1.0.47",
)
go_repository(
name = "com_github_niemeyer_pretty",
importpath = "github.com/niemeyer/pretty",
sum = "h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=",
version = "v0.0.0-20200227124842-a10e7caefd8e",
)
go_repository(
name = "com_github_nvveen_gotty",
importpath = "github.com/Nvveen/Gotty",
sum = "h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=",
version = "v0.0.0-20120604004816-cd527374f1e5",
)
go_repository(
name = "com_github_nxadm_tail",
importpath = "github.com/nxadm/tail",
sum = "h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=",
version = "v1.4.4",
)
go_repository(
name = "com_github_nytimes_gziphandler",
importpath = "github.com/NYTimes/gziphandler",
sum = "h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0=",
version = "v0.0.0-20170623195520-56545f4a5d46",
)
go_repository(
name = "com_github_olekukonko_tablewriter",
importpath = "github.com/olekukonko/tablewriter",
sum = "h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=",
version = "v0.0.5",
)
go_repository(
name = "com_github_olivere_elastic_v7",
importpath = "github.com/olivere/elastic/v7",
sum = "h1:91kj/UMKWQt8VAHBm5BDHpVmzdfPCmICaUFy2oH4LkQ=",
version = "v7.0.12",
)
go_repository(
name = "com_github_oneofone_xxhash",
importpath = "github.com/OneOfOne/xxhash",
sum = "h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=",
version = "v1.2.2",
)
go_repository(
name = "com_github_onsi_ginkgo",
importpath = "github.com/onsi/ginkgo",
sum = "h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4=",
version = "v1.14.1",
)
go_repository(
name = "com_github_onsi_gomega",
importpath = "github.com/onsi/gomega",
sum = "h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA=",
version = "v1.10.3",
)
go_repository(
name = "com_github_opencontainers_go_digest",
importpath = "github.com/opencontainers/go-digest",
sum = "h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=",
version = "v1.0.0",
)
go_repository(
name = "com_github_opencontainers_image_spec",
importpath = "github.com/opencontainers/image-spec",
sum = "h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=",
version = "v1.0.1",
)
go_repository(
name = "com_github_opencontainers_runc",
importpath = "github.com/opencontainers/runc",
sum = "h1:G18PGckGdAm3yVQRWDVQ1rLSLntiniKJ0cNRT2Tm5gs=",
version = "v1.0.1",
)
go_repository(
name = "com_github_opencontainers_runtime_spec",
importpath = "github.com/opencontainers/runtime-spec",
sum = "h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc=",
version = "v1.0.3-0.20210326190908-1c3f411f0417",
)
go_repository(
name = "com_github_opencontainers_runtime_tools",
importpath = "github.com/opencontainers/runtime-tools",
sum = "h1:H7DMc6FAjgwZZi8BRqjrAAHWoqEr5e5L6pS4V0ezet4=",
version = "v0.0.0-20181011054405-1d69bd0f9c39",
)
go_repository(
name = "com_github_opencontainers_selinux",
importpath = "github.com/opencontainers/selinux",
sum = "h1:c4ca10UMgRcvZ6h0K4HtS15UaVSBEaE+iln2LVpAuGc=",
version = "v1.8.2",
)
go_repository(
name = "com_github_opentracing_opentracing_go",
importpath = "github.com/opentracing/opentracing-go",
sum = "h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=",
version = "v1.2.0",
)
go_repository(
name = "com_github_ory_dockertest_v3",
importpath = "github.com/ory/dockertest/v3",
sum = "h1:L8JWiGgR+fnj90AEOkTFIEp4j5uWAK72P3IUsYgn2cs=",
version = "v3.6.3",
)
go_repository(
name = "com_github_ory_hydra_client_go",
importpath = "github.com/ory/hydra-client-go",
sum = "h1:sbp+8zwEJvhqSxcY8HiOkXeY2FspsfSOJ5ajJ07xPQo=",
version = "v1.9.2",
)
go_repository(
name = "com_github_ory_kratos_client_go",
importpath = "github.com/ory/kratos-client-go",
sum = "h1:GHfgWVYqJwYj7aitzLOpy8aiTfywb/GjOVJc3AUuQmI=",
version = "v0.5.4-alpha.1",
)
go_repository(
name = "com_github_pascaldekloe_goe",
importpath = "github.com/pascaldekloe/goe",
sum = "h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=",
version = "v0.1.0",
)
go_repository(
name = "com_github_pelletier_go_buffruneio",
importpath = "github.com/pelletier/go-buffruneio",
sum = "h1:U4t4R6YkofJ5xHm3dJzuRpPZ0mr5MMCoAWooScCR7aA=",
version = "v0.2.0",
)
go_repository(
name = "com_github_pelletier_go_toml",
importpath = "github.com/pelletier/go-toml",
sum = "h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ=",
version = "v1.9.3",
)
go_repository(
name = "com_github_peterbourgon_diskv",
importpath = "github.com/peterbourgon/diskv",
sum = "h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=",
version = "v2.0.1+incompatible",
)
go_repository(
name = "com_github_peterh_liner",
importpath = "github.com/peterh/liner",
sum = "h1:8uaXtUkxiy+T/zdLWuxa/PG4so0TPZDZfafFNNSaptE=",
version = "v0.0.0-20170317030525-88609521dc4b",
)
go_repository(
name = "com_github_phayes_freeport",
importpath = "github.com/phayes/freeport",
sum = "h1:rZQtoozkfsiNs36c7Tdv/gyGNzD1X1XWKO8rptVNZuM=",
version = "v0.0.0-20171002181615-b8543db493a5",
)
go_repository(
name = "com_github_pingcap_errors",
importpath = "github.com/pingcap/errors",
sum = "h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=",
version = "v0.11.4",
)
go_repository(
name = "com_github_pkg_errors",
importpath = "github.com/pkg/errors",
sum = "h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=",
version = "v0.9.1",
)
go_repository(
name = "com_github_pkg_sftp",
importpath = "github.com/pkg/sftp",
sum = "h1:VasscCm72135zRysgrJDKsntdmPN+OuU3+nnHYA9wyc=",
version = "v1.10.1",
)
go_repository(
name = "com_github_pmezard_go_difflib",
importpath = "github.com/pmezard/go-difflib",
sum = "h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=",
version = "v1.0.0",
)
go_repository(
name = "com_github_posener_complete",
importpath = "github.com/posener/complete",
sum = "h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w=",
version = "v1.1.1",
)
go_repository(
name = "com_github_pquerna_cachecontrol",
importpath = "github.com/pquerna/cachecontrol",
sum = "h1:0XM1XL/OFFJjXsYXlG30spTkV/E9+gmd5GD1w2HE8xM=",
version = "v0.0.0-20171018203845-0dec1b30a021",
)
go_repository(
name = "com_github_prometheus_client_golang",
importpath = "github.com/prometheus/client_golang",
sum = "h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=",
version = "v1.11.0",
)
go_repository(
name = "com_github_prometheus_client_model",
importpath = "github.com/prometheus/client_model",
sum = "h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=",
version = "v0.2.0",
)
go_repository(
name = "com_github_prometheus_common",
importpath = "github.com/prometheus/common",
sum = "h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ=",
version = "v0.26.0",
)
go_repository(
name = "com_github_prometheus_procfs",
importpath = "github.com/prometheus/procfs",
sum = "h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=",
version = "v0.6.0",
)
go_repository(
name = "com_github_puerkitobio_goquery",
importpath = "github.com/PuerkitoBio/goquery",
sum = "h1:j7taAbelrdcsOlGeMenZxc2AWXD5fieT1/znArdnx94=",
version = "v1.6.0",
)
go_repository(
name = "com_github_puerkitobio_purell",
importpath = "github.com/PuerkitoBio/purell",
sum = "h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=",
version = "v1.1.1",
)
go_repository(
name = "com_github_puerkitobio_urlesc",
importpath = "github.com/PuerkitoBio/urlesc",
sum = "h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=",
version = "v0.0.0-20170810143723-de5bf2ad4578",
)
go_repository(
name = "com_github_rivo_tview",
importpath = "github.com/rivo/tview",
sum = "h1:Jfm2O5tRzzHt5LeM9F4AuwcNGxCH7erPl8GeVOzJKd0=",
version = "v0.0.0-20200404204604-ca37f83cb2e7",
)
go_repository(
name = "com_github_rivo_uniseg",
importpath = "github.com/rivo/uniseg",
sum = "h1:+2KBaVoUmb9XzDsrx/Ct0W/EYOSFf/nWTauy++DprtY=",
version = "v0.1.0",
)
go_repository(
name = "com_github_rogpeppe_fastuuid",
importpath = "github.com/rogpeppe/fastuuid",
sum = "h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s=",
version = "v1.2.0",
)
go_repository(
name = "com_github_rogpeppe_go_internal",
importpath = "github.com/rogpeppe/go-internal",
sum = "h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk=",
version = "v1.3.0",
)
go_repository(
name = "com_github_russross_blackfriday",
importpath = "github.com/russross/blackfriday",
sum = "h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=",
version = "v1.5.2",
)
go_repository(
name = "com_github_russross_blackfriday_v2",
importpath = "github.com/russross/blackfriday/v2",
sum = "h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=",
version = "v2.0.1",
)
go_repository(
name = "com_github_ryanuber_columnize",
importpath = "github.com/ryanuber/columnize",
sum = "h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s=",
version = "v2.1.0+incompatible",
)
go_repository(
name = "com_github_safchain_ethtool",
importpath = "github.com/safchain/ethtool",
sum = "h1:2c1EFnZHIPCW8qKWgHMH/fX2PkSabFc5mrVzfUNdg5U=",
version = "v0.0.0-20190326074333-42ed695e3de8",
)
go_repository(
name = "com_github_sahilm_fuzzy",
importpath = "github.com/sahilm/fuzzy",
sum = "h1:FzWGaw2Opqyu+794ZQ9SYifWv2EIXpwP4q8dY1kDAwI=",
version = "v0.1.0",
)
go_repository(
name = "com_github_satori_go_uuid",
importpath = "github.com/satori/go.uuid",
sum = "h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=",
version = "v1.2.0",
)
go_repository(
name = "com_github_sclevine_agouti",
importpath = "github.com/sclevine/agouti",
sum = "h1:8IBJS6PWz3uTlMP3YBIR5f+KAldcGuOeFkFbUWfBgK4=",
version = "v3.0.0+incompatible",
)
go_repository(
name = "com_github_sean_seed",
importpath = "github.com/sean-/seed",
sum = "h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=",
version = "v0.0.0-20170313163322-e2103e2c3529",
)
go_repository(
name = "com_github_seccomp_libseccomp_golang",
importpath = "github.com/seccomp/libseccomp-golang",
sum = "h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo=",
version = "v0.9.1",
)
go_repository(
name = "com_github_segmentio_backo_go",
importpath = "github.com/segmentio/backo-go",
sum = "h1:ZuhckGJ10ulaKkdvJtiAqsLTiPrLaXSdnVgXJKJkTxE=",
version = "v0.0.0-20200129164019-23eae7c10bd3",
)
go_repository(
name = "com_github_sercand_kuberesolver_v3",
importpath = "github.com/sercand/kuberesolver/v3",
patch_args = ["-p1"],
patches = [
"//bazel/external:kuberesolver.patch",
],
sum = "h1:3PY7ntZyEzUhMri5sc9uX83mZ0QnlNAqlXS7l0anRiA=",
version = "v3.0.0",
)
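# Note: the kuberesolver rule above applies a local patch
# ("//bazel/external:kuberesolver.patch") on top of the fetched sources;
# patch_args = ["-p1"] strips the leading path component, as expected for a
# patch produced with `git diff`.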
go_repository(
name = "com_github_sergi_go_diff",
importpath = "github.com/sergi/go-diff",
sum = "h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=",
version = "v1.1.0",
)
go_repository(
name = "com_github_shopify_goreferrer",
importpath = "github.com/Shopify/goreferrer",
sum = "h1:WDC6ySpJzbxGWFh4aMxFFC28wwGp5pEuoTtvA4q/qQ4=",
version = "v0.0.0-20181106222321-ec9c9a553398",
)
go_repository(
name = "com_github_shopify_logrus_bugsnag",
importpath = "github.com/Shopify/logrus-bugsnag",
sum = "h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=",
version = "v0.0.0-20171204204709-577dee27f20d",
)
go_repository(
name = "com_github_shopspring_decimal",
importpath = "github.com/shopspring/decimal",
sum = "h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=",
version = "v1.2.0",
)
go_repository(
name = "com_github_shurcool_sanitized_anchor_name",
importpath = "github.com/shurcooL/sanitized_anchor_name",
sum = "h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=",
version = "v1.0.0",
)
go_repository(
name = "com_github_sirupsen_logrus",
importpath = "github.com/sirupsen/logrus",
sum = "h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=",
version = "v1.8.1",
)
go_repository(
name = "com_github_skratchdot_open_golang",
importpath = "github.com/skratchdot/open-golang",
sum = "h1:VAzdS5Nw68fbf5RZ8RDVlUvPXNU6Z3jtPCK/qvm4FoQ=",
version = "v0.0.0-20190402232053-79abb63cd66e",
)
go_repository(
name = "com_github_smartystreets_assertions",
importpath = "github.com/smartystreets/assertions",
sum = "h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w=",
version = "v1.0.1",
)
go_repository(
name = "com_github_smartystreets_go_aws_auth",
importpath = "github.com/smartystreets/go-aws-auth",
sum = "h1:hp2CYQUINdZMHdvTdXtPOY2ainKl4IoMcpAXEf2xj3Q=",
version = "v0.0.0-20180515143844-0c1422d1fdb9",
)
go_repository(
name = "com_github_smartystreets_goconvey",
importpath = "github.com/smartystreets/goconvey",
sum = "h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=",
version = "v1.6.4",
)
go_repository(
name = "com_github_smartystreets_gunit",
importpath = "github.com/smartystreets/gunit",
sum = "h1:32x+htJCu3aMswhPw3teoJ+PnWPONqdNgaGs6Qt8ZaU=",
version = "v1.1.3",
)
go_repository(
name = "com_github_soheilhy_cmux",
importpath = "github.com/soheilhy/cmux",
sum = "h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=",
version = "v0.1.4",
)
go_repository(
name = "com_github_spaolacci_murmur3",
importpath = "github.com/spaolacci/murmur3",
sum = "h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=",
version = "v1.1.0",
)
go_repository(
name = "com_github_spf13_afero",
importpath = "github.com/spf13/afero",
sum = "h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=",
version = "v1.6.0",
)
go_repository(
name = "com_github_spf13_cast",
importpath = "github.com/spf13/cast",
sum = "h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=",
version = "v1.3.1",
)
go_repository(
name = "com_github_spf13_cobra",
importpath = "github.com/spf13/cobra",
replace = "github.com/spf13/cobra",
sum = "h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw=",
version = "v1.2.1",
)
go_repository(
name = "com_github_spf13_jwalterweatherman",
importpath = "github.com/spf13/jwalterweatherman",
sum = "h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=",
version = "v1.1.0",
)
go_repository(
name = "com_github_spf13_pflag",
importpath = "github.com/spf13/pflag",
sum = "h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=",
version = "v1.0.5",
)
go_repository(
name = "com_github_spf13_viper",
importpath = "github.com/spf13/viper",
replace = "github.com/spf13/viper",
sum = "h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44=",
version = "v1.8.1",
)
go_repository(
name = "com_github_src_d_gcfg",
importpath = "github.com/src-d/gcfg",
sum = "h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4=",
version = "v1.4.0",
)
go_repository(
name = "com_github_stefanberger_go_pkcs11uri",
importpath = "github.com/stefanberger/go-pkcs11uri",
sum = "h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I=",
version = "v0.0.0-20201008174630-78d3cae3a980",
)
go_repository(
name = "com_github_stoewer_go_strcase",
importpath = "github.com/stoewer/go-strcase",
sum = "h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=",
version = "v1.2.0",
)
go_repository(
name = "com_github_stretchr_objx",
importpath = "github.com/stretchr/objx",
sum = "h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=",
version = "v0.2.0",
)
go_repository(
name = "com_github_stretchr_testify",
importpath = "github.com/stretchr/testify",
sum = "h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=",
version = "v1.7.0",
)
go_repository(
name = "com_github_subosito_gotenv",
importpath = "github.com/subosito/gotenv",
sum = "h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=",
version = "v1.2.0",
)
go_repository(
name = "com_github_syndtr_gocapability",
importpath = "github.com/syndtr/gocapability",
sum = "h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=",
version = "v0.0.0-20200815063812-42c35b437635",
)
go_repository(
name = "com_github_tchap_go_patricia",
importpath = "github.com/tchap/go-patricia",
sum = "h1:JvoDL7JSoIP2HDE8AbDH3zC8QBPxmzYe32HHy5yQ+Ck=",
version = "v2.2.6+incompatible",
)
go_repository(
name = "com_github_tidwall_btree",
importpath = "github.com/tidwall/btree",
sum = "h1:aLwwJlG+InuFzdAPuBf9YCAR1LvSQ9zhC5aorFPlIPs=",
version = "v0.4.2",
)
go_repository(
name = "com_github_tidwall_buntdb",
importpath = "github.com/tidwall/buntdb",
sum = "h1:+s5TqjuAfo6IdMa8boDkCx5PzT2mlP5AwgNN4yxJba8=",
version = "v1.2.1",
)
go_repository(
name = "com_github_tidwall_gjson",
importpath = "github.com/tidwall/gjson",
sum = "h1:19cchw8FOxkG5mdLRkGf9jqIqEyqdZhPqW60XfyFxk8=",
version = "v1.7.4",
)
go_repository(
name = "com_github_tidwall_grect",
importpath = "github.com/tidwall/grect",
sum = "h1:+kMEkxhoqB7rniVXzMEIA66XwU07STgINqxh+qVIndY=",
version = "v0.1.1",
)
go_repository(
name = "com_github_tidwall_match",
importpath = "github.com/tidwall/match",
sum = "h1:FQUVvBImDutD8wJLN6c5eMzWtjgONK9MwIBCOrUJKeE=",
version = "v1.0.3",
)
go_repository(
name = "com_github_tidwall_pretty",
importpath = "github.com/tidwall/pretty",
sum = "h1:K3hMW5epkdAVwibsQEfR/7Zj0Qgt4DxtNumTq/VloO8=",
version = "v1.1.0",
)
go_repository(
name = "com_github_tidwall_rtred",
importpath = "github.com/tidwall/rtred",
sum = "h1:exmoQtOLvDoO8ud++6LwVsAMTu0KPzLTUrMln8u1yu8=",
version = "v0.1.2",
)
go_repository(
name = "com_github_tidwall_tinyqueue",
importpath = "github.com/tidwall/tinyqueue",
sum = "h1:SpNEvEggbpyN5DIReaJ2/1ndroY8iyEGxPYxoSaymYE=",
version = "v0.1.1",
)
go_repository(
name = "com_github_tmc_grpc_websocket_proxy",
importpath = "github.com/tmc/grpc-websocket-proxy",
sum = "h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=",
version = "v0.0.0-20190109142713-0ad062ec5ee5",
)
go_repository(
name = "com_github_tv42_httpunix",
importpath = "github.com/tv42/httpunix",
sum = "h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8=",
version = "v0.0.0-20150427012821-b75d8614f926",
)
go_repository(
name = "com_github_twitchyliquid64_golang_asm",
importpath = "github.com/twitchyliquid64/golang-asm",
sum = "h1:WYZ15YKpC5xM8PwpBTDsAgemoLB/lyhRkzJSEw9eAew=",
version = "v0.15.0",
)
go_repository(
name = "com_github_txn2_txeh",
importpath = "github.com/txn2/txeh",
sum = "h1:Rtmx8+1FfZ9VJ3o7r6nf/iswGg8eNth/QYMlHqb0hPA=",
version = "v1.2.1",
)
go_repository(
name = "com_github_ugorji_go",
importpath = "github.com/ugorji/go",
sum = "h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=",
version = "v1.1.7",
)
go_repository(
name = "com_github_ugorji_go_codec",
importpath = "github.com/ugorji/go/codec",
sum = "h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs=",
version = "v1.1.7",
)
go_repository(
name = "com_github_urfave_cli",
importpath = "github.com/urfave/cli",
sum = "h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo=",
version = "v1.22.2",
)
go_repository(
name = "com_github_urfave_negroni",
importpath = "github.com/urfave/negroni",
sum = "h1:kIimOitoypq34K7TG7DUaJ9kq/N4Ofuwi1sjz0KipXc=",
version = "v1.0.0",
)
go_repository(
name = "com_github_valyala_bytebufferpool",
importpath = "github.com/valyala/bytebufferpool",
sum = "h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=",
version = "v1.0.0",
)
go_repository(
name = "com_github_valyala_fasthttp",
importpath = "github.com/valyala/fasthttp",
sum = "h1:uWF8lgKmeaIewWVPwi4GRq2P6+R46IgYZdxWtM+GtEY=",
version = "v1.6.0",
)
go_repository(
name = "com_github_valyala_fasttemplate",
importpath = "github.com/valyala/fasttemplate",
sum = "h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8WdUSz8=",
version = "v1.0.1",
)
go_repository(
name = "com_github_valyala_tcplisten",
importpath = "github.com/valyala/tcplisten",
sum = "h1:0R4NLDRDZX6JcmhJgXi5E4b8Wg84ihbmUKp/GvSPEzc=",
version = "v0.0.0-20161114210144-ceec8f93295a",
)
go_repository(
name = "com_github_vbauerster_mpb_v4",
importpath = "github.com/vbauerster/mpb/v4",
sum = "h1:QdSmlc4dUap9XugHWx84yi7ABstYHW1rC5slzDwfXnw=",
version = "v4.11.0",
)
go_repository(
name = "com_github_vektah_gqlparser",
importpath = "github.com/vektah/gqlparser",
sum = "h1:ZsyLGn7/7jDNI+y4SEhI4yAxRChlv15pUHMjijT+e68=",
version = "v1.1.2",
)
go_repository(
name = "com_github_vishvananda_netlink",
importpath = "github.com/vishvananda/netlink",
sum = "h1:cPXZWzzG0NllBLdjWoD1nDfaqu98YMv+OneaKc8sPOA=",
version = "v1.1.1-0.20201029203352-d40f9887b852",
)
go_repository(
name = "com_github_vishvananda_netns",
importpath = "github.com/vishvananda/netns",
sum = "h1:4hwBBUfQCFe3Cym0ZtKyq7L16eZUtYKs+BaHDN6mAns=",
version = "v0.0.0-20200728191858-db3c7e526aae",
)
go_repository(
name = "com_github_vividcortex_ewma",
importpath = "github.com/VividCortex/ewma",
sum = "h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=",
version = "v1.1.1",
)
go_repository(
name = "com_github_willf_bitset",
importpath = "github.com/willf/bitset",
sum = "h1:N7Z7E9UvjW+sGsEl7k/SJrvY2reP1A07MrGuCjIOjRE=",
version = "v1.1.11",
)
go_repository(
name = "com_github_xanzy_ssh_agent",
importpath = "github.com/xanzy/ssh-agent",
sum = "h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70=",
version = "v0.2.1",
)
go_repository(
name = "com_github_xdg_go_pbkdf2",
importpath = "github.com/xdg-go/pbkdf2",
sum = "h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=",
version = "v1.0.0",
)
go_repository(
name = "com_github_xdg_go_scram",
importpath = "github.com/xdg-go/scram",
sum = "h1:akYIkZ28e6A96dkWNJQu3nmCzH3YfwMPQExUYDaRv7w=",
version = "v1.0.2",
)
go_repository(
name = "com_github_xdg_go_stringprep",
importpath = "github.com/xdg-go/stringprep",
sum = "h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc=",
version = "v1.0.2",
)
go_repository(
name = "com_github_xeipuuv_gojsonpointer",
importpath = "github.com/xeipuuv/gojsonpointer",
sum = "h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=",
version = "v0.0.0-20180127040702-4e3ac2762d5f",
)
go_repository(
name = "com_github_xeipuuv_gojsonreference",
importpath = "github.com/xeipuuv/gojsonreference",
sum = "h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=",
version = "v0.0.0-20180127040603-bd5ef7bd5415",
)
go_repository(
name = "com_github_xeipuuv_gojsonschema",
importpath = "github.com/xeipuuv/gojsonschema",
sum = "h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=",
version = "v1.2.0",
)
go_repository(
name = "com_github_xiang90_probing",
importpath = "github.com/xiang90/probing",
sum = "h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=",
version = "v0.0.0-20190116061207-43a291ad63a2",
)
go_repository(
name = "com_github_xtgo_uuid",
importpath = "github.com/xtgo/uuid",
sum = "h1:3lbZUMbMiGUW/LMkfsEABsc5zNT9+b1CvsJx47JzJ8g=",
version = "v0.0.0-20140804021211-a0b114877d4c",
)
go_repository(
name = "com_github_yalp_jsonpath",
importpath = "github.com/yalp/jsonpath",
sum = "h1:6fRhSjgLCkTD3JnJxvaJ4Sj+TYblw757bqYgZaOq5ZY=",
version = "v0.0.0-20180802001716-5cc68e5049a0",
)
go_repository(
name = "com_github_youmark_pkcs8",
importpath = "github.com/youmark/pkcs8",
sum = "h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA=",
version = "v0.0.0-20181117223130-1be2e3e5546d",
)
go_repository(
name = "com_github_yudai_gojsondiff",
importpath = "github.com/yudai/gojsondiff",
sum = "h1:27cbfqXLVEJ1o8I6v3y9lg8Ydm53EKqHXAOMxEGlCOA=",
version = "v1.0.0",
)
go_repository(
name = "com_github_yudai_golcs",
importpath = "github.com/yudai/golcs",
sum = "h1:BHyfKlQyqbsFN5p3IfnEUduWvb9is428/nNb5L3U01M=",
version = "v0.0.0-20170316035057-ecda9a501e82",
)
go_repository(
name = "com_github_yudai_pp",
importpath = "github.com/yudai/pp",
sum = "h1:Q4//iY4pNF6yPLZIigmvcl7k/bPgrcTPIFIcmawg5bI=",
version = "v2.0.1+incompatible",
)
go_repository(
name = "com_github_yuin_goldmark",
importpath = "github.com/yuin/goldmark",
sum = "h1:ruQGxdhGHe7FWOJPT0mKs5+pD2Xs1Bm/kdGlHO04FmM=",
version = "v1.2.1",
)
go_repository(
name = "com_github_yvasiyarov_go_metrics",
importpath = "github.com/yvasiyarov/go-metrics",
sum = "h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI=",
version = "v0.0.0-20140926110328-57bccd1ccd43",
)
go_repository(
name = "com_github_yvasiyarov_gorelic",
importpath = "github.com/yvasiyarov/gorelic",
sum = "h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE=",
version = "v0.0.0-20141212073537-a9bba5b9ab50",
)
go_repository(
name = "com_github_yvasiyarov_newrelic_platform_go",
importpath = "github.com/yvasiyarov/newrelic_platform_go",
sum = "h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY=",
version = "v0.0.0-20140908184405-b21fdbd4370f",
)
go_repository(
name = "com_github_zenazn_goji",
importpath = "github.com/zenazn/goji",
sum = "h1:mXV20Aj/BdWrlVzIn1kXFa+Tq62INlUi0cFFlztTaK0=",
version = "v0.9.1-0.20160507202103-64eb34159fe5",
)
go_repository(
name = "com_google_cloud_go",
importpath = "cloud.google.com/go",
replace = "cloud.google.com/go",
sum = "h1:kAdyAMrj9CjqOSGiluseVjIgAyQ3uxADYtUYR6MwYeY=",
version = "v0.80.0",
)
go_repository(
name = "com_google_cloud_go_bigquery",
importpath = "cloud.google.com/go/bigquery",
sum = "h1:PQcPefKFdaIzjQFbiyOgAqyx8q5djaE7x9Sqe712DPA=",
version = "v1.8.0",
)
go_repository(
name = "com_google_cloud_go_datastore",
importpath = "cloud.google.com/go/datastore",
sum = "h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ=",
version = "v1.1.0",
)
go_repository(
name = "com_google_cloud_go_firestore",
importpath = "cloud.google.com/go/firestore",
sum = "h1:9x7Bx0A9R5/M9jibeJeZWqjeVEIxYW9fZYqB9a70/bY=",
version = "v1.1.0",
)
go_repository(
name = "com_google_cloud_go_pubsub",
importpath = "cloud.google.com/go/pubsub",
sum = "h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU=",
version = "v1.3.1",
)
go_repository(
name = "com_google_cloud_go_storage",
importpath = "cloud.google.com/go/storage",
sum = "h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA=",
version = "v1.10.0",
)
go_repository(
name = "com_shuralyov_dmitri_gpu_mtl",
importpath = "dmitri.shuralyov.com/gpu/mtl",
sum = "h1:+PdD6GLKejR9DizMAKT5DpSAkKswvZrurk1/eEt9+pw=",
version = "v0.0.0-20201218220906-28db891af037",
)
go_repository(
name = "in_gopkg_airbrake_gobrake_v2",
importpath = "gopkg.in/airbrake/gobrake.v2",
sum = "h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo=",
version = "v2.0.9",
)
go_repository(
name = "in_gopkg_alecthomas_kingpin_v2",
importpath = "gopkg.in/alecthomas/kingpin.v2",
sum = "h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=",
version = "v2.2.6",
)
go_repository(
name = "in_gopkg_check_v1",
importpath = "gopkg.in/check.v1",
sum = "h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=",
version = "v1.0.0-20200227125254-8fa46927fb4f",
)
go_repository(
name = "in_gopkg_cheggaaa_pb_v1",
importpath = "gopkg.in/cheggaaa/pb.v1",
sum = "h1:Ev7yu1/f6+d+b3pi5vPdRPc6nNtP1umSfcWiEfRqv6I=",
version = "v1.0.25",
)
go_repository(
name = "in_gopkg_errgo_v2",
importpath = "gopkg.in/errgo.v2",
sum = "h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=",
version = "v2.1.0",
)
go_repository(
name = "in_gopkg_fsnotify_v1",
importpath = "gopkg.in/fsnotify.v1",
sum = "h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=",
version = "v1.4.7",
)
go_repository(
name = "in_gopkg_gemnasium_logrus_airbrake_hook_v2",
importpath = "gopkg.in/gemnasium/logrus-airbrake-hook.v2",
sum = "h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0=",
version = "v2.1.2",
)
go_repository(
name = "in_gopkg_go_playground_assert_v1",
importpath = "gopkg.in/go-playground/assert.v1",
sum = "h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM=",
version = "v1.2.1",
)
go_repository(
name = "in_gopkg_go_playground_validator_v8",
importpath = "gopkg.in/go-playground/validator.v8",
sum = "h1:lFB4DoMU6B626w8ny76MV7VX6W2VHct2GVOI3xgiMrQ=",
version = "v8.18.2",
)
go_repository(
name = "in_gopkg_inf_v0",
importpath = "gopkg.in/inf.v0",
sum = "h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=",
version = "v0.9.1",
)
go_repository(
name = "in_gopkg_ini_v1",
importpath = "gopkg.in/ini.v1",
sum = "h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU=",
version = "v1.62.0",
)
go_repository(
name = "in_gopkg_mgo_v2",
importpath = "gopkg.in/mgo.v2",
sum = "h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU=",
version = "v2.0.0-20180705113604-9856a29383ce",
)
go_repository(
name = "in_gopkg_natefinch_lumberjack_v2",
importpath = "gopkg.in/natefinch/lumberjack.v2",
sum = "h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=",
version = "v2.0.0",
)
go_repository(
name = "in_gopkg_resty_v1",
importpath = "gopkg.in/resty.v1",
sum = "h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI=",
version = "v1.12.0",
)
go_repository(
name = "in_gopkg_segmentio_analytics_go_v3",
importpath = "gopkg.in/segmentio/analytics-go.v3",
sum = "h1:UzxH1uaGZRpMKDhJyBz0pexz6yUoBU3x8bJsRk/HV6U=",
version = "v3.1.0",
)
go_repository(
name = "in_gopkg_square_go_jose_v2",
importpath = "gopkg.in/square/go-jose.v2",
sum = "h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w=",
version = "v2.5.1",
)
go_repository(
name = "in_gopkg_src_d_go_billy_v4",
importpath = "gopkg.in/src-d/go-billy.v4",
sum = "h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg=",
version = "v4.3.2",
)
go_repository(
name = "in_gopkg_src_d_go_git_fixtures_v3",
importpath = "gopkg.in/src-d/go-git-fixtures.v3",
sum = "h1:ivZFOIltbce2Mo8IjzUHAFoq/IylO9WHhNOAJK+LsJg=",
version = "v3.5.0",
)
go_repository(
name = "in_gopkg_src_d_go_git_v4",
importpath = "gopkg.in/src-d/go-git.v4",
sum = "h1:SRtFyV8Kxc0UP7aCHcijOMQGPxHSmMOPrzulQWolkYE=",
version = "v4.13.1",
)
go_repository(
name = "in_gopkg_tomb_v1",
importpath = "gopkg.in/tomb.v1",
sum = "h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=",
version = "v1.0.0-20141024135613-dd632973f1e7",
)
go_repository(
name = "in_gopkg_warnings_v0",
importpath = "gopkg.in/warnings.v0",
sum = "h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=",
version = "v0.1.2",
)
go_repository(
name = "in_gopkg_yaml_v2",
importpath = "gopkg.in/yaml.v2",
replace = "gopkg.in/yaml.v2",
sum = "h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=",
version = "v2.4.0",
)
go_repository(
name = "in_gopkg_yaml_v3",
importpath = "gopkg.in/yaml.v3",
sum = "h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=",
version = "v3.0.0-20210107192922-496545a6307b",
)
go_repository(
name = "io_etcd_go_bbolt",
importpath = "go.etcd.io/bbolt",
sum = "h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=",
version = "v1.3.5",
)
go_repository(
name = "io_etcd_go_etcd",
importpath = "go.etcd.io/etcd",
sum = "h1:1JFLBqwIgdyHN1ZtgjTBwO+blA6gVOmZurpiMEsETKo=",
version = "v0.5.0-alpha.5.0.20200910180754-dd1b699fc489",
)
go_repository(
name = "io_etcd_go_etcd_api_v3",
build_file_proto_mode = "disable",
importpath = "go.etcd.io/etcd/api/v3",
sum = "h1:GsV3S+OfZEOCNXdtNkBSR7kgLobAa/SO6tCxRa0GAYw=",
version = "v3.5.0",
)
go_repository(
name = "io_etcd_go_etcd_client_pkg_v3",
importpath = "go.etcd.io/etcd/client/pkg/v3",
sum = "h1:2aQv6F436YnN7I4VbI8PPYrBhu+SmrTaADcf8Mi/6PU=",
version = "v3.5.0",
)
go_repository(
name = "io_etcd_go_etcd_client_v2",
importpath = "go.etcd.io/etcd/client/v2",
sum = "h1:ftQ0nOOHMcbMS3KIaDQ0g5Qcd6bhaBrQT6b89DfwLTs=",
version = "v2.305.0",
)
go_repository(
name = "io_etcd_go_etcd_client_v3",
build_file_proto_mode = "disable",
importpath = "go.etcd.io/etcd/client/v3",
sum = "h1:62Eh0XOro+rDwkrypAGDfgmNh5Joq+z+W9HZdlXMzek=",
version = "v3.5.0",
)
go_repository(
name = "io_k8s_api",
build_file_proto_mode = "disable",
importpath = "k8s.io/api",
sum = "h1:bgdZrW++LqgrLikWYNruIKAtltXbSCX2l5mJu11hrVE=",
version = "v0.20.6",
)
go_repository(
name = "io_k8s_apiextensions_apiserver",
build_file_proto_mode = "disable",
importpath = "k8s.io/apiextensions-apiserver",
sum = "h1:ZrXQeslal+6zKM/HjDXLzThlz/vPSxrfK3OqL8txgVQ=",
version = "v0.20.1",
)
go_repository(
name = "io_k8s_apimachinery",
build_file_proto_mode = "disable",
importpath = "k8s.io/apimachinery",
sum = "h1:R5p3SlhaABYShQSO6LpPsYHjV05Q+79eBUR0Ut/f4tk=",
version = "v0.20.6",
)
go_repository(
name = "io_k8s_apiserver",
importpath = "k8s.io/apiserver",
sum = "h1:NnVriMMOpqQX+dshbDoZixqmBhfgrPk2uOh2fzp9vHE=",
version = "v0.20.6",
)
go_repository(
name = "io_k8s_cli_runtime",
importpath = "k8s.io/cli-runtime",
sum = "h1:ZRrIC1ly/f86BlF0r22KSMtKo3xbeYegAqaH/tEen94=",
version = "v0.20.6",
)
go_repository(
name = "io_k8s_client_go",
importpath = "k8s.io/client-go",
sum = "h1:nJZOfolnsVtDtbGJNCxzOtKUAu7zvXjB8+pMo9UNxZo=",
version = "v0.20.6",
)
go_repository(
name = "io_k8s_code_generator",
importpath = "k8s.io/code-generator",
sum = "h1:kp65Y6kF6A4+5PvSNvXWSI5p5vuA9tUxEqEZciPw+7Q=",
version = "v0.20.6",
)
go_repository(
name = "io_k8s_component_base",
importpath = "k8s.io/component-base",
sum = "h1:G0inASS5vAqCpzs7M4Sp9dv9d0aElpz39zDHbSB4f4g=",
version = "v0.20.6",
)
go_repository(
name = "io_k8s_component_helpers",
importpath = "k8s.io/component-helpers",
sum = "h1:lp+Y2AFn+gAEEXl+DbOuLgeWGVwJaF/X1o3I9iLHebE=",
version = "v0.20.6",
)
go_repository(
name = "io_k8s_cri_api",
importpath = "k8s.io/cri-api",
sum = "h1:iXX0K2pRrbR8yXbZtDK/bSnmg/uSqIFiVJK1x4LUOMc=",
version = "v0.20.6",
)
go_repository(
name = "io_k8s_gengo",
importpath = "k8s.io/gengo",
sum = "h1:JApXBKYyB7l9xx+DK7/+mFjC7A9Bt5A93FPvFD0HIFE=",
version = "v0.0.0-20201113003025-83324d819ded",
)
go_repository(
name = "io_k8s_klog_v2",
importpath = "k8s.io/klog/v2",
sum = "h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts=",
version = "v2.8.0",
)
go_repository(
name = "io_k8s_kube_openapi",
importpath = "k8s.io/kube-openapi",
sum = "h1:sOHNzJIkytDF6qadMNKhhDRpc6ODik8lVC6nOur7B2c=",
version = "v0.0.0-20201113171705-d219536bb9fd",
)
go_repository(
name = "io_k8s_kubectl",
importpath = "k8s.io/kubectl",
sum = "h1:G0a3fJXvypzN1fDcO+clH131rpDxNtDZIgSuogSCtng=",
version = "v0.20.6",
)
go_repository(
name = "io_k8s_kubernetes",
importpath = "k8s.io/kubernetes",
sum = "h1:qTfB+u5M92k2fCCCVP2iuhgwwSOv1EkAkvQY1tQODD8=",
version = "v1.13.0",
)
go_repository(
name = "io_k8s_metrics",
importpath = "k8s.io/metrics",
sum = "h1:HVAYYKA/9HhKQX952EwE4hejvD61UALLpqYRYvRSvGo=",
version = "v0.20.6",
)
go_repository(
name = "io_k8s_sigs_apiserver_network_proxy_konnectivity_client",
importpath = "sigs.k8s.io/apiserver-network-proxy/konnectivity-client",
sum = "h1:4uqm9Mv+w2MmBYD+F4qf/v6tDFUdPOk29C095RbU5mY=",
version = "v0.0.15",
)
go_repository(
name = "io_k8s_sigs_controller_runtime",
importpath = "sigs.k8s.io/controller-runtime",
sum = "h1:GMHvzjTmaWHQB8HadW+dIvBoJuLvZObYJ5YoZruPRao=",
version = "v0.8.3",
)
go_repository(
name = "io_k8s_sigs_kustomize",
importpath = "sigs.k8s.io/kustomize",
sum = "h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0=",
version = "v2.0.3+incompatible",
)
go_repository(
name = "io_k8s_sigs_structured_merge_diff_v4",
importpath = "sigs.k8s.io/structured-merge-diff/v4",
sum = "h1:4oyYo8NREp49LBBhKxEqCulFjg26rawYKrnCmg+Sr6c=",
version = "v4.0.3",
)
go_repository(
name = "io_k8s_sigs_yaml",
importpath = "sigs.k8s.io/yaml",
sum = "h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=",
version = "v1.2.0",
)
go_repository(
name = "io_k8s_utils",
importpath = "k8s.io/utils",
sum = "h1:0T5IaWHO3sJTEmCP6mUlBvMukxPKUQWqiI/YuiBNMiQ=",
version = "v0.0.0-20210111153108-fddb29f9d009",
)
go_repository(
name = "io_opencensus_go",
importpath = "go.opencensus.io",
sum = "h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=",
version = "v0.23.0",
)
go_repository(
name = "io_rsc_pdf",
importpath = "rsc.io/pdf",
sum = "h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4=",
version = "v0.1.1",
)
go_repository(
name = "net_starlark_go",
importpath = "go.starlark.net",
sum = "h1:lkYv5AKwvvduv5XWP6szk/bvvgO6aDeUujhZQXIFTes=",
version = "v0.0.0-20190702223751-32f345186213",
)
go_repository(
name = "org_bazil_fuse",
importpath = "bazil.org/fuse",
sum = "h1:SC+c6A1qTFstO9qmB86mPV2IpYme/2ZoEQ0hrP+wo+Q=",
version = "v0.0.0-20160811212531-371fbbdaa898",
)
go_repository(
name = "org_golang_google_api",
importpath = "google.golang.org/api",
replace = "google.golang.org/api",
sum = "h1:4sAyIHT6ZohtAQDoxws+ez7bROYmUlOVvsUscYCDTqA=",
version = "v0.43.0",
)
go_repository(
name = "org_golang_google_appengine",
importpath = "google.golang.org/appengine",
sum = "h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=",
version = "v1.6.7",
)
go_repository(
name = "org_golang_google_cloud",
importpath = "google.golang.org/cloud",
sum = "h1:Cpp2P6TPjujNoC5M2KHY6g7wfyLYfIWRZaSdIKfDasA=",
version = "v0.0.0-20151119220103-975617b05ea8",
)
go_repository(
name = "org_golang_google_genproto",
importpath = "google.golang.org/genproto",
replace = "google.golang.org/genproto",
sum = "h1:vVeMwkgjjF0rgUTvAJkHJC5hUf50yFdZFDpBgK2kVXI=",
version = "v0.0.0-20210329143202-679c6ae281ee",
)
go_repository(
name = "org_golang_google_grpc",
importpath = "google.golang.org/grpc",
replace = "google.golang.org/grpc",
sum = "h1:cmUfbeGKnz9+2DD/UYsMQXeqbHZqZDs4eQwW0sFOpBY=",
version = "v1.36.1",
)
go_repository(
name = "org_golang_google_protobuf",
importpath = "google.golang.org/protobuf",
sum = "h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=",
version = "v1.26.0",
)
go_repository(
name = "org_golang_x_arch",
importpath = "golang.org/x/arch",
sum = "h1:XmKBi9R6duxOB3lfc72wyrwiOY7X2Jl1wuI+RFOyMDE=",
version = "v0.0.0-20201008161808-52c3e6f60cff",
)
go_repository(
name = "org_golang_x_crypto",
importpath = "golang.org/x/crypto",
replace = "github.com/golang/crypto",
sum = "h1:8llN7yzwGxwu9113L6qZhy5GAsXqM0CwzpGy7Jg4d8A=",
version = "v0.0.0-20210322153248-0c34fe9e7dc2",
)
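# Note: this and the following golang.org/x/* rules use the `replace` attribute to
# fetch the GitHub mirrors (github.com/golang/<name>) rather than the vanity import
# paths, much like a `replace` directive in go.mod; the importpath seen by Go code
# stays the same.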
go_repository(
name = "org_golang_x_exp",
importpath = "golang.org/x/exp",
replace = "github.com/golang/exp",
sum = "h1:Y1U71lBbDwoAU53t+H+zyzEHffhVdtwbhunE3d+c248=",
version = "v0.0.0-20210220032938-85be41e4509f",
)
go_repository(
name = "org_golang_x_image",
importpath = "golang.org/x/image",
replace = "github.com/golang/image",
sum = "h1:2RD9fgAaBaM2OmMeoOZhrVFJncb2ZvYbaQBc93IRHKg=",
version = "v0.0.0-20210220032944-ac19c3e999fb",
)
go_repository(
name = "org_golang_x_lint",
importpath = "golang.org/x/lint",
replace = "github.com/golang/lint",
sum = "h1:yoAjkFah23+tVFVuPMY+qVhs1qr4MxUlH0TAh80VbOw=",
version = "v0.0.0-20201208152925-83fdc39ff7b5",
)
go_repository(
name = "org_golang_x_mobile",
importpath = "golang.org/x/mobile",
replace = "github.com/golang/mobile",
sum = "h1:aCGaFYtce0DYTWxDA9R7t3PWnA41u4L9vVB5g5/WWgM=",
version = "v0.0.0-20210220033013-bdb1ca9a1e08",
)
go_repository(
name = "org_golang_x_mod",
importpath = "golang.org/x/mod",
replace = "github.com/golang/mod",
sum = "h1:lH77g1z4f17x8y6SttLVCeJEBqNXOmSodud0ja0tP60=",
version = "v0.4.2",
)
go_repository(
name = "org_golang_x_net",
importpath = "golang.org/x/net",
replace = "github.com/golang/net",
sum = "h1:TlSK6ePheivtSKLVN+ngvv+GsazgRZ5EGFnJcDU3PSc=",
version = "v0.0.0-20210330142815-c8897c278d10",
)
go_repository(
name = "org_golang_x_oauth2",
importpath = "golang.org/x/oauth2",
replace = "github.com/golang/oauth2",
sum = "h1:Zc7CevDpcBZhHjqOaX0sV8wd+9SOK8Ga79yw9fOhbxA=",
version = "v0.0.0-20210323180902-22b0adad7558",
)
go_repository(
name = "org_golang_x_sync",
importpath = "golang.org/x/sync",
replace = "github.com/golang/sync",
sum = "h1:SaEy0CSD5VHxfkVDQp+KnOeeiSEG4LrHDCKqu9MskrQ=",
version = "v0.0.0-20210220032951-036812b2e83c",
)
go_repository(
name = "org_golang_x_sys",
importpath = "golang.org/x/sys",
replace = "github.com/golang/sys",
sum = "h1:lYOSQzf0I48VjRWUcpmAx0uYpuOQKKW7jwuyKDxh7eo=",
version = "v0.0.0-20210326220804-49726bf1d181",
)
go_repository(
name = "org_golang_x_term",
importpath = "golang.org/x/term",
replace = "github.com/golang/term",
sum = "h1:Fl0EEbnS9r4M/efcJKb0c2ahDsLrvuA+DkkBfLQgYWA=",
version = "v0.0.0-20210317153231-de623e64d2a6",
)
go_repository(
name = "org_golang_x_text",
importpath = "golang.org/x/text",
replace = "github.com/golang/text",
sum = "h1:ohhPA4cGdIMkmMvhC+HU6qBl8zeOoM8M8o8N6mbcL3U=",
version = "v0.3.5",
)
go_repository(
name = "org_golang_x_time",
importpath = "golang.org/x/time",
replace = "github.com/golang/time",
sum = "h1:xgei/lBA0MICqy4kX0+HHp9N3aFDmulXmfDG4mvhA+c=",
version = "v0.0.0-20210220033141-f8bda1e9f3ba",
)
go_repository(
name = "org_golang_x_tools",
importpath = "golang.org/x/tools",
replace = "github.com/golang/tools",
sum = "h1:5KUMmBvKS2Hhl2vb1USRzLZbaiQ+cPEftIk2+QH7mAI=",
version = "v0.1.0",
)
go_repository(
name = "org_golang_x_xerrors",
importpath = "golang.org/x/xerrors",
replace = "github.com/golang/xerrors",
sum = "h1:jhmkoTjuPVg+HX0++Mq184QYuCgK29clNAbkZwI8/0Y=",
version = "v0.0.0-20200804184101-5ec99f83aff1",
)
go_repository(
name = "org_mongodb_go_mongo_driver",
importpath = "go.mongodb.org/mongo-driver",
replace = "go.mongodb.org/mongo-driver",
sum = "h1:9nOVLGDfOaZ9R0tBumx/BcuqkbFpyTCU2r/Po7A2azI=",
version = "v1.5.1",
)
go_repository(
name = "org_mozilla_go_pkcs7",
importpath = "go.mozilla.org/pkcs7",
sum = "h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M=",
version = "v0.0.0-20200128120323-432b2356ecb1",
)
go_repository(
name = "org_uber_go_atomic",
importpath = "go.uber.org/atomic",
sum = "h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=",
version = "v1.7.0",
)
go_repository(
name = "org_uber_go_goleak",
importpath = "go.uber.org/goleak",
sum = "h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=",
version = "v1.1.10",
)
go_repository(
name = "org_uber_go_multierr",
importpath = "go.uber.org/multierr",
sum = "h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=",
version = "v1.6.0",
)
go_repository(
name = "org_uber_go_tools",
importpath = "go.uber.org/tools",
sum = "h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=",
version = "v0.0.0-20190618225709-2cfd321de3ee",
)
go_repository(
name = "org_uber_go_zap",
importpath = "go.uber.org/zap",
sum = "h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U=",
version = "v1.17.0",
)
go_repository(
name = "tools_gotest",
importpath = "gotest.tools",
sum = "h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=",
version = "v2.2.0+incompatible",
)
go_repository(
name = "tools_gotest_v3",
importpath = "gotest.tools/v3",
sum = "h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=",
version = "v3.0.3",
)
go_repository(
name = "xyz_gomodules_jsonpatch_v2",
importpath = "gomodules.xyz/jsonpatch/v2",
sum = "h1:Phva6wqu+xR//Njw6iorylFFgn/z547tw5Ne3HZPQ+k=",
version = "v2.1.0",
)
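# Each go_repository rule above exposes the module as an external Bazel repository
# named by its `name` attribute. A hypothetical BUILD file (names and paths here are
# placeholders, not part of this repo) would depend on one roughly like this; the
# target may be ":errors" or ":go_default_library" depending on Gazelle's naming
# convention:
#
#   go_library(
#       name = "mylib",
#       srcs = ["mylib.go"],
#       importpath = "example.com/mylib",
#       deps = ["@com_github_pkg_errors//:errors"],
#   )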
|
py | b40238127302bff725304a524b70c047be411b3b | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import CollegicoinTestFramework
from test_framework.util import *
import threading
class LongpollThread(threading.Thread):
def __init__(self, node):
threading.Thread.__init__(self)
# query current longpollid
templat = node.getblocktemplate()
self.longpollid = templat['longpollid']
# create a new connection to the node, we can't use the same
# connection from two threads
self.node = get_rpc_proxy(node.url, 1, timeout=600)
def run(self):
self.node.getblocktemplate({'longpollid':self.longpollid})
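# getblocktemplate's "longpollid" (BIP 22 long polling) lets a caller block until the
# node's block template changes, i.e. when a new block arrives or new transactions
# enter the mempool; the tests below exercise both triggers as well as the no-change case.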
class GetBlockTemplateLPTest(CollegicoinTestFramework):
'''
Test longpolling with getblocktemplate.
'''
def run_test(self):
print "Warning: this test will take about 70 seconds in the best case. Be patient."
wait_to_sync(self.nodes[0])
self.nodes[0].generate(10)
templat = self.nodes[0].getblocktemplate()
longpollid = templat['longpollid']
# longpollid should not change between successive invocations if nothing else happens
templat2 = self.nodes[0].getblocktemplate()
assert(templat2['longpollid'] == longpollid)
# Test 1: test that the longpoll waits if we do nothing
thr = LongpollThread(self.nodes[0])
thr.start()
# check that thread still lives
thr.join(5) # wait 5 seconds or until thread exits
assert(thr.is_alive())
# Test 2: test that longpoll will terminate if another node generates a block
self.nodes[1].generate(1) # generate a block on another node
# check that thread will exit now that a new block has arrived from the other node
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 3: test that longpoll will terminate if we generate a block ourselves
thr = LongpollThread(self.nodes[0])
thr.start()
self.nodes[0].generate(1) # generate a block on our own node
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 4: test that introducing a new transaction into the mempool will terminate the longpoll
thr = LongpollThread(self.nodes[0])
thr.start()
# generate a random transaction and submit it
(txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), Decimal("0.0"), Decimal("0.001"), 20)
# after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned
thr.join(60 + 20)
assert(not thr.is_alive())
if __name__ == '__main__':
GetBlockTemplateLPTest().main()
|
py | b40238b2c321ffa330fc8d82f571b99c929b6f46 | import numpy as np
import pymc as pm
import matplotlib.pyplot as plt
plt.style.use('ggplot')
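# Simulated switchpoint model using PyMC 2.x random-number helpers:
# tau ~ DiscreteUniform(0, 80) is the day the messaging rate changes,
# lambda_1 and lambda_2 ~ Exponential(alpha) are the daily rates before and after tau,
# and each day's message count is drawn from a Poisson with the rate in effect that day.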
tau = pm.rdiscrete_uniform(0, 80)
print("tau: {0:.2f}".format(tau))
alpha = 1./20.
lambda_1, lambda_2 = pm.rexponential(alpha, 2)
print("lambda_1: {0:.2f} lambda_2:{1:.2f}".format(lambda_1, lambda_2))
lambda_ = np.r_[lambda_1 * np.ones(tau), lambda_2 * np.ones(80 - tau)]
print("lambda_: {0}".format(lambda_))
data = pm.rpoisson(lambda_)
print("data: {0}".format(data))
plt.bar(np.arange(80), data)
plt.bar(tau - 1, data[tau-1], color="r", label="user behavior change")
plt.xlabel("Time (days)")
plt.ylabel("Text messages received")
plt.title("Simulation")
plt.xlim(0, 80)
plt.legend()
plt.show() |
py | b402391165d57c59b1ded55d69ca140aeb5fadc6 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 1/10/21
# @Author : Daniel Ordonez
# @email : [email protected]
from math import pi as PI
import numpy as np
import matplotlib.pyplot as plt
from argparse import ArgumentParser
from slip_control.slip.slip_trajectory import SlipTrajectory
from slip_control.slip.slip_model import SlipModel, X, X_DOT, X_DDOT, Z, Z_DOT, Z_DDOT
from slip_control.controllers.target_to_states_generator import CycleStateGenerator, ForwardSpeedStateGenerator
from slip_control.controllers.diff_flat_slip_controller import SlipDiffFlatController
from slip_control.utils import plot_utils
cmap = plt.cm.get_cmap('gist_heat')
if __name__ == "__main__":
# Instantiate SLIP model
m = 80 # [kg]
r0 = 1.0 # [m]
n_legs = 1
k_rel = 10.7
slip = SlipModel(mass=m, leg_length=r0, k_rel=k_rel * n_legs)
g = SlipModel.g
# Error deviation weights during the stance trajectory
traj_weights = np.array([1., 1., 1., 1., 1., 1.])
traj_weights /= np.linalg.norm(traj_weights)
# Error deviation weights of target take-off states
take_off_state_error_weights = np.array([0.0, 1.0, 0., 1.0, 1.0, 0.])
take_off_state_error_weights /= np.linalg.norm(take_off_state_error_weights)
n_cycles = 5 # Generate a trajectory of 5 cycles
max_theta_dot = 4*PI # [rad/s] max angular leg velocity during flight
# Define a forward velocity
forward_speed = 4 * slip.r0 # [m/s]
# Define a desired gait duty cycle (time of stance / time of cycle) in [0.2, 1.0]
duty_cycle = 0.8
z_init = slip.r0
# Set an initial state (assumed to be a flight phase state) [x, x', x'', z, z', z'']
init_to_state = np.array([0.0, forward_speed, 0.0, z_init, 0.0, -g])
# Set a desired take off state defining the forward and vertical velocity desired
to_des_state = init_to_state
# Configure Differentially flat controller
slip_controller = SlipDiffFlatController(slip_model=slip,
traj_weights=traj_weights,
max_flight_theta_dot=max_theta_dot,
debug=False)
to_state_generator = ForwardSpeedStateGenerator(slip_model=slip, target_state_weights=take_off_state_error_weights,
desired_forward_speed=forward_speed,
desired_duty_cycle=duty_cycle)
slip_controller.target_to_state_generator = to_state_generator
# Generate SLIP trajectory tree without future cycle planning
tree = slip_controller.generate_slip_trajectory_tree(desired_gait_cycles=n_cycles,
initial_state=init_to_state,
max_samples_per_cycle=30,
angle_epsilon=np.deg2rad(.02),
look_ahead_cycles=0)
slip_traj_no_future = tree.get_optimal_trajectory()
plot_utils.plot_slip_trajectory(slip_traj_no_future, plot_passive=True, plot_td_angles=True,
title="Without future cycle planning",
color=(23/255., 0/255., 194/255.))
plt.show()
# Generate SLIP trajectory tree with future cycle planning
tree = slip_controller.generate_slip_trajectory_tree(desired_gait_cycles=n_cycles,
initial_state=init_to_state,
max_samples_per_cycle=30,
angle_epsilon=np.deg2rad(.02),
look_ahead_cycles=1)
slip_traj = tree.get_optimal_trajectory()
plot_utils.plot_slip_trajectory(slip_traj, plot_passive=True, plot_td_angles=True,
title="With future cycle planing",
color=(23 / 255., 154 / 255., 194 / 255.))
plt.show()
# Plot controlled trajectory tree
print("This takes a while... should optimize soon")
tree.plot()
plt.show()
# Compare first two cycles.
short_traj = SlipTrajectory(slip, slip_gait_cycles=slip_traj.gait_cycles[:2])
short_traj_no_future = SlipTrajectory(slip, slip_gait_cycles=slip_traj_no_future.gait_cycles[:2])
axs = plot_utils.plot_slip_trajectory(short_traj, plot_passive=True, plot_td_angles=True,
color=(23 / 255., 154 / 255., 194 / 255.))
axs = plot_utils.plot_slip_trajectory(short_traj_no_future, plot_passive=True, plot_td_angles=True, plt_axs=axs,
color=(23/255., 0/255., 194/255.))
plt.show()
# Plot limit cycles of controlled trajectory
phase_axs = plot_utils.plot_limit_cycles(slip_traj)
plt.show() |
py | b40239ea07f10d64b498ff40fc86389c3d69ebd5 | import pytest
from vasaloppet.MultiLevelCacheDic import *
def test_cachedic_tryget():
cache = MultiLevelCacheDic[int, str](lambda x: str(x))
key = 314
value = cache.TryGet(key)
assert value is None
def test_cachedic_getorinit():
cache = MultiLevelCacheDic[int, str](lambda x: str(x))
key = 314
value = cache.GetOrInit(key)
assert value == '314'
def test_cachedic_addorupdate():
cache = MultiLevelCacheDic[int, str](lambda x: str(x))
key = 314
assert cache.TryGet(key) == None
cache.AddOrUpdate(key, 'pi')
assert cache.TryGet(key) == 'pi'
cache.AddOrUpdate(key, 'tau')
assert cache.TryGet(key) == 'tau'
def test_cachedic_integrate_count_str():
cache = MultiLevelCacheDic[int, str](lambda x: str(x))
count = lambda x: len(x)
assert cache.Integrate(count) == 0
cache.GetOrInit(123)
assert cache.Integrate(count) == 1
|
py | b4023a165ade9037df585a7b530f206ae4ff0971 | from django.utils.translation import ugettext
from django.conf import settings
from django.utils import translation
def get_devilry_hard_deadline_info_text(setting_name):
"""
Get the hard deadline info text from settings based on the current language code.
Args:
setting_name: The name of the setting to use.
Returns:
str: Info text.
"""
info_dict = getattr(settings, setting_name, None)
languagecode = translation.get_language()
if info_dict and languagecode in info_dict:
return info_dict[languagecode]
try:
default_info = info_dict['__default']
except KeyError:
raise ValueError("User error: The {} must contain a '__default' info setting. "
"This exists by default and has been wrongly removed during setup.".format(setting_name))
return ugettext(default_info)
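# Illustrative shape of the settings dict this helper expects. The setting name
# and texts below are placeholders (assumptions), not taken from a real Devilry
# deployment; the only hard requirement is the '__default' key:
#
#   DEVILRY_HARD_DEADLINE_INFO = {
#       '__default': 'Hard deadlines are enforced for this assignment.',
#       'nb': 'Harde tidsfrister blir haandhevet for denne oppgaven.',
#   }
#
#   get_devilry_hard_deadline_info_text('DEVILRY_HARD_DEADLINE_INFO')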
|
py | b4023a71536ce8f22c3634b80dc70d57646364a1 | import numpy as np
from scipy.ndimage import label
import torch
from torch.nn import functional as F
from torch.nn import ReLU
def create_gbackprop(model, device, target_class, dataloader=None, id_series=None, array_series=None, clip = 0):
"""
Create a guided-backprop saliency map either by providing a series id and looping through loader
until the id is found, or directly by providing the series as numerical array.
If none is provided, but a dataloader is, will just pick next trajectory there
:param model: variable containing the neural net.
:param device: device of model.
:param target_class: Backprop for which class? Give the index of the target class.
:param dataloader: must have batch size 1. In a sample, the series must be returned as 'series',
identifier as 'identifier'.
:param id_series: If provided, loop through loader to look for this series.
:param array_series: Manually provide a sample, must be a tensor.
:param clip: Clip max value to n standard deviation.
:return: Saliency map as numpy array
"""
# Pick series either from input, dataloader and ID, or next from data loader
# ----------------------------------------------------------------------------
series, id_series = select_series(dataloader, id_series, array_series, device, return_id=True)
# ----------------------------------------------------------------------------
# Modify the backpropagation through ReLU layers (guided backprop)
def relu_hook_function(module, grad_in, grad_out):
"""If there is a negative gradient, changes it to zero"""
if isinstance(module, ReLU):
return (torch.clamp(grad_in[0], min=0.0),)
# Loop through layers, hook up ReLUs with relu_hook_function
# backward hook will modify gradient in ReLU during backprop
hook_idx = 0
for pos, module in model.features._modules.items():
if isinstance(module, ReLU):
# Use unique names for each hook in order to be able to remove them later
hook_name = "hook" + str(hook_idx)
exec(hook_name + "= module.register_backward_hook(relu_hook_function)")
hook_idx += 1
# Create saliency map
# Start recording operations on input
series.requires_grad_()
model.batch_size = 1
output = model(series)
model.zero_grad()
# Target for backprop
one_hot_output = torch.FloatTensor(1, output.shape[-1]).zero_()
one_hot_output[0][target_class] = 1
one_hot_output = one_hot_output.double()
one_hot_output = one_hot_output.to(device)
# Vanilla Backprop
output.backward(gradient=one_hot_output)
# Gradients wrt inputs are the saliency map
saliency = series.grad.squeeze().cpu().numpy()
if clip:
saliency = np.clip(saliency, a_min=None, a_max = np.mean(saliency)+clip*np.std(saliency))
# Remove hooks from model
for idx in range(hook_idx):
hook_name = "hook" + str(idx)
exec(hook_name + ".remove()")
return saliency
def create_cam(model, device, dataloader=None, id_series=None, array_series=None, feature_layer='features',
clip=0, target_class='prediction'):
"""
Create class activation map either by providing a series id and looping through loader
until the id is found, or directly by providing the series as numerical array.
If none is provided, but a dataloader is, will just pick next trajectory there
:param model: variable containing the neural net.
:param device: device of model.
:param dataloader: must have batch size 1. In a sample, the series must be returned as 'series',
identifier as 'identifier'.
:param id_series: If provided, loop through loader to look for this series.
:param array_series: Manually provide a sample, must be a tensor.
:param feature_layer: Name of the last convolution layer.
:param clip: Clip max value to n standard deviation.
:param target_class: Create a CAM for which class? If 'prediction', creates a CAM for the predicted class. Otherwise
give index of the class.
:return: CAM as a numpy array.
"""
# Def and checks
def returnCAM(feature_conv, weight_softmax, class_idx):
"""
Perform CAM computation: use weights of softmax to weight individual filter response in the filter layer.
feature_conv: output of last convolution before global average pooling.
weight_soft_max: array with all softmax weights
class_idc: index of the class for which to produce the CAM.
"""
# Batch size, number channels (features, number of filters in convolution layer),
# height (nber measurements), width (length measurements)
bz, nc, h, w = feature_conv.shape
cam = weight_softmax[class_idx].dot(feature_conv.reshape((nc, h*w)))
cam = cam - np.min(cam)
cam = cam.reshape(h,w)
return cam
def returnCAM1D(feature_conv, weight_softmax, class_idx):
"""
Special case of CAM when input has only one measurement. Identical to returnCAM except for shape that has one
less dimension.
"""
# Batch size, number channels, length
bz, nc, l = feature_conv.shape
cam = weight_softmax[class_idx].dot(feature_conv.reshape((nc, l)))
cam = cam - np.min(cam)
return cam
if model.batch_size != 1:
raise ValueError('Batch size of model must be 1')
# Hook the layer output before average pooling (last feature layer)
feature_blobs = []
def hook_feature(module, input, output):
feature_blobs.append(output.cpu().data.numpy())
model._modules.get(feature_layer).register_forward_hook(hook_feature)
# Get weights associated with each average pool element
params = list(model.parameters())
weight_softmax = np.squeeze(params[-2].cpu().data.numpy())
# Get series and id for plot
series, id_series = select_series(dataloader, id_series, array_series, device, return_id=True)
# Create CAM
logit = model(series)
h_x = F.softmax(logit, dim=1).data.squeeze()
if target_class == 'prediction':
# Return CAM for predicted class
probs, idx = h_x.sort(dim=0, descending=True)
if len(feature_blobs[0].shape) == 3:
CAM = returnCAM1D(feature_blobs[0], weight_softmax, [idx[0].item()]).squeeze()
elif len(feature_blobs[0].shape) > 3:
CAM = returnCAM(feature_blobs[0], weight_softmax, [idx[0].item()]).squeeze()
else:
if len(feature_blobs[0].shape) == 3:
CAM = returnCAM1D(feature_blobs[0], weight_softmax, target_class).squeeze()
elif len(feature_blobs[0].shape) > 3:
CAM = returnCAM(feature_blobs[0], weight_softmax, target_class).squeeze()
# Clip high values to improve map readability
if clip:
np.clip(CAM, a_min=None, a_max=np.mean(CAM) + clip*np.std(CAM), out=CAM)
return CAM
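# Illustrative call (the model, device and loader names below are placeholders):
#   cam = create_cam(model, device, dataloader=test_loader,
#                    feature_layer='features', target_class='prediction')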
def returnCAM(feature_conv, weight_softmax, class_idx):
bz, nc, l = feature_conv.shape
cam = weight_softmax[class_idx].dot(feature_conv.reshape((nc, l)))
cam = cam - np.min(cam)
return cam
def extend_segments1D(array, max_ext, direction='both'):
"""Extend regions of 1D binarized array."""
# Spot regions with derivatives and extend n times
assert len(array.shape) == 1
ext_array = np.array(array).copy()
for i in range(max_ext):
diff = np.diff(ext_array)
right_ind = (np.where(diff == -1)[0] + 1,) # back to tuple to allow direct array indexing
left_ind = np.where(diff == 1)
if direction=='right' or direction=='both':
ext_array[right_ind] = 1
if direction=='left' or direction=='both':
ext_array[left_ind] = 1
return ext_array
def extend_segments(array, max_ext, direction ='both'):
"""Extend regions of 1 or 2D binarized array. If 2D, each row will be extended independently."""
assert len(array.shape) == 1 or len(array.shape) == 2
if len(array.shape) == 1:
ext_array = extend_segments1D(array, max_ext, direction)
elif len(array.shape) == 2:
ext_array = np.array(array).copy()
for irow in range(array.shape[0]):
ext_array[irow, :] = extend_segments1D(array[irow, :], max_ext, direction)
return ext_array
def longest_segments(array, k=None, structure=None):
"""Return the k longest segments of 1s in a binary array. Structure must be a valid argument of
scipy.ndimage.label. By default, segments can be connected vertically and horizontally, pass appropriate structure
for different behaviour. Output is a dictionary where values are the size of the segment and keys are tuples that
indicate all the positions of a segment, just like numpy.where(). So can use the keys to directly subset an numpy
array at the positions of the segments."""
assert np.all(np.isin(array, [0,1]))
# Label each segment with a different integer, 0s are NOT labeled (i.e. remain 0)
array_segments, num_segments = label(array, structure=structure)
label_segments, size_segments = np.unique(array_segments, return_counts=True)
# Special case when only 1s in the array
if not np.all(array==1):
# np.unique returns ordered values, so 0 is always first
label_segments = np.delete(label_segments, 0)
size_segments = np.delete(size_segments, 0)
# Longest segments first, along with label
sorted_segments = sorted(zip(size_segments, label_segments), reverse=True)
if k:
sorted_segments = sorted_segments[:k]
# Need to convert np.where output to tuple for hashable
out = {tuple(tuple(i) for i in np.where(array_segments == lab)): size for size, lab in sorted_segments}
return out
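# Hand-worked sketch of longest_segments() on a small binary array (not a doctest):
#   longest_segments(np.array([1, 1, 0, 1]), k=1)
#   -> {((0, 1),): 2}   # key is an np.where()-style coordinate tuple, value is the segment length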
def extract_pattern(origin_array, coord_tuple, NA_fill = True):
"""
Extract a pattern from an array via its list of coordinates stored in a tuple (as returned by np.where() or
longest_segments()). The pattern has rectangular shape, with NA padding if NA_fill is True. This is useful to export
patterns in 2 or more dimensions and plot them/compute distances between them.
:param coord_tuple: a tuple of coordinates as returned by np.where(). For example ((x1,x2,x3), (y1,y2,y3)).
:param origin_array: an array from which to extract the pattern.
:param NA_fill bool, whether to fill parts of the rectangle not listed in coord_tuple. IF False, will use values
from origin_array.
:return: a rectangular 2D numpy array with the pattern, padded with NAs. Number of rows from origin_array is
maintained.
"""
assert len(origin_array.shape) == 1 or len(origin_array.shape) == 2
assert len(origin_array.shape) == len(coord_tuple)
if NA_fill:
out = np.full_like(origin_array, np.nan)
if len(origin_array.shape) == 1:
out[coord_tuple] = origin_array[coord_tuple]
out = out[np.min(coord_tuple[1]) : (np.max(coord_tuple[1]) + 1)]
elif len(origin_array.shape) == 2:
out[coord_tuple] = origin_array[coord_tuple]
out = out[:, np.min(coord_tuple[1]) : (np.max(coord_tuple[1]) + 1)]
elif len(origin_array.shape) == 1:
out = origin_array[np.min(coord_tuple) : (np.max(coord_tuple)+1)]
elif len(origin_array.shape) == 2:
out = origin_array[:, np.min(coord_tuple[1]) : (np.max(coord_tuple[1])+1)]
return out
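# Illustrative combination with longest_segments(); 'saliency_map' and 'binary_mask'
# are placeholder names for float arrays of matching shape:
#   segs = longest_segments(binary_mask, k=1)
#   pattern = extract_pattern(saliency_map, next(iter(segs)), NA_fill=True)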
def select_series(dataloader=None, id_series=None, array_series=None, device=None, return_id=True):
"""
Used in create_*_maps to select a series either from a dataloader with ID or directly use provided series. Can also
provide a dataloader without ID to simply pick up next series in the loader.
:return: The series properly formatted
"""
flag_series = True
if id_series is not None and array_series is not None:
raise ValueError('At most one of "id_series" and "array_series" can be provided.')
# If the series is provided as in ID, loop through loader until found
if id_series:
# go to list because typically provided as string but pytorch batch convert to list
id_series = [id_series]
if dataloader.batch_size != 1:
print("Size of dataloader must be 1")
return
for sample in dataloader:
if sample['identifier'] == id_series:
series = sample['series']
series = series.to(device)
if len(series.shape) == 1:
series = series.view(1, len(series))
# uni: batch, 1 dummy channel, length TS
# (1,1,length) for uni; (1,1,2,length) for bi
assert len(series.shape) == 2
nchannel, univar_length = series.shape
if nchannel == 1:
view_size = (1, 1, univar_length)
elif nchannel >= 2:
view_size = (1, 1, nchannel, univar_length)
series = series.view(view_size)  # reshape as in the other branches
flag_series = False
break
# If not found
if flag_series:
print('ID not found in the dataloader')
return
if array_series is not None:
series = array_series
series = series.double()
series = series.to(device)
if len(series.shape) == 1:
series = series.view(1, len(series))
# uni: batch, 1 dummy channel, length TS
# (1,1,length) for uni; (1,1,2,length) for bi
assert len(series.shape) == 2
nchannel, univar_length = series.shape
if nchannel == 1:
view_size = (1, 1, univar_length)
elif nchannel >= 2:
view_size = (1, 1, nchannel, univar_length)
series = series.view(view_size)
id_series = "Series manually provided"
flag_series = False
if flag_series:
sample = next(iter(dataloader))
series, correct_class, id_series = sample['series'], sample['label'], sample['identifier']
print("When sampling from dataloader, take the actual class of the sample instead of input.")
series = series.to(device)
# uni: batch, 1 dummy channel, length TS
# (1,1,length) for uni; (1,1,2,length) for bi
assert len(series.shape) == 2
nchannel, univar_length = series.shape
if nchannel == 1:
view_size = (1, 1, univar_length)
elif nchannel >= 2:
view_size = (1, 1, nchannel, univar_length)
series = series.view(view_size)
if return_id:
return series, id_series
else:
return series
|
py | b4023b2c20eeac09aa00014b281c3d950b49a212 | # This file needs to be copied into any subfolders with Python batch scripts. |
py | b4023b5a35aedf37a594a9b24d46a884a7367b19 | import threading, requests, json, time, sys, signal
class AnomalyTrigger(threading.Thread):
#URL path to anomaly REST API
ANOMALY_URL = "/api/anomalies/"
#JSON identifier names in payload
ANMLY_NAME = "name"
ANMLY_PARAMETER = "parameter"
ANMLY_PAYLOAD = "payload"
ANMLY_TERMINATE = "terminate"
ANMLY_DELAY = "delay"
ANMLY_DELAY_FINISHED = "delay_finished"
#Own name
ANMLY_SELF = "root_cause"
def __init__(self, target_host, anomalies, interval=1000):
self.running = 1
self.terminated = 0
self.target_host = target_host + self.ANOMALY_URL
self.anomalies = anomalies
self.interval = interval
super(AnomalyTrigger, self).__init__()
def stop(self):
"""
Triggers regular stopping of the running thread.
If enabled, also triggers termination of the remote anomalies.
"""
self.running = 0
for anomaly in self.anomalies:
#Check if anomaly was already triggered
if self.__check_anomaly_delay_finished(anomaly):
#Check if triggered anomaly should be terminated
if self.ANMLY_TERMINATE in anomaly:
if anomaly[self.ANMLY_TERMINATE]:
requests.delete(self.target_host + anomaly[self.ANMLY_NAME] + "/")
def run(self):
"""
Performs cyclic triggering of remote anomalies.
Allows recursive triggering of another root_cause anomaly.
Supports initial delay for each anomaly.
"""
#Initial delay
self.__check_anomalies_for_delay()
while self.running or not self.terminated:
for anomaly in self.anomalies:
if self.running:
#Trigger only if initial delay is finished
if self.__check_anomaly_delay_finished(anomaly):
if anomaly[self.ANMLY_NAME] == self.ANMLY_SELF:
self.__handle_recursion(anomaly[self.ANMLY_PAYLOAD])
else:
requests.post(self.target_host + anomaly[self.ANMLY_NAME] + "/", \
json=anomaly[self.ANMLY_PAYLOAD])
else:
self.terminated = 1
break
time.sleep(self.interval / 1000)
def __check_anomaly_delay_finished(self, anomaly):
"""
Checks whether the initial delay for an anomaly was already performed.
Args:
anomaly:
Anomaly which should be checked.
Returns:
true if the delay for the given anomaly is finished.
false otherwise.
"""
if self.ANMLY_DELAY_FINISHED in anomaly:
if anomaly[self.ANMLY_DELAY_FINISHED]:
return True
return False
def __handle_recursion(self, payload):
"""
Handles recursive triggering of another root_cause anomaly.
Args:
payload:
Payload parameter for the root_cause which should be
triggered.
"""
p = {self.ANMLY_PARAMETER:json.dumps(payload[self.ANMLY_PARAMETER])}
requests.post(self.target_host + self.ANMLY_SELF + "/", \
json=p)
def __check_anomalies_for_delay(self):
"""
Checks and executes the delay of each anomaly.
Each anomaly's delay is handled in its own thread.
Each thread watches for termination of the parent.
"""
def waiting_loop(root, anomaly):
time_counter = 0
print(anomaly[root.ANMLY_DELAY])
while time_counter < anomaly[root.ANMLY_DELAY] and root.running:
time.sleep(0.1)
time_counter = time_counter + 100
anomaly[root.ANMLY_DELAY_FINISHED] = True
for anomaly in self.anomalies:
if self.ANMLY_DELAY in anomaly:
t = threading.Thread(target=waiting_loop, args=(self, anomaly))
t.start()
else:
anomaly[self.ANMLY_DELAY_FINISHED] = True
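# Illustrative command-line payload for this script (host, anomaly name and
# parameters below are made-up placeholders; 'delay' is in milliseconds):
#
#   python anomaly_trigger.py '{"targets": [{"host": "http://localhost:8080",
#       "anomalies": [{"name": "cpu_load", "payload": {"parameter": {"level": 0.9}},
#                      "delay": 5000, "terminate": true}]}]}'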
if __name__ == "__main__":
anomaly_trigger = []
#Signal handler for SIGINT and SIGTERM
def signal_handler(signal, frame):
for anomaly in anomaly_trigger:
anomaly.stop()
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
#Process JSON argument
data = json.loads(sys.argv[1])
#Start own thread for each target
for target in data["targets"]:
at = AnomalyTrigger(target["host"], target["anomalies"])
anomaly_trigger.append(at)
at.start()
for a in anomaly_trigger:
a.join() |
py | b4023c0523e5af87000631470808354d3c34dbd1 | """
Django settings for file_repo project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Folder path for the folder to be monitored for the new file arrivals.
FOLDER_PATH = "/Users/muhammadharisnp/ProductDevelopment/filetest/textfiles"
# Time interval for the job to monitor the folder. It is specified in seconds.
TIME_INTERVAL = 1
# The number of times you want the process to run.
SCANNING_NUMBER = 1
# Turn authentication on or off.
AUTHENTICATION_ON = True
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '##)9e1rv%p&j^$1crp5180)vxw(dq!2uqoyp7ew9l-yon05nsi'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'filesapp'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'file_repo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'file_repo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': "127.0.0.1",
'NAME': "file_repo_db",
'USER': "haris",
'PASSWORD': "12345",
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
py | b4023c7ad94c3f36446620e2c9c28ce37b9ffefc | """
WSGI config for {{cookiecutter.project_name}} project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/stable/howto/deployment/wsgi/
"""
import os
os.environ.setdefault(
"DJANGO_SETTINGS_MODULE", "{{cookiecutter.project_slug}}.settings"
)
os.environ.setdefault("DJANGO_CONFIGURATION", "Local")
from configurations.wsgi import get_wsgi_application # noqa isort:skip
application = get_wsgi_application()
|
py | b4023e034fc238619762485a93798e3ea8a10972 | import math
"""
Calculate how much one could donate if one saved the money vs donating now
"""
def adjust_for_inflation(total, inflation, career_length):
return total / math.pow(inflation, career_length)
def simulate_saving(career_length, annual_donation, growth_rate, inflation, donations_per_year):
"""
Calculate the amount of money you would have saved up if you set a certain
ANNUAL DONATION aside a certain NUMBER OF TIMES A YEAR for a certain
NUMBER OF YEARS (career length), given an INFLATION RATE and a
GROWTH RATE of your savings.
"""
growth_rate_per_donation = math.pow(growth_rate, 1 / donations_per_year)
inflation_per_donation = math.pow(inflation, 1 / donations_per_year)
num_donations = career_length * donations_per_year
amount_donated = annual_donation / donations_per_year # grows with inflation
total = 0
for _ in range(num_donations):
total *= growth_rate_per_donation
total += amount_donated
amount_donated *= inflation_per_donation
return total
def simulate_instant_donating(career_length, annual_income, fraction_donated):
return career_length * annual_income * fraction_donated
def format_usd(x):
return '${:,.2f}'.format(x)
|
py | b4023f44cb534c858e4c78350d7410a630fdc32f | import sys, argparse
import distance
def process_args(args):
parser = argparse.ArgumentParser(description='Evaluate text edit distance.')
parser.add_argument('--target-formulas', dest='target_file',
type=str, required=True,
help=(
'target formulas file'
))
parser.add_argument('--predicted-formulas', dest='predicted_file',
type=str, required=True,
help=(
'predicted formulas file'
))
parameters = parser.parse_args(args)
return parameters
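# Example invocation (the script and file names below are placeholders):
#   python evaluate_edit_distance.py --target-formulas targets.txt \
#       --predicted-formulas predicted.txt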
def main(args):
parameters = process_args(args)
target_formulas_file = parameters.target_file
predicted_formulas_file = parameters.predicted_file
target_formulas = open(target_formulas_file).readlines()
predicted_formulas = open(predicted_formulas_file).readlines()
i = 0
total_len = 0
total_edit_distance = 0
if len(target_formulas) != len(predicted_formulas):
print("number of formulas doesn't match")
return
n = len(target_formulas)
for tf, pf in zip(target_formulas, predicted_formulas):
i += 1
if i % 2000 == 0:
print("{}/{}".format(i, n))
tf_ = tf.strip()
pf_ = pf.strip()
true_token = tf_.split(' ')
predicted_tokens = pf_.split(' ')
l = max(len(true_token), len(predicted_tokens))
edit_distance = distance.levenshtein(true_token, predicted_tokens)
total_len += l
total_edit_distance += edit_distance
print("{}/{}".format(n, n))
print('Edit Distance Accuracy: %f' % (1. - float(total_edit_distance) / total_len))
if __name__ == '__main__':
main(sys.argv[1:])
|
py | b4024130a6da2cd098136735c6582fdf0bb89536 | #!/usr/bin/env python3
"""
Author : mtoucedasuarez <mtoucedasuarez@localhost>
Date : 2021-09-24
Purpose: Python implementation of the 'cat' function
"""
import argparse
# --------------------------------------------------
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Get the arguments',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('files',
metavar='FILE',
nargs='+',
help='Input file(s)',
type=argparse.FileType('rt'))
parser.add_argument('-n',
'--number',
help='Number the lines',
action='store_true',
default=False)
return parser.parse_args()
# --------------------------------------------------
def main():
"""Make a jazz noise here"""
args = get_args()
for fh in args.files:
for line_num, line in enumerate(fh, start=1):
if args.number:
# print(line_num, line, sep='\t', end='')
print(" {}\t{}".format(line_num, line), end='')
else:
print(line, end='')
# --------------------------------------------------
if __name__ == '__main__':
main()
|
py | b4024175a0c5dde835b4fde14100de331ecbba85 | # Generated by Django 3.2.6 on 2021-08-08 13:02
import ckeditor_uploader.fields
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import hitcount.mixins
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(max_length=254)),
('subject', models.CharField(max_length=128)),
('message', models.TextField()),
('createdon', models.DateField(auto_now=True)),
],
),
migrations.CreateModel(
name='Page',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('meta_title', models.CharField(blank=True, max_length=255, null=True)),
('slug', models.SlugField(blank=True, max_length=255, null=True)),
('description', models.CharField(blank=True, max_length=512, null=True)),
('content', ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True)),
('createdon', models.DateTimeField(auto_now_add=True, null=True)),
('publishedon', models.DateTimeField(blank=True, null=True)),
('updatedon', models.DateTimeField(auto_now=True, null=True)),
('hidden', models.BooleanField(default=False)),
('published', models.BooleanField(default=True)),
],
bases=(models.Model, hitcount.mixins.HitCountModelMixin),
),
migrations.CreateModel(
name='Section',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='community.page')),
('members_joined', models.IntegerField()),
],
bases=('community.page',),
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('photo', models.ImageField(blank=True, null=True, upload_to='')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Post',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='community.page')),
('image', models.ImageField(blank=True, null=True, upload_to='')),
('json_data', models.JSONField(blank=True, null=True)),
('category', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='community.section')),
('createdby', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
bases=('community.page',),
),
]
|
py | b4024388e5c82fee2c552f3348ee7e80dea1db36 | # Script for training and synthesizing from the AutoVC model
import tensorflow as tf
import numpy as np
import librosa
import pyworld as pw
import sys
import os,re
import soundfile as sf
import matplotlib.pyplot as plt
import time
import h5py
from synth.data_pipeline import SDN as data_pipeline
from synth.config import config
from . import model
from synth.modules import autovc as modules_autovc
from synth.modules import SDN as modules_SDN
from synth.utils import utils, sig_process, midi_process, vamp_notes
def binary_cross(p,q):
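# Elementwise binary cross-entropy between targets p and predictions q,
# with a small epsilon added for numerical stability.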
return -(p * tf.log(q + 1e-12) + (1 - p) * tf.log( 1 - q + 1e-12))
class SDN(model.Model):
def __init__(self):
self.check_prep()
self.get_placeholders()
self.model()
self.sess = tf.Session()
summary_dict = self.loss_function()
self.get_optimizers()
self.get_summary(self.sess, config.SDN_log_dir, summary_dict)
self.load_model(self.sess)
def load_model(self, sess):
"""
Load model parameters, for synthesis or re-starting training.
"""
self.init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
auto_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='encoder') + tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='decoder') + tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='post_net')
self.auto_saver = tf.train.Saver(max_to_keep= config.max_models_to_keep, var_list = auto_var_list)
self.stft_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='stft_encoder') + tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='stft_decoder') + tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='stft_post_net') + tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,scope = 'F0_Model') + tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,scope = 'Vuv_Model')
self.stft_saver = tf.train.Saver(max_to_keep= config.max_models_to_keep, var_list = self.stft_var_list)
sess.run(self.init_op)
ckpt_auto = tf.train.get_checkpoint_state(config.autovc_log_dir)
if ckpt_auto and ckpt_auto.model_checkpoint_path:
print("Using the AUTOVC model in %s"%ckpt_auto.model_checkpoint_path)
self.auto_saver.restore(sess, ckpt_auto.model_checkpoint_path)
ckpt_stft = tf.train.get_checkpoint_state(config.SDN_log_dir)
if ckpt_stft and ckpt_stft.model_checkpoint_path:
print("Using the STFT model in %s"%ckpt_stft.model_checkpoint_path)
self.stft_saver.restore(sess, ckpt_stft.model_checkpoint_path)
def save_model(self, sess, epoch, log_dir):
"""
Save the model.
"""
checkpoint_file = os.path.join(log_dir, 'model.ckpt')
self.stft_saver.save(sess, checkpoint_file, global_step=epoch)
def get_optimizers(self):
"""
Returns the optimizers for the model, based on the loss functions and the mode.
"""
self.optimizer = tf.train.AdamOptimizer(learning_rate = config.init_lr)
self.f0_optimizer = tf.train.AdamOptimizer(learning_rate = config.init_lr)
self.vuv_optimizer = tf.train.AdamOptimizer(learning_rate = config.init_lr)
self.global_step = tf.Variable(0, name='global_step', trainable=False)
self.global_step_f0 = tf.Variable(0, name='global_step_f0', trainable=False)
self.global_step_vuv = tf.Variable(0, name='global_step_vuv', trainable=False)
self.harm_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='stft_encoder') + tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='stft_decoder') + tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='stft_post_net')
self.f0_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,scope = 'F0_Model')
self.vuv_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,scope = 'Vuv_Model')
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
self.final_train_function = self.optimizer.minimize(self.final_loss, global_step=self.global_step, var_list=self.harm_params)
self.f0_train_function = self.f0_optimizer.minimize(self.f0_loss, global_step=self.global_step_f0, var_list=self.f0_params)
self.vuv_train_function = self.vuv_optimizer.minimize(self.vuv_loss, global_step=self.global_step_vuv, var_list=self.vuv_params)
def loss_function(self):
"""
returns the loss function for the model, based on the mode.
"""
self.recon_loss = tf.reduce_sum(tf.square(self.input_placeholder - self.output_stft) )
self.content_loss = tf.reduce_sum(tf.abs(self.content_embedding_1 - self.content_embedding_stft))
self.content_loss_2 = tf.reduce_sum(tf.abs(self.content_embedding_1 - self.content_embedding_stft_2))
self.content_loss = self.content_loss + self.content_loss_2
self.recon_loss_0 = tf.reduce_sum(tf.square(self.input_placeholder - self.output_stft_1))
self.vuv_loss = tf.reduce_mean(tf.reduce_mean(binary_cross(self.vuv_placeholder, self.vuv)))
self.f0_loss = tf.reduce_sum(tf.abs(self.f0 - self.f0_placeholder)*(1-self.vuv_placeholder))
self.final_loss = self.recon_loss + config.mu * self.recon_loss_0 + config.lamda * self.content_loss
summary_dict = {"recon_loss" : self.recon_loss, "content_loss": self.content_loss, "recon_loss_0": self.recon_loss_0, "final_loss": self.final_loss,\
"f0_loss": self.f0_loss, "vuv_loss": self.vuv_loss}
return summary_dict
def get_placeholders(self):
"""
Returns the placeholders for the model.
Includes the input feature, STFT, speaker label, F0 and voiced/unvoiced placeholders.
"""
self.input_placeholder = tf.placeholder(tf.float32, shape=(config.batch_size, config.max_phr_len, config.num_features),
name='input_placeholder')
self.stft_placeholder = tf.placeholder(tf.float32, shape=(config.batch_size, config.max_phr_len, config.stft_features),
name='stft_placeholder')
self.speaker_labels = tf.placeholder(tf.float32, shape=(config.batch_size),name='singer_placeholder')
self.speaker_onehot_labels = tf.one_hot(indices=tf.cast(self.speaker_labels, tf.int32), depth = config.num_singers)
self.speaker_labels_1 = tf.placeholder(tf.float32, shape=(config.batch_size),name='singer_placeholder')
self.speaker_onehot_labels_1 = tf.one_hot(indices=tf.cast(self.speaker_labels_1, tf.int32), depth = config.num_singers)
self.vuv_placeholder = tf.placeholder(tf.float32, shape=(config.batch_size,config.max_phr_len,1),name='vuv_placeholder')
self.f0_placeholder = tf.placeholder(tf.float32, shape=(config.batch_size,config.max_phr_len,1),name='f0_placeholder')
self.is_train = tf.placeholder(tf.bool, name="is_train")
def train(self):
"""
Function to train the model, and save Tensorboard summary, for N epochs.
"""
start_epoch = int(self.sess.run(tf.train.get_global_step()) / (config.autovc_batches_per_epoch_train))
print("Start from: %d" % start_epoch)
for epoch in range(start_epoch, config.SDN_num_epochs):
data_generator = data_pipeline.data_gen_SDN()
val_generator = data_pipeline.data_gen_SDN(mode = 'Val')
epoch_final_loss = 0
epoch_recon_loss = 0
epoch_recon_0_loss = 0
epoch_content_loss = 0
epoch_f0_loss = 0
epoch_vuv_loss = 0
val_final_loss = 0
val_recon_loss = 0
val_recon_0_loss = 0
val_content_loss = 0
val_f0_loss = 0
val_vuv_loss = 0
batch_num = 0
start_time = time.time()
with tf.variable_scope('Training'):
for feats_targs, stft_targs, targets_speakers in data_generator:
final_loss, recon_loss, recon_loss_0, content_loss, f0_loss, vuv_loss, summary_str = self.train_model(feats_targs, stft_targs, targets_speakers, self.sess)
epoch_final_loss+=final_loss
epoch_recon_loss+=recon_loss
epoch_recon_0_loss+=recon_loss_0
epoch_content_loss+=content_loss
epoch_f0_loss+=f0_loss
epoch_vuv_loss+=vuv_loss
self.train_summary_writer.add_summary(summary_str, epoch)
self.train_summary_writer.flush()
utils.progress(batch_num,config.autovc_batches_per_epoch_train, suffix = 'training done')
batch_num+=1
epoch_final_loss = epoch_final_loss/batch_num
epoch_recon_loss = epoch_recon_loss/batch_num
epoch_recon_0_loss = epoch_recon_0_loss/batch_num
epoch_content_loss = epoch_content_loss/batch_num
epoch_f0_loss = epoch_f0_loss/batch_num
epoch_vuv_loss = epoch_vuv_loss/batch_num
print_dict = {"Final Loss": epoch_final_loss}
print_dict["Recon Loss"] = epoch_recon_loss
print_dict["Recon Loss_0 "] = epoch_recon_0_loss
print_dict["Content Loss"] = epoch_content_loss
print_dict["F0 Loss "] = epoch_f0_loss
print_dict["VUV Loss"] = epoch_vuv_loss
batch_num = 0
with tf.variable_scope('Validation'):
for feats_targs, stft_targs, targets_speakers in val_generator:
final_loss, recon_loss, recon_loss_0, content_loss, f0_loss, vuv_loss, summary_str = self.validate_model(feats_targs, stft_targs, targets_speakers, self.sess)
val_final_loss+=final_loss
val_recon_loss+=recon_loss
val_recon_0_loss+=recon_loss_0
val_content_loss+=content_loss
val_f0_loss+=f0_loss
val_vuv_loss+=vuv_loss
self.val_summary_writer.add_summary(summary_str, epoch)
self.val_summary_writer.flush()
utils.progress(batch_num,config.autovc_batches_per_epoch_val, suffix = 'validation done')
batch_num+=1
val_final_loss = val_final_loss/batch_num
val_recon_loss = val_recon_loss/batch_num
val_recon_0_loss = val_recon_0_loss/batch_num
val_content_loss = val_content_loss/batch_num
val_f0_loss = val_f0_loss/batch_num
val_vuv_loss = val_vuv_loss/batch_num
print_dict["Val Final Loss"] = val_final_loss
print_dict["Val Recon Loss"] = val_recon_loss
print_dict["Val Recon Loss_0 "] = val_recon_0_loss
print_dict["Val Content Loss"] = val_content_loss
print_dict["Val F0 Loss "] = val_f0_loss
print_dict["Val VUV Loss"] = val_vuv_loss
end_time = time.time()
if (epoch + 1) % config.print_every == 0:
self.print_summary(print_dict, epoch, end_time-start_time)
if (epoch + 1) % config.save_every == 0 or (epoch + 1) == config.SDN_num_epochs:
self.save_model(self.sess, epoch+1, config.SDN_log_dir)
def train_model(self,feats_targs, stft_targs, targets_speakers, sess):
"""
Function to train the model on a single batch.
"""
feed_dict = {self.input_placeholder: feats_targs[:,:,:64], self.stft_placeholder: stft_targs, self.speaker_labels:targets_speakers, self.speaker_labels_1:targets_speakers,\
self.f0_placeholder: feats_targs[:,:,-2:-1], self.vuv_placeholder: feats_targs[:,:,-1:], self.is_train: True}
_,_,_, final_loss, recon_loss, recon_loss_0, content_loss, f0_loss, vuv_loss = sess.run([self.final_train_function, self.f0_train_function, self.vuv_train_function, self.final_loss, self.recon_loss, self.recon_loss_0, self.content_loss, self.f0_loss, self.vuv_loss], feed_dict=feed_dict)
summary_str = sess.run(self.summary, feed_dict=feed_dict)
return final_loss, recon_loss, recon_loss_0, content_loss, f0_loss, vuv_loss, summary_str
def validate_model(self,feats_targs, stft_targs, targets_speakers, sess):
"""
Function to validate the model on a single batch.
"""
feed_dict = {self.input_placeholder: feats_targs[:,:,:64], self.stft_placeholder: stft_targs, self.speaker_labels:targets_speakers, self.speaker_labels_1:targets_speakers,\
self.f0_placeholder: feats_targs[:,:,-2:-1], self.vuv_placeholder: feats_targs[:,:,-1:], self.is_train: False}
final_loss, recon_loss, recon_loss_0, content_loss, f0_loss, vuv_loss = sess.run([self.final_loss, self.recon_loss, self.recon_loss_0, self.content_loss, self.f0_loss, self.vuv_loss], feed_dict=feed_dict)
summary_str = sess.run(self.summary, feed_dict=feed_dict)
return final_loss, recon_loss, recon_loss_0, content_loss, f0_loss, vuv_loss, summary_str
def read_hdf5_file(self, file_name):
"""
Function to read and process an input HDF5 feature file, given its name.
Returns the vocoder features and STFT magnitudes stored in the file.
"""
# if file_name.endswith('.hdf5'):
with h5py.File(os.path.join(config.feats_dir,file_name), "r") as hdf5_file:
mel = hdf5_file['feats'][()]
stfts = hdf5_file['stfts'][()]
f0 = mel[:,-2]
med = np.median(f0[f0 > 0])
f0[f0==0] = med
mel[:,-2] = f0
return mel, stfts
def read_wav_file(self, file_name):
audio, fs = librosa.core.load(file_name, sr=config.fs)
audio = np.float64(audio)
if len(audio.shape) == 2:
vocals = np.array((audio[:,1]+audio[:,0])/2)
else:
vocals = np.array(audio)
voc_stft = abs(np.array(utils.stft(audio, hopsize=config.hopsize, nfft=config.framesize, fs=config.fs)))
feats = sig_process.get_world_feats(audio)
voc_stft = np.clip(voc_stft, 0.0, 1.0)
return feats, voc_stft
def extract_feature_file(self, file_name):
"""
Function to extract the content embedding from a wav file.
"""
mel, stft = self.read_wav_file(file_name)
out_feats = self.extract_feature(stft, self.sess)
out_feats = out_feats[:stft.shape[0]]
return out_feats
def extract_feature_wav(self, audio):
"""
Function to extract the content embedding from raw audio.
"""
stft = abs(np.array(utils.stft(audio, hopsize=config.hopsize, nfft=config.framesize, fs=config.fs)))
out_feats = self.extract_feature(stft, self.sess)
out_feats = out_feats[:stft.shape[0]]
return out_feats
def test_file_wav(self, file_name, speaker_index):
"""
Function to convert a wav file to the target speaker and optionally synthesize the output.
"""
mel, stft = self.read_wav_file(file_name)
out_mel, out_f0, out_vuv = self.process_file(stft, speaker_index, self.sess)
plot_dict = {"Spec Envelope": {"gt": mel[:,:-6], "op": out_mel[:,:-4]}, "Aperiodic":{"gt": mel[:,-6:-2], "op": out_mel[:,-4:]},\
"F0": {"gt": mel[:,-2], "op": out_f0}, "Vuv": {"gt": mel[:,-1], "op": out_vuv}}
self.plot_features(plot_dict)
synth = utils.query_yes_no("Synthesize output? ")
file_name = file_name.split('/')[-1]
if synth:
gen_change = utils.query_yes_no("Change in gender? ")
if gen_change:
female_male = utils.query_yes_no("Female to male?")
if female_male:
out_featss = np.concatenate((out_mel, out_f0-12, out_vuv), axis = -1)
else:
out_featss = np.concatenate((out_mel, out_f0+12, out_vuv), axis = -1)
else:
out_featss = np.concatenate((out_mel, out_f0, out_vuv), axis = -1)
audio_out = sig_process.feats_to_audio(out_featss)
sf.write(os.path.join(config.output_dir,'{}_{}_SDN.wav'.format(file_name[:-4], config.singers[speaker_index])), audio_out, config.fs)
synth_ori = utils.query_yes_no("Synthesize ground truth with vocoder? ")
if synth_ori:
audio = sig_process.feats_to_audio(mel)
sf.write(os.path.join(config.output_dir,'{}_ori.wav'.format(file_name[:-4])), audio, config.fs)
def test_file_wav_f0(self, file_name, f0_file, speaker_index):
"""
Function to convert a wav file to the target speaker using an external F0 file, and optionally synthesize the output.
"""
mel, stft = self.read_wav_file(file_name)
f0 = midi_process.open_f0_file(f0_file)
timestamps = np.arange(0, len(mel)*config.hoptime, config.hoptime)
f1 = vamp_notes.note2traj(f0, timestamps)
f1 = sig_process.process_pitch(f1[:,0])
out_mel, out_f0, out_vuv = self.process_file(stft, speaker_index, self.sess)
plot_dict = {"Spec Envelope": {"gt": mel[:,:-6], "op": out_mel[:,:-4]}, "Aperiodic":{"gt": mel[:,-6:-2], "op": out_mel[:,-4:]},\
"F0": {"gt": f1[:,0], "op": out_f0}, "Vuv": {"gt": mel[:,-1], "op": out_vuv}}
self.plot_features(plot_dict)
synth = utils.query_yes_no("Synthesize output? ")
file_name = file_name.split('/')[-1]
if synth:
out_featss = np.concatenate((out_mel[:f1.shape[0]], f1), axis = -1)
audio_out = sig_process.feats_to_audio(out_featss)
sf.write(os.path.join(config.output_dir,'{}_{}_SDN_f0_{}.wav'.format(file_name[:-4], config.singers[speaker_index], f0_file.split('/')[-1])), audio_out, config.fs)
synth_ori = utils.query_yes_no("Synthesize ground truth with vocoder? ")
if synth_ori:
audio = sig_process.feats_to_audio(mel)
sf.write(os.path.join(config.output_dir,'{}_ori.wav'.format(file_name[:-4])), audio, config.fs)
def test_file_hdf5(self, file_name, speaker_index_2):
"""
Function to convert an HDF5 feature file to the target speaker and optionally synthesize the output.
"""
mel, stft = self.read_hdf5_file(file_name)
out_mel, out_f0, out_vuv = self.process_file(stft, speaker_index_2, self.sess)
plot_dict = {"Spec Envelope": {"gt": mel[:,:-6], "op": out_mel[:,:-4]}, "Aperiodic":{"gt": mel[:,-6:-2], "op": out_mel[:,-4:]},\
"F0": {"gt": mel[:,-2], "op": out_f0}, "Vuv": {"gt": mel[:,-1], "op": out_vuv}}
self.plot_features(plot_dict)
synth = utils.query_yes_no("Synthesize output? ")
if synth:
gen_change = utils.query_yes_no("Change in gender? ")
if gen_change:
female_male = utils.query_yes_no("Female to male?")
if female_male:
out_featss = np.concatenate((out_mel[:mel.shape[0]], mel[:out_mel.shape[0],-2:-1]-12, mel[:out_mel.shape[0],-1:]), axis = -1)
else:
out_featss = np.concatenate((out_mel[:mel.shape[0]], mel[:out_mel.shape[0],-2:-1]+12, mel[:out_mel.shape[0],-1:]), axis = -1)
else:
out_featss = np.concatenate((out_mel[:mel.shape[0]], mel[:out_mel.shape[0],-2:-1], mel[:out_mel.shape[0],-1:]), axis = -1)
audio_out = sig_process.feats_to_audio(out_featss)
sf.write(os.path.join(config.output_dir,'{}_{}_SDN.wav'.format(file_name[:-4], config.singers[speaker_index_2])), audio_out, config.fs)
synth_ori = utils.query_yes_no("Synthesize ground truth with vocoder? ")
if synth_ori:
audio = sig_process.feats_to_audio(mel)
sf.write(os.path.join(config.output_dir,'{}_ori.wav'.format(file_name[:-4])), audio, config.fs)
def process_file(self, mel, speaker_index_2, sess):
datasets = "".join("_"+x.lower() for x in config.datasets)
with h5py.File(config.stat_file, mode='r') as stat_file:
max_feat = stat_file["feats_maximus"][()] + 0.001
min_feat = stat_file["feats_minimus"][()] - 0.001
mel = np.clip(mel, 0.0, 1.0)
in_batches_mel, nchunks_in = utils.generate_overlapadd(mel)
out_batches_mel = []
out_batches_f0 = []
out_batches_vuv = []
for in_batch_mel in in_batches_mel :
speaker_2 = np.repeat(speaker_index_2, config.batch_size)
feed_dict = {self.stft_placeholder: in_batch_mel, self.speaker_labels_1:speaker_2, self.is_train: False}
mel, f0, vuv = sess.run([self.output_stft, self.f0, self.vuv], feed_dict=feed_dict)
out_batches_mel.append(mel)
out_batches_f0.append(f0)
out_batches_vuv.append(vuv)
out_batches_mel = np.array(out_batches_mel)
out_batches_f0 = np.array(out_batches_f0)
out_batches_vuv = np.array(out_batches_vuv)
out_batches_mel = utils.overlapadd(out_batches_mel,nchunks_in)
out_batches_f0 = utils.overlapadd(out_batches_f0,nchunks_in)
out_batches_vuv = utils.overlapadd(out_batches_vuv,nchunks_in)
out_batches_mel = out_batches_mel*(max_feat[:-2] - min_feat[:-2]) + min_feat[:-2]
out_batches_f0 = out_batches_f0*(max_feat[-2] - min_feat[-2]) + min_feat[-2]
out_batches_vuv = out_batches_vuv*(max_feat[-1] - min_feat[-1]) + min_feat[-1]
out_batches_vuv = np.round(out_batches_vuv)
return out_batches_mel, out_batches_f0, out_batches_vuv
def extract_feature(self, mel, sess):
datasets = "".join("_"+x.lower() for x in config.datasets)
mel = np.clip(mel, 0.0, 1.0)
in_batches_mel, nchunks_in = utils.generate_overlapadd(mel)
out_batches_mel = []
for in_batch_mel in in_batches_mel :
feed_dict = {self.stft_placeholder: in_batch_mel, self.is_train: False}
mel = sess.run(self.content_embedding_stft, feed_dict=feed_dict)
out_batches_mel.append(mel)
out_batches_mel = np.array(out_batches_mel)
out_batches_mel = utils.overlapadd(out_batches_mel,nchunks_in)
return out_batches_mel
def model(self):
"""
The main model function, takes and returns tensors.
Defined in modules.
"""
with tf.variable_scope('encoder') as scope:
self.content_embedding_1 = modules_autovc.content_encoder(self.input_placeholder, self.speaker_onehot_labels, self.is_train)
with tf.variable_scope('decoder') as scope:
self.output_1 = modules_autovc.decoder(self.content_embedding_1, self.speaker_onehot_labels_1, self.is_train)
with tf.variable_scope('post_net') as scope:
self.residual = modules_autovc.post_net(self.output_1, self.is_train)
self.output = self.output_1 + self.residual
with tf.variable_scope('encoder') as scope:
scope.reuse_variables()
self.content_embedding_2 = modules_autovc.content_encoder(self.output, self.speaker_onehot_labels, self.is_train)
with tf.variable_scope('stft_encoder') as scope:
self.content_embedding_stft = modules_SDN.content_encoder_stft(self.stft_placeholder, self.is_train)
with tf.variable_scope('stft_decoder') as scope:
self.output_stft_1 = modules_autovc.decoder(self.content_embedding_stft, self.speaker_onehot_labels_1, self.is_train)
with tf.variable_scope('stft_post_net') as scope:
self.residual_stft = modules_autovc.post_net(self.output_stft_1, self.is_train)
self.output_stft = self.output_stft_1 + self.residual_stft
with tf.variable_scope('encoder') as scope:
scope.reuse_variables()
self.content_embedding_stft_2 = modules_autovc.content_encoder(self.output_stft, self.speaker_onehot_labels, self.is_train)
with tf.variable_scope('F0_Model') as scope:
self.f0 = modules_SDN.enc_dec_f0(self.stft_placeholder, self.output_stft[:,:,:-4], self.output_stft[:,:,-4:], self.is_train)
with tf.variable_scope('Vuv_Model') as scope:
self.vuv = modules_SDN.enc_dec_vuv(self.stft_placeholder, self.output_stft[:,:,:-4], self.output_stft[:,:,-4:], self.f0, self.is_train) |
py | b40244e78857ee28cef8c93380a7215baddec8a4 | from lib.base import PdBaseAction
""" This action prepares data for base logic in lib/base.py
Your action.yaml can reference find(), fetch(), delete(), and create() directly.
Any other method will fall through to else, and be passed literally to lib/base.py.
other methods should match up to a method in pypd.
"""
class PdAction(PdBaseAction):
""" Pagerduty run action
"""
def run(self, entity=None, method=None, **kwargs):
""" Run action and call appropriate method
"""
# Run a couple checks in PdBaseAction() to make sure global required
# data is present
self.check_entity(entity)
self.check_method(method)
# Other data validation checks are done in PdBaseAction() methods
# Well known pypd methods in pypd.entity
if method == 'find': # HTTP_GET
self.logger.debug('Running a find() method')
return (True, self.find(entity=entity, **kwargs))
elif method == 'fetch': # HTTP_GET
self.logger.debug('Running a fetch() method')
# We need to know the id of the resource we are fetching.
# Define 'entity_id' in your action
entity_id = str(kwargs.pop('entity_id', None))
self.logger.debug(
'Extracting entity_id from kwargs: {}'.format(entity_id))
return (True, self.fetch(
entity=entity, entity_id=entity_id, **kwargs))
elif method == 'nested_fetch': # HTTP_GET
# Since some fetch() methods are customized by pypd, we sometimes need to
# handle them differently. In this case it's a normal fetch, just
# against a nested resource. For an example,
# see the PD API reference -
# get_services_id_integrations_integration_id
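# Illustrative sketch (hypothetical IDs): incoming kwargs of
# {'entity_id': 'SERVICE_ID', 'resource_id': 'INTEGRATION_ID'} are rewritten
# below so that fetch() receives entity_id='INTEGRATION_ID' plus a
# service='SERVICE_ID' keyword pulled from the original entity_id.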
self.logger.debug(
'Running a fetch() method against a nested resource')
# override actual method
self.logger.debug('Rewriting nested_fetch to fetch for method')
method = 'fetch'
self.logger.debug('Extracting entity_id from kwargs as service: {}'.format(
kwargs.get('resource_id', None)))
kwargs['service'] = kwargs.pop('entity_id', None)
# We need to know the id of the resource we are fetching.
# Define 'entity_id' in your action
entity_id = str(kwargs.pop('resource_id', None))
self.logger.debug(
'Extracting resource_id from kwargs as integration: {}'.format(entity_id))
return (True, self.fetch(
entity=entity, entity_id=entity_id, **kwargs))
elif method == 'delete': # HTTP_DELETE
self.logger.debug('Running a delete() method')
# We need to know the id of the resource we are deleting.
# Define 'entity_id' in your action
entity_id = str(kwargs.pop('entity_id', None))
self.logger.debug(
'Extracting entity_id from kwargs: {}'.format(entity_id))
return (True, self.delete(
entity=entity, entity_id=entity_id, **kwargs))
elif method == 'create': # HTTP_POST
self.logger.debug('Running a create() method')
from_email = str(kwargs.pop('from_email', None))
self.logger.debug(
'Extracting from_email from kwargs: {}'.format(from_email))
# data should be a JSON object with a defined JSONschema in the
# action to enforce API compliance.
data = kwargs.pop('data', None)
self.logger.debug('Extracting data from kwargs: {}'.format(data))
return (True, self.create(
entity=entity, from_email=from_email, payload=data, **kwargs))
# If there ends up being a specific method that needs some special handling,
# you can add another `elif` condition here. You COULD create a
# separate `specific_action.py` that instantiates PdBaseAction() directly,
# but for consistency it's preferable to keep direct action logic here.
# elif method == '<another>':
# ...
# other entity_id based methods
else:
self.logger.debug('Running an entity_id specific method')
# We need to know the entity_id of the resource to interact with
entity_id = str(kwargs.pop('entity_id', None))
self.logger.debug(
'Extracting entity_id from kwargs: {}'.format(entity_id))
return (True, self.entity_id_method(
entity=entity, method=method, entity_id=entity_id, **kwargs))
|
py | b402471d625c8b5d1d0854774fe4fb3a25cf2edc | """
A line item for a bulk food order has description, weight and price fields::
>>> raisins = LineItem('Golden raisins', 10, 6.95)
>>> raisins.weight, raisins.description, raisins.price
(10, 'Golden raisins', 6.95)
A ``subtotal`` method gives the total price for that line item::
>>> raisins.subtotal()
69.5
The weight of a ``LineItem`` must be greater than 0::
>>> raisins.weight = -20
Traceback (most recent call last):
...
ValueError: value must be > 0; -20 is not valid.
No change was made::
>>> raisins.weight
10
The values of the attributes managed by the descriptors are stored in
alternate attributes, created by the descriptors in each ``LineItem``
instance::
>>> raisins = LineItem('Golden raisins', 10, 6.95)
>>> dir(raisins) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
['_Check#0', '_Check#1', '_Check#2', '__class__', ...
'description', 'price', 'subtotal', 'weight']
>>> [getattr(raisins, name) for name in dir(raisins) if name.startswith('_Check#')]
['Golden raisins', 10, 6.95]
If the descriptor is accessed in the class, the descriptor object is
returned:
>>> LineItem.weight # doctest: +ELLIPSIS
<model_v5_check.Check object at 0x...>
>>> LineItem.weight.storage_name
'_Check#1'
The `NonBlank` descriptor prevents empty or blank strings from being used
for the description:
>>> br_nuts = LineItem('Brazil Nuts', 10, 34.95)
>>> br_nuts.description = ' '
Traceback (most recent call last):
...
ValueError: ' ' is not valid.
>>> void = LineItem('', 1, 1)
Traceback (most recent call last):
...
ValueError: '' is not valid.
"""
import model_v5_check as model
def gt_zero(x):
'''value must be > 0'''
return x if x > 0 else model.INVALID
def non_blank(txt):
txt = txt.strip()
return txt if txt else model.INVALID
class LineItem:
description = model.Check(non_blank)
weight = model.Check(gt_zero)
price = model.Check(gt_zero)
def __init__(self, description, weight, price):
self.description = description
self.weight = weight
self.price = price
def subtotal(self):
return self.weight * self.price
|
py | b402477d5ed9cb78aea855cab8d1e1af5b911409 | """Turn BLE discovery on or off."""
from gatterserver.models.gatterbasemodel import GatterBaseModel
class DiscoveryCommand(GatterBaseModel):
"""Turn BLE discovery on or off."""
discovery: bool
|
py | b402487e8820a1ecaca892491600cda9ff1eb471 | import asyncio
import pytest
from chatty.main import create_app
@pytest.fixture
def loop():
return asyncio.get_event_loop()
@pytest.fixture
async def client(aiohttp_client):
return await aiohttp_client(create_app())
|
py | b40248b672fce242192fb435e67559c372117eeb | # Generated by Django 2.2.16 on 2020-11-27 11:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("goods", "0018_merge_20201127_1102"),
]
operations = [
migrations.AlterField(
model_name="firearmgooddetails",
name="no_proof_mark_details",
field=models.TextField(
blank=True,
default="",
help_text="The reason why `has_proof_mark` is False (which should normally be True).",
),
),
]
|
py | b40248f06b77c3a83f90e57ecafe03233627c4dd | from typing import List, Tuple, Union
from vowpalwabbit import pyvw
from movado.mab_handler import MabHandler
import random
class MabHandlerCB(MabHandler):
def __init__(
self,
arms: int,
cover: int = 3,
debug: bool = False,
controller_params: dict = None,
debug_path: str = "mab",
skip_debug_initialization: bool = False,
):
super().__init__(
debug,
controller_params=controller_params,
debug_path=debug_path,
skip_debug_initialization=skip_debug_initialization,
)
self._mab = pyvw.vw(
"--cb_explore "
+ str(arms)
+ " --cover "
+ str(cover)
+ " --quiet"
+ " --random_seed 0"
)
def predict(
self, context: List[float], probability: bool = False
) -> Union[int, Tuple[int, float]]:
context_str: str = "| "
for feature in context:
context_str += str(feature) + " "
context_str = context_str.strip()
prediction: Tuple[int, float] = self.sample_probability_mass_function(
self._mab.predict(context_str)
)
self._last_predict_probability = prediction[1]
self._last_action = prediction[0]
if probability:
return (
(prediction[0], self._last_predict_probability)
if self._last_action == 0
else (prediction[0], 1 - self._last_predict_probability)
)
return prediction[0]
@staticmethod
def sample_probability_mass_function(
probability_mass_function: List[float],
) -> Tuple[int, float]:
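# Rough behaviour sketch (illustrative numbers): an unnormalized mass function
# such as [2.0, 3.0, 5.0] is rescaled to [0.2, 0.3, 0.5] and an index is drawn
# in proportion to those weights (index 2 roughly half the time); predict()
# above stores the returned (index, probability) pair as the last action.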
total = sum(probability_mass_function)
scale = 1 / total
probability_mass_function = [x * scale for x in probability_mass_function]
draw = random.random()
sum_prob = 0.0
for index, prob in enumerate(probability_mass_function):
sum_prob += prob
if sum_prob > draw:
return index, prob
|
py | b40249661fbbc8d1473573fe677c4a19575ac38e | from .env import *
from .version import __version__
from persia import logger as logger
from persia import prelude as prelude
from persia import ctx as ctx
from persia import sparse as sparse
from persia import data as data
from persia import error as error
from persia import service as service
from persia import utils as utils
|
py | b4024aee03a2040401239006bea8671a588511f2 | from .controller.feedhistory import *
from .controller.pond import *
from .controller.feedtype import *
def initialize_routes(api):
# pond
api.add_resource(PondsApi, '/api/ponds')
api.add_resource(PondApi, '/api/ponds/<id>')
api.add_resource(PondImageApi, '/api/ponds/image/<id>')
api.add_resource(PondImageApiDummy, '/api/ponds/image')
# feedtype
api.add_resource(FeedTypesApi, '/api/feedtypes')
api.add_resource(FeedTypeApi, '/api/feedtypes/<id>')
# feedhistory
api.add_resource(FeedHistorysApi, '/api/feedhistorys')
api.add_resource(FeedHistoryApi, '/api/feedhistorys/<id>')
api.add_resource(FeedHistoryByPond,
'/api/feedhistorysbypond')
api.add_resource(FeedHistoryByOnePond,
'/api/feedhistorysbyonepond/<id>')
|
py | b4024b349b5162e6d67400f5bfc0f1ace0a227c9 | import logging
import os
from math import ceil
from shutil import rmtree
from time import time
from mongodb_consistent_backup.Errors import OperationError
class Rotate(object):
def __init__(self, config, state_root, state_bkp):
self.config = config
self.state_root = state_root
self.state_bkp = state_bkp
self.backup_name = self.config.backup.name
self.max_backups = self.config.rotate.max_backups
self.max_days = self.config.rotate.max_days
self.previous = None
self.backups = self.backups_by_unixts()
self.latest = state_bkp.get()
self.base_dir = os.path.join(self.config.backup.location, self.config.backup.name)
self.latest_symlink = os.path.join(self.base_dir, "latest")
self.previous_symlink = os.path.join(self.base_dir, "previous")
self.max_secs = 0
if self.max_days > 0:
seconds = float(self.max_days) * 86400.00
self.max_secs = int(ceil(seconds))
def backups_by_unixts(self):
backups = {}
for name in self.state_root.backups:
backup = self.state_root.backups[name]
backup_time = backup["updated_at"]
backups[backup_time] = backup
if not self.previous or backup_time > self.previous["updated_at"]:
self.previous = backup
return backups
def remove(self, ts):
if ts in self.backups:
backup = self.backups[ts]
try:
logging.debug("Removing backup path: %s" % backup["path"])
rmtree(backup["path"])
except Exception as e:
logging.error("Unable to remove backup path %s. %s" % (backup["path"], str(e)))
raise OperationError(e)
if self.previous == backup:
self.previous = None
del self.backups[ts]
def rotate(self):
if self.max_days == 0 and self.max_backups == 0:
logging.info("Backup rotation is disabled, skipping")
return
logging.info("Rotating backups (max_backups=%i, max_days=%.2f)" % (self.max_backups, self.max_days))
kept_backups = 1
now = int(time())
remove_backups = {}
for ts in sorted(self.backups.keys(), reverse=True):
backup = self.backups[ts]
name = backup["name"].encode("ascii", "ignore")
if self.max_backups == 0 or kept_backups < self.max_backups:
if self.max_secs > 0 and (now - ts) > self.max_secs:
remove_backups[name] = ts
continue
logging.debug("Keeping previous backup %s" % name)
kept_backups += 1
else:
remove_backups[name] = ts
if len(remove_backups) > 0:
logging.info("Backup(s) exceeds max backup count or age, removing: %s" % sorted(remove_backups.keys()))
for name in remove_backups:
self.remove(remove_backups[name])
def symlink(self):
try:
if os.path.islink(self.latest_symlink):
os.remove(self.latest_symlink)
logging.info("Updating %s latest symlink to current backup path: %s" % (self.backup_name, self.latest["path"]))
os.symlink(self.latest["path"], self.latest_symlink)
if os.path.islink(self.previous_symlink):
os.remove(self.previous_symlink)
if self.previous:
logging.info("Updating %s previous symlink to: %s" % (self.backup_name, self.previous["path"]))
os.symlink(self.previous["path"], self.previous_symlink)
except Exception as e:
logging.error("Error creating backup symlinks: %s" % e)
raise OperationError(e)
def run(self):
self.rotate()
self.symlink()
|
py | b4024bacfeed73bd583ba3a352c658680fe5f729 | """
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
This stack creates the S3 bucket for user data upload and configures the bucket for
cross-account access. This stack also creates the instant and hourly data upload folders
and event notification for instant upload.
"""
import os
from aws_cdk import (
core as cdk,
aws_s3 as s3,
aws_ssm as ssm,
aws_iam as iam,
aws_s3_deployment as s3_deploy,
aws_s3_notifications as s3_notifications,
aws_lambda as _lambda,
)
from aws_cdk.core import RemovalPolicy
class DataLakeResourcesStack(cdk.Stack):
def __init__(
self,
scope: cdk.Construct,
id: str,
instance: str,
rstudio_account_id: str,
datalake_source_bucket_name: str,
datalake_source_bucket_key_hourly: str,
datalake_source_bucket_key_instant: str,
lambda_datasync_trigger_function_arn: str,
**kwargs,
):
cdk.Stack.__init__(self, scope, id, **kwargs)
"""
# set removal policy objects
self.removal_policy = (
core.RemovalPolicy.DESTROY
if os.getenv("AWS_REMOVAL_POLICY", "FALSE") == "TRUE"
else core.RemovalPolicy.RETAIN
)
"""
source_bucket = s3.Bucket(
self,
id=f"rstudio-user-data-{instance}",
bucket_name=datalake_source_bucket_name,
# removal_policy=self.removal_policy,
removal_policy=RemovalPolicy.DESTROY,
versioned=True,
)
source_bucket.add_to_resource_policy(
permission=iam.PolicyStatement(
principals=[iam.AccountPrincipal(rstudio_account_id)],
effect=iam.Effect.ALLOW,
actions=[
"s3:GetBucketNotification",
"s3:AbortMultipartUpload",
"s3:DeleteObject",
"s3:GetObject",
"s3:ListMultipartUploadParts",
"s3:PutObjectTagging",
"s3:GetObjectTagging",
"s3:PutObject",
"s3:ListBucket",
"s3:GetBucketLocation",
"s3:ListBucketMultipartUploads",
],
resources=[
source_bucket.bucket_arn,
f"{source_bucket.bucket_arn}/*",
],
)
)
s3_prefix_creation_hourly = s3_deploy.BucketDeployment(
self,
id=f"s3-prefix-deployment-hourly-{instance}",
sources=[s3_deploy.Source.asset("./dummy")],
destination_bucket=source_bucket,
destination_key_prefix=datalake_source_bucket_key_hourly,
retain_on_delete=False,
)
s3_prefix_creation_instant = s3_deploy.BucketDeployment(
self,
id=f"s3-prefix-deployment-instant-{instance}",
sources=[s3_deploy.Source.asset("./dummy")],
destination_bucket=source_bucket,
destination_key_prefix=datalake_source_bucket_key_instant,
retain_on_delete=False,
)
# Setup bucket notification to trigger lambda (in destination account) whenever a file is uploaded into the bucket
lambda_destination = s3_notifications.LambdaDestination(
_lambda.Function.from_function_arn(
self,
id=f"datasync-lambda-{instance}",
function_arn=lambda_datasync_trigger_function_arn,
)
)
source_bucket.add_event_notification(
s3.EventType.OBJECT_CREATED,
lambda_destination,
s3.NotificationKeyFilter(prefix=f"{datalake_source_bucket_key_instant}/"),
)
|
py | b4024bc8b675f4e24ab5721e8f4763a0b73dad54 | # Generated by Django 3.1 on 2020-12-01 13:37
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=30)),
('content', models.TextField()),
],
),
]
|
py | b4024d84d4513279dde8eeb7b78e3491e9770d6e | users = []
class UserModel(object):
"""Class user models."""
def __init__(self):
self.db = users
def add_user(self, fname, lname, email, phone, password, confirm_password, city):
""" Method for saving user to the dictionary """
payload = {
"userId": len(self.db)+1,
"fname": fname,
"lname": lname,
"email": email,
"phone": phone,
"password": password,
"confirm_password": confirm_password,
"city": city,
}
self.db.append(payload)
return self.db
def check_email(self, email):
"""Method for checking if user email exist"""
user = [user for user in users if user['email'] == email]
if user:
return True
return False
def check_user(self, userId):
"""Method for checking if user exist"""
user = [user for user in users if user['userId'] == userId]
if user:
return True
return False
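# Minimal usage sketch for UserModel (hypothetical values):
# model = UserModel()
# model.add_user('Jane', 'Doe', 'jane@example.com', '0700000000',
# 'secret', 'secret', 'Nairobi')
# model.check_email('jane@example.com') # -> True
# model.check_user(1) # -> True, since userId is assigned as len(users) + 1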
|
py | b4024d94b634136d6490f5917a9998de2f7214d7 | from core.models import ScanResult
import xlsxwriter
def export_xlsx(session):
workbook = xlsxwriter.Workbook('results.xlsx')
worksheet = workbook.add_worksheet()
bold = workbook.add_format({'bold': 1})
worksheet.set_column('A:E', 30)
worksheet.write('A1', 'Domain', bold)
worksheet.write('B1', 'IP', bold)
worksheet.write('C1', 'Port', bold)
worksheet.write('D1', 'Response Code', bold)
worksheet.write('E1', 'Owner', bold)
results = []
for i in session.query(ScanResult).all():
results.append([i.domain.name, str(i.ip.ip_address),
str(i.port), str(i.response_code), i.owner])
row, col = 1, 0
for domain, ip, port, response_code, owner in results:
worksheet.write(row, col, domain)
worksheet.write(row, col + 1, ip)
worksheet.write(row, col + 2, port)
worksheet.write(row, col + 3, response_code)
worksheet.write(row, col + 4, owner)
row += 1
|
py | b4024d9bb5522aaf4603623e34a7c34103d2a0f0 | """
Fast Fourier Transformation
"""
import numpy as np
import pandas as pd
from hyperopt import hp, fmin, tpe, Trials
# local module
from foresee.models import models_util
from foresee.models import param_optimizer
from foresee.scripts import fitter
def _reconstruct_signal(
n_periods,
forecast_len,
fft_model,
ft_sample_frequencies,
fft_terms_for_reconstruction,
linear_trend
):
"""[summary]
Parameters
----------
n_periods : [type]
[description]
forecast_len : [type]
[description]
fft_model : [type]
[description]
ft_sample_frequencies : [type]
[description]
fft_terms_for_reconstruction : [type]
[description]
linear_trend : [type]
[description]
Returns
-------
[type]
[description]
"""
pi = np.pi
t = np.arange(0, n_periods+forecast_len)
restored_sig = np.zeros(t.size)
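# Note (summarising the loop below): each retained FFT term i contributes
# (|fft_model[i]| / n_periods) * cos(2*pi * f_i * t + phase_i) to the rebuilt
# signal, and the slope of the removed linear trend is added back at the end.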
for i in fft_terms_for_reconstruction:
ampli = np.absolute(fft_model[i]) / n_periods
phase = np.angle(
fft_model[i],
deg = False
)
restored_sig += ampli * np.cos(2 * pi * ft_sample_frequencies[i] * t + phase)
return restored_sig + linear_trend[0] * t
def fft_fit_forecast(ts, fcst_len, params=None, args=None):
"""[summary]
Parameters
----------
ts : [type]
[description]
fcst_len : [type]
[description]
n_harmonics : [type]
[description]
Returns
-------
[type]
[description]
"""
try:
ts_len = len(ts)
n_harmonics = params['n_harmonics']
t = np.arange(0, ts_len)
linear_trend = np.polyfit(t, ts, 1)
training_endog_detrend = ts - linear_trend[0] * t
fft_model = np.fft.fft(training_endog_detrend)
indexes = list(range(ts_len))
# sort by amplitude
indexes.sort(
key = lambda i: np.absolute(fft_model[i]) / ts_len,
reverse = True
)
fft_terms_for_reconstruction = indexes[:1 + n_harmonics * 2]
ft_sample_frequencies = np.fft.fftfreq(
n = ts_len,
d = 1
)
fft_fit_forecast = _reconstruct_signal(
n_periods = ts_len,
forecast_len = fcst_len,
fft_model = fft_model,
ft_sample_frequencies = ft_sample_frequencies,
fft_terms_for_reconstruction = fft_terms_for_reconstruction,
linear_trend = linear_trend
)
fft_fit_forecast = pd.Series(fft_fit_forecast)
fft_fittedvalues = fft_fit_forecast[:-(fcst_len)]
fft_forecast = fft_fit_forecast[-(fcst_len):]
err = None
except Exception as e:
fft_fittedvalues = None
fft_forecast = None
err = str(e)
return fft_fittedvalues, fft_forecast, err
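# Quick usage sketch for fft_fit_forecast (synthetic series, assumed n_harmonics):
# series = pd.Series(np.sin(np.arange(120) / 5.0))
# fitted, forecast, err = fft_fit_forecast(series, fcst_len=12, params={'n_harmonics': 5})
# 'fitted' covers the 120 observed points, 'forecast' the next 12, and 'err'
# is None unless an exception was raised while fitting.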
def fft_tune(ts_train, ts_test, params=None, args=None):
model = 'fft'
try:
if params is None:
space = hp.choice('n_harmonics', [nh for nh in range(2, 20)])
else:
nh_ub = params['n_harmonics']
space = hp.choice('n_harmonics', [nh for nh in range(2, nh_ub)])
f = fitter.model_loss(model)
f_obj = lambda params: f.fit_loss(ts_train, ts_test, params, args)
trials = Trials()
best = fmin(f_obj, space, algo=tpe.suggest, trials=trials, max_evals=100, show_progressbar=False, verbose=False)
err = None
except Exception as e:
err = str(e)
best = None
return best, err
def fft_main(data_dict, param_config, model_params):
"""[summary]
Parameters
----------
data_dict : [type]
[description]
model_params : [type]
[description]
Returns
-------
[type]
[description]
"""
model = 'fft'
fcst_len = param_config['FORECAST_LEN']
output_format = param_config['OUTPUT_FORMAT']
tune = param_config['TUNE']
epsilon = param_config['EPSILON']
complete_fact = data_dict['complete_fact']
# dataframe to hold fitted values
fitted_fact = pd.DataFrame()
fitted_fact['y'] = complete_fact['y']
fitted_fact['data_split'] = complete_fact['data_split']
# dataframe to hold forecast values
forecast_fact = pd.DataFrame()
forecast_fact['y'] = np.full(fcst_len, 0)
forecast_fact['data_split'] = np.full(fcst_len, 'Forecast')
fit_fcst_fact = pd.concat([fitted_fact, forecast_fact], ignore_index=True)
fit_args = dict()
# no model competition
if output_format in ['all_models']:
params = {'n_harmonics': 5}
fitted_values, forecast, err = fft_fit_forecast(
ts = complete_fact['y'],
fcst_len = fcst_len,
params = params,
args = None
)
if err is None:
fit_fcst_fact['fft_forecast'] = fitted_values.append(forecast).values
else:
fit_fcst_fact['fft_forecast'] = 0
fit_args['err'] = err
fit_args['n_harmonics'] = 5
# with model competition
if output_format in ['best_model', 'all_best']:
train_fact = data_dict['train_fact']
test_fact = data_dict['test_fact']
if tune:
# TODO: add logic when optimization fails
model_params = model_params[model]
params, err = param_optimizer.tune(train_fact, test_fact, model, params=model_params)
fit_args['tune_err'] = err
else:
params = {'n_harmonics': 5}
training_fitted_values, holdout_forecast, training_err = fft_fit_forecast(
ts = train_fact['y'],
fcst_len = len(test_fact),
params = params
)
complete_fitted_values, complete_forecast, complete_err = fft_fit_forecast(
ts = complete_fact['y'],
fcst_len = fcst_len,
params = params
)
if training_err is None and complete_err is None:
fft_wfa = models_util.compute_wfa(
y = test_fact['y'].values,
yhat = holdout_forecast.values,
epsilon = epsilon,
)
fft_fit_fcst = training_fitted_values.append(holdout_forecast, ignore_index=True).append(complete_forecast, ignore_index=True)
fit_fcst_fact['fft_forecast'] = fft_fit_fcst.values
fit_fcst_fact['fft_wfa'] = fft_wfa
else:
fft_wfa = -1
fit_fcst_fact['fft_forecast'] = 0
fit_fcst_fact['fft_wfa'] = -1
fit_args['err'] = (training_err, complete_err)
fit_args['wfa'] = fft_wfa
fit_args['n_harmonics'] = params['n_harmonics']
return fit_fcst_fact, fit_args
|
py | b4024da6c2b0169d67d1a5fe9c1deae9f1b68099 | """
Sample Solution for Lab3
Use "run.py [--sim] lab3_solution" to execute
"""
import create2
import math
import odometry
class Run:
def __init__(self, create, time, sonar, servo):
"""Constructor.
Args:
create (robot.Create2Driver)
time (time)
sonar (robot.Sonar)
servo (robot.Servo)
"""
self.create = create
self.time = time
self.sonar = sonar
self.servo = servo
self.odometry = odometry.Odometry()
def sleep(self, time_in_sec):
"""Sleeps for the specified amount of time while keeping odometry up-to-date
Args:
time_in_sec (float): time to sleep in seconds
"""
start = self.time.time()
while True:
state = self.create.update()
if state is not None:
self.odometry.update(state.leftEncoderCounts, state.rightEncoderCounts)
print("[{},{},{}]".format(self.odometry.x, self.odometry.y, math.degrees(self.odometry.theta)))
t = self.time.time()
if start + time_in_sec <= t:
break
def run(self):
self.create.start()
self.create.safe()
# request sensors
self.create.start_stream([
create2.Sensor.LeftEncoderCounts,
create2.Sensor.RightEncoderCounts,
])
self.create.drive_direct(100, 100)
self.sleep(5)
self.create.drive_direct(100, -100)
self.sleep(2.0)
|
py | b4024e234aec30273b0f59c215de78474edc3ed3 | import os
directory = os.path.expanduser('~/Desktop/switch/switch-engine/')
from bs4 import BeautifulSoup
import csv
index = []
for file in os.listdir(directory):
doc=open(os.path.join(directory,file),'rb')
soup=BeautifulSoup(doc,'html.parser', from_encoding="windows-1252")
title=soup.find(bgcolor='#666666').get_text(":", strip=True)
author =soup.find(class_='DBoutput').find('table').find('a').get_text(":", strip=True)
issue =soup.find(class_='DBoutput').find('table').find(align='right').find('a').get_text(":", strip=True)
date =soup.find(class_='DBoutput').tr.next_sibling.next_sibling.a.next_sibling.replace('\n', '').replace('\r','').replace(' on ','')
item =[title, author, issue, date, file]
index.append(item)
with open("out.csv", "w", encoding='utf-8') as f:
writer = csv.writer(f)
writer.writerows(index)
|
py | b4024f9e33897c598952d0a2848f01b0bb691474 | # --------
# Generating a private key from a random number
# --------
# source: Chapter 4 Bitcoin Book Andreas Antonopoulos
# Pick a number between 0 and n - 1 using any method that is certainly unpredictable,
# where n = 2^256 - 2^32 - 2^9 - 2^8 - 2^7 - 2^6 - 2^4 - 1
# (slightly less than 2^256)
# Bitcoin software uses the computer's operating system
# to generate a random number; usually the generator is initialized
# with a human source of randomness, like wiggling your mouse
# The private key can be any number between 0 and n - 1 inclusive
# Method:
# Find a cryptographically secure source of entropy (randomness)
# Randomly pick a 256-bit number repeatedly until it is less than n
# Randomly picking:
# feed a larger string of random bits from the secure source
# into the sha256 algorithm, which conveniently produces a 256-bit number
import hashlib
import secrets
n = 2**256 - 2**32 - 2**9 - 2**8 - 2**7 - 2**6 - 2**4 - 1
private_key = 0 # will be bytes
while True:
h = hashlib.sha256()
h.update(secrets.token_bytes(32)) # 32 bytes / 256 bits
private_key = h.digest()
m = int.from_bytes(private_key, byteorder="big")
if m < n :
break
print("\n-----")
print("PRIVATE KEY")
print("-----")
print("\nIn bytes:\n", private_key)
print("\nIn hex:\n", private_key.hex())
print("\n-----")
|
py | b40250841e9923f2e8fe3605545d15afdf05fafd | # Generated by Django 3.1.13 on 2021-09-28 11:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('nomenclatoare', '0014_historicalmaterialestructura_materialestructura'),
('app', '0053_auto_20210927_1516'),
]
operations = [
migrations.AddField(
model_name='bisericapage',
name='adresa',
field=models.CharField(blank=True, max_length=250, null=True),
),
migrations.AddField(
model_name='bisericapage',
name='conservare',
field=models.IntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='bisericapage',
name='latitudine',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='bisericapage',
name='localitate',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='pp_biserici', to='nomenclatoare.localitate'),
),
migrations.AddField(
model_name='bisericapage',
name='longitudine',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='bisericapage',
name='prioritizare',
field=models.IntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='bisericapage',
name='valoare',
field=models.CharField(blank=True, max_length=5, null=True),
),
migrations.AddField(
model_name='identificarepage',
name='judet',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='ppp_biserici', to='nomenclatoare.judet'),
),
]
|
py | b40250d7b755f2be95c0e53f542b9c6ef0496d12 | # Generated by Django 3.1.1 on 2020-10-11 12:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('orders', '0008_auto_20201011_1918'),
]
operations = [
migrations.AlterField(
model_name='order',
name='order_total',
field=models.DecimalField(decimal_places=2, default=0.0, max_digits=100),
),
migrations.AlterField(
model_name='order',
name='shipping_price',
field=models.DecimalField(decimal_places=2, default=50.0, max_digits=100),
),
]
|
py | b402513d2b96875039abe84d189d75ed00ca5bef | """
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import torch
from torch import nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super().__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super().__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion * planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super().__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.linear = nn.Linear(512 * block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for block_stride in strides:
layers.append(block(self.in_planes, planes, block_stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.maxpool(out)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = self.avgpool(out)
out = torch.flatten(out, 1)
out = self.linear(out)
return out
def ResNet18():
return ResNet(BasicBlock, [2, 2, 2, 2])
def ResNet34():
return ResNet(BasicBlock, [3, 4, 6, 3])
def ResNet50():
return ResNet(Bottleneck, [3, 4, 6, 3])
def ResNet101():
return ResNet(Bottleneck, [3, 4, 23, 3])
def ResNet152():
return ResNet(Bottleneck, [3, 8, 36, 3])
def test():
net = ResNet18()
y = net(torch.randn(1, 3, 32, 32))
print(y.size())
# test()
|
py | b40255bf932d6419d90ebc3443cdcc62ae853857 | #!/usr/bin/python3
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import random
import board
import strategy
SAMPLE_EVERY = 0
def find_disagreement():
print("Reading stores")
our_store = strategy.DistributionStore.load_hdf5('data/bgend_store_15_6.hdf5')
their_store = strategy.DistributionStore.load_hdf5('data/gnubg_store_15_6.hdf5')
config = our_store.config
print("Starting analysis")
move_disagreements = []
boards_examined = 0
progress_indicator = strategy.ProgressIndicator(config.num_valid_boards, 500)
for board_id in our_store.distribution_map:
progress_indicator.complete_one()
if SAMPLE_EVERY and np.random.randint(0, SAMPLE_EVERY) > 0:
continue
boards_examined += 1
b = board.Board.from_id(config, board_id)
our_mcd = our_store.distribution_map[board_id]
their_mcd = their_store.distribution_map[board_id]
for roll in board.ROLLS:
our_moves = our_store.compute_best_moves_for_roll(b, roll)
their_moves = their_store.compute_best_moves_for_roll(b, roll)
our_board = b.apply_moves(our_moves)
their_board = b.apply_moves(their_moves)
if our_board.get_id() == their_board.get_id():
continue
our_moves_our_ev = our_store.distribution_map[our_board.get_id()].expected_value()
our_moves_their_ev = their_store.distribution_map[our_board.get_id()].expected_value()
their_moves_our_ev = our_store.distribution_map[their_board.get_id()].expected_value()
their_moves_their_ev = their_store.distribution_map[their_board.get_id()].expected_value()
move_disagreements.append( (b.get_id(),
roll.dice[0], roll.dice[1],
board.encode_moves_string(our_moves),
our_moves_our_ev,
our_moves_their_ev,
board.encode_moves_string(their_moves),
their_moves_our_ev,
their_moves_their_ev,
)
)
print("Examined {} boards, found {} disagreement".format(boards_examined, len(move_disagreements)))
df = pd.DataFrame.from_records(data=move_disagreements,
index=None,
columns=["board_idx", "roll0", "roll1",
"our_moves", "our_moves_our_ev", "our_moves_their_ev",
"their_moves", "their_moves_our_ev", "their_moves_their_ev"],
)
df.to_csv("data/disagreements.csv")
if __name__ == '__main__':
find_disagreement()
|
py | b40255c8e10a400c4821f9fc9c909746223ab1db | import io
import json
import logging
import platform
import subprocess
import sys
import threading
import yaml
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
logger = logging.getLogger(__name__)
class JSON:
@staticmethod
def read(path):
with open(path) as file:
return json.load(file)
class YAML:
@staticmethod
def read(path):
with open(path) as file_:
return yaml.load(file_, Loader=Loader)
def run(command, directory):
def _exit(process, stream):
process.wait()
logger.debug('%s returned %d', process.args, process.returncode)
error = stream.read()
if error != '':
logger.error(error)
process = subprocess.Popen(
command, cwd=directory, shell=not platform.system() == 'Windows',
stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
ostream = io.TextIOWrapper(process.stdout, errors='replace')
estream = io.TextIOWrapper(process.stderr, errors='replace')
thread = threading.Thread(target=_exit, args=(process, estream,))
thread.start()
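# Rough usage sketch (hypothetical command): the caller gets the wrapped stdout
# stream plus the thread draining stderr, e.g.
# stream, waiter = run(['echo', 'hello'], '.')
# print(stream.read()); waiter.join() # join() returns once the process exited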
return ostream, thread
def to_stderr(text):
sys.stderr.write('{}\n'.format(text))
def to_stdout(text):
sys.stdout.write('{}\n'.format(text))
|
py | b40256cb11e58fa7d849f5c9f54a89b4f1866649 | #!/usr/bin/python3.6
# some Python 3.6 features
# file:///usr/share/doc/python3.7/html/whatsnew/3.6.html
# -- PEP 498, Formatted string literals
import decimal
name = 'Fred'
print(f'His name is {name}')
width, precision, value = 10, 4, decimal.Decimal('12.34567')
print(f'result: {value:{width}.{precision}}')
# -- PEP 526, Syntax for variable annotations
from typing import *
primes: List[int] = []
captain: str
class Starship:
stats: Dict[str, int] = {}
|
py | b402575ef655917ac22a80eec84e1ee2172c161a | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from models_FE.networks.base_network import BaseNetwork
from models_FE.networks.normalization import get_nonspade_norm_layer
from models_FE.networks.architecture import ResnetBlock as ResnetBlock
from models_FE.networks.architecture import SPADEResnetBlock as SPADEResnetBlock
from models_FE.networks.architecture import SPADEResnetBlock_non_spade as SPADEResnetBlock_non_spade
class SPADEGenerator(BaseNetwork):
@staticmethod
def modify_commandline_options(parser, is_train):
parser.set_defaults(norm_G="spectralspadesyncbatch3x3")
parser.add_argument(
"--num_upsampling_layers",
choices=("normal", "more", "most"),
default="normal",
help="If 'more', adds upsampling layer between the two middle resnet blocks. If 'most', also add one more upsampling + resnet layer at the end of the generator",
)
return parser
def __init__(self, opt):
super().__init__()
self.opt = opt
nf = opt.ngf
self.sw, self.sh = self.compute_latent_vector_size(opt)
# print("The size of the latent vector size is [%d,%d]" % (self.sw, self.sh))
if opt.use_vae:
# In case of VAE, we will sample from random z vector
self.fc = nn.Linear(opt.z_dim, 16 * nf * self.sw * self.sh)
else:
# Otherwise, we make the network deterministic by starting with
# downsampled segmentation map instead of random z
if self.opt.no_parsing_map:
self.fc = nn.Conv2d(3, 16 * nf, 3, padding=1)
else:
self.fc = nn.Conv2d(self.opt.semantic_nc, 16 * nf, 3, padding=1)
if self.opt.injection_layer == "all" or self.opt.injection_layer == "1":
self.head_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
else:
self.head_0 = SPADEResnetBlock_non_spade(16 * nf, 16 * nf, opt)
if self.opt.injection_layer == "all" or self.opt.injection_layer == "2":
self.G_middle_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
self.G_middle_1 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
else:
self.G_middle_0 = SPADEResnetBlock_non_spade(16 * nf, 16 * nf, opt)
self.G_middle_1 = SPADEResnetBlock_non_spade(16 * nf, 16 * nf, opt)
if self.opt.injection_layer == "all" or self.opt.injection_layer == "3":
self.up_0 = SPADEResnetBlock(16 * nf, 8 * nf, opt)
else:
self.up_0 = SPADEResnetBlock_non_spade(16 * nf, 8 * nf, opt)
if self.opt.injection_layer == "all" or self.opt.injection_layer == "4":
self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt)
else:
self.up_1 = SPADEResnetBlock_non_spade(8 * nf, 4 * nf, opt)
if self.opt.injection_layer == "all" or self.opt.injection_layer == "5":
self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, opt)
else:
self.up_2 = SPADEResnetBlock_non_spade(4 * nf, 2 * nf, opt)
if self.opt.injection_layer == "all" or self.opt.injection_layer == "6":
self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, opt)
else:
self.up_3 = SPADEResnetBlock_non_spade(2 * nf, 1 * nf, opt)
final_nc = nf
if opt.num_upsampling_layers == "most":
self.up_4 = SPADEResnetBlock(1 * nf, nf // 2, opt)
final_nc = nf // 2
self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)
self.up = nn.Upsample(scale_factor=2)
def compute_latent_vector_size(self, opt):
if opt.num_upsampling_layers == "normal":
num_up_layers = 5
elif opt.num_upsampling_layers == "more":
num_up_layers = 6
elif opt.num_upsampling_layers == "most":
num_up_layers = 7
else:
raise ValueError("opt.num_upsampling_layers [%s] not recognized" % opt.num_upsampling_layers)
sw = opt.crop_size // (2 ** num_up_layers)
sh = round(sw / opt.aspect_ratio)
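# Worked example (assumed option values): with crop_size=256, aspect_ratio=1.0
# and num_upsampling_layers='normal' (5 up layers), sw = 256 // 2**5 = 8 and
# sh = round(8 / 1.0) = 8, so the generator starts from an 8x8 latent map.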
return sw, sh
def forward(self, input, degraded_image, z=None):
seg = input
if self.opt.use_vae:
# we sample z from unit normal and reshape the tensor
if z is None:
z = torch.randn(input.size(0), self.opt.z_dim, dtype=torch.float32, device=input.get_device())
x = self.fc(z)
x = x.view(-1, 16 * self.opt.ngf, self.sh, self.sw)
else:
# we downsample segmap and run convolution
if self.opt.no_parsing_map:
x = F.interpolate(degraded_image, size=(self.sh, self.sw), mode="bilinear")
else:
x = F.interpolate(seg, size=(self.sh, self.sw), mode="nearest")
x = self.fc(x)
x = self.head_0(x, seg, degraded_image)
x = self.up(x)
x = self.G_middle_0(x, seg, degraded_image)
if self.opt.num_upsampling_layers == "more" or self.opt.num_upsampling_layers == "most":
x = self.up(x)
x = self.G_middle_1(x, seg, degraded_image)
x = self.up(x)
x = self.up_0(x, seg, degraded_image)
x = self.up(x)
x = self.up_1(x, seg, degraded_image)
x = self.up(x)
x = self.up_2(x, seg, degraded_image)
x = self.up(x)
x = self.up_3(x, seg, degraded_image)
if self.opt.num_upsampling_layers == "most":
x = self.up(x)
x = self.up_4(x, seg, degraded_image)
x = self.conv_img(F.leaky_relu(x, 2e-1))
x = F.tanh(x)
return x
class Pix2PixHDGenerator(BaseNetwork):
@staticmethod
def modify_commandline_options(parser, is_train):
parser.add_argument(
"--resnet_n_downsample", type=int, default=4, help="number of downsampling layers in netG"
)
parser.add_argument(
"--resnet_n_blocks",
type=int,
default=9,
help="number of residual blocks in the global generator network",
)
parser.add_argument(
"--resnet_kernel_size", type=int, default=3, help="kernel size of the resnet block"
)
parser.add_argument(
"--resnet_initial_kernel_size", type=int, default=7, help="kernel size of the first convolution"
)
# parser.set_defaults(norm_G='instance')
return parser
def __init__(self, opt):
super().__init__()
input_nc = 3
# print("xxxxx")
# print(opt.norm_G)
norm_layer = get_nonspade_norm_layer(opt, opt.norm_G)
activation = nn.ReLU(False)
model = []
# initial conv
model += [
nn.ReflectionPad2d(opt.resnet_initial_kernel_size // 2),
norm_layer(nn.Conv2d(input_nc, opt.ngf, kernel_size=opt.resnet_initial_kernel_size, padding=0)),
activation,
]
# downsample
mult = 1
for i in range(opt.resnet_n_downsample):
model += [
norm_layer(nn.Conv2d(opt.ngf * mult, opt.ngf * mult * 2, kernel_size=3, stride=2, padding=1)),
activation,
]
mult *= 2
# resnet blocks
for i in range(opt.resnet_n_blocks):
model += [
ResnetBlock(
opt.ngf * mult,
norm_layer=norm_layer,
activation=activation,
kernel_size=opt.resnet_kernel_size,
)
]
# upsample
for i in range(opt.resnet_n_downsample):
nc_in = int(opt.ngf * mult)
nc_out = int((opt.ngf * mult) / 2)
model += [
norm_layer(
nn.ConvTranspose2d(nc_in, nc_out, kernel_size=3, stride=2, padding=1, output_padding=1)
),
activation,
]
mult = mult // 2
# final output conv
model += [
nn.ReflectionPad2d(3),
nn.Conv2d(nc_out, opt.output_nc, kernel_size=7, padding=0),
nn.Tanh(),
]
self.model = nn.Sequential(*model)
def forward(self, input, degraded_image, z=None):
return self.model(degraded_image)
|
py | b40257801e54fdd1d116d5af29330d09d4322d74 | import fileinput
current_user = None
current_query = None
current_time = None
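# Illustrative behaviour (made-up rows): the input is tab-separated lines of
# user_id, query, query_time; consecutive repeats of the same query collapse to
# one output line carrying the user and timestamp first seen for that run, and
# the final run is flushed by the print after the loop.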
for line in fileinput.input():
if not fileinput.isfirstline():
fields = line.split('\t')
user_id = fields[0]
query = fields[1]
query_time = fields[2]
if current_query != query:
if current_query is not None:
print('\t'.join((current_user, current_query, current_time)))
current_query = query
current_user = user_id
current_time = query_time
print('\t'.join((current_user, current_query, current_time)))
|
py | b402580bfb03b6e83888eae377f9ca7a61eed3eb | from datetime import datetime
from django.contrib.postgres.fields import JSONField
from django.contrib.gis.db import models
from django.core.files.base import ContentFile
from django.utils import timezone
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from io import BytesIO
from json2html import json2html
from mptt.models import MPTTModel, TreeForeignKey
from PIL import Image
import os
from organisation.utils import get_photo_path, get_photo_ad_path, convert_ad_timestamp
class DepartmentUser(MPTTModel):
"""Represents a Department user. Maps to an object managed by Active Directory.
"""
ACTIVE_FILTER = {'active': True, 'email__isnull': False, 'cost_centre__isnull': False, 'contractor': False}
# The following choices are intended to match options in Alesco.
ACCOUNT_TYPE_CHOICES = (
(2, 'L1 User Account - Permanent'),
(3, 'L1 User Account - Agency contract'),
(0, 'L1 User Account - Department fixed-term contract'),
(8, 'L1 User Account - Seasonal'),
(6, 'L1 User Account - Vendor'),
(7, 'L1 User Account - Volunteer'),
(1, 'L1 User Account - Other/Alumni'),
(11, 'L1 User Account - RoomMailbox'),
(12, 'L1 User Account - EquipmentMailbox'),
(10, 'L2 Service Account - System'),
(5, 'L1 Group (shared) Mailbox - Shared account'),
(9, 'L1 Role Account - Role-based account'),
(4, 'Terminated'),
(14, 'Unknown - AD disabled'),
(15, 'Cleanup - Permanent'),
(16, 'Unknown - AD active'),
)
# The following is a list of account types to normally exclude from user queries.
# E.g. shared accounts, meeting rooms, terminated accounts, etc.
ACCOUNT_TYPE_EXCLUDE = [4, 5, 9, 10, 11, 12, 14, 16]
# The following is a list of account types set for individual staff/vendors,
# i.e. no shared or role-based account types.
# NOTE: it may not necessarily be the inverse of the previous list.
ACCOUNT_TYPE_USER = [2, 3, 0, 8, 6, 7, 1]
POSITION_TYPE_CHOICES = (
(0, 'Full time'),
(1, 'Part time'),
(2, 'Casual'),
(3, 'Other'),
)
date_created = models.DateTimeField(auto_now_add=True)
date_updated = models.DateTimeField(auto_now=True)
cost_centre = models.ForeignKey('organisation.CostCentre', on_delete=models.PROTECT, null=True)
cost_centres_secondary = models.ManyToManyField(
'organisation.CostCentre', related_name='cost_centres_secondary', editable=False,
blank=True, help_text='NOTE: this provides security group access (e.g. T drives).')
org_unit = models.ForeignKey(
'organisation.OrgUnit', on_delete=models.PROTECT, null=True, blank=True,
verbose_name='organisational unit',
help_text="""The organisational unit that represents the user's"""
""" primary physical location (also set their distribution group).""")
org_units_secondary = models.ManyToManyField(
'organisation.OrgUnit', related_name='org_units_secondary', blank=True, editable=False,
help_text='NOTE: this provides email distribution group access.')
extra_data = JSONField(null=True, blank=True)
ad_guid = models.CharField(
max_length=48, unique=True, null=True, blank=True, verbose_name='AD GUID',
help_text='Locally stored AD GUID. This field must match GUID in the AD object for sync to be successful')
azure_guid = models.CharField(
max_length=48, unique=True, null=True, blank=True, verbose_name='Azure GUID',
help_text='Azure AD GUID.')
ad_dn = models.CharField(
max_length=512, unique=True, null=True, blank=True, verbose_name='AD DN',
help_text='AD DistinguishedName value.')
ad_data = JSONField(null=True, blank=True, editable=False)
org_data = JSONField(null=True, blank=True, editable=False)
employee_id = models.CharField(
max_length=128, null=True, unique=True, blank=True, verbose_name='Employee ID',
help_text='HR Employee ID.')
email = models.EmailField(unique=True)
username = models.CharField(
max_length=128, editable=False, blank=True, null=True,
help_text='Pre-Windows 2000 login username.')
name = models.CharField(max_length=128, db_index=True, help_text='Format: [Given name] [Surname]')
given_name = models.CharField(
max_length=128, null=True,
help_text='Legal first name (matches birth certificate/password/etc.)')
surname = models.CharField(
max_length=128, null=True,
help_text='Legal surname (matches birth certificate/password/etc.)')
name_update_reference = models.CharField(
max_length=512, null=True, blank=True, verbose_name='update reference',
help_text='Reference for name/CC change request')
preferred_name = models.CharField(
max_length=256, null=True, blank=True, help_text='Employee-editable preferred name.')
title = models.CharField(
max_length=128, null=True,
help_text='Occupation position title (should match Alesco)')
position_type = models.PositiveSmallIntegerField(
choices=POSITION_TYPE_CHOICES, null=True, blank=True, default=0,
help_text='Employee position working arrangement (should match Alesco status)')
parent = TreeForeignKey(
'self', on_delete=models.PROTECT, null=True, blank=True,
related_name='children', editable=True, verbose_name='Reports to',
help_text='Person that this employee reports to')
expiry_date = models.DateTimeField(
null=True, blank=True, help_text='Date that the AD account will expire.')
date_hr_term = models.DateTimeField(
null=True, blank=True, editable=False, verbose_name='HR termination date', help_text='Date on file with HR as the job termination date.')
hr_auto_expiry = models.BooleanField(
default=False, verbose_name='HR auto expiry', help_text='When the HR termination date changes, automatically update the expiry date to match.')
date_ad_updated = models.DateTimeField(
null=True, editable=False, verbose_name='Date AD updated',
help_text='The date when the AD account was last updated.')
location = models.ForeignKey(
'Location', on_delete=models.PROTECT, null=True, blank=True,
help_text='Current place of work.')
telephone = models.CharField(max_length=128, null=True, blank=True)
mobile_phone = models.CharField(max_length=128, null=True, blank=True)
extension = models.CharField(
max_length=128, null=True, blank=True, verbose_name='VoIP extension')
home_phone = models.CharField(max_length=128, null=True, blank=True)
other_phone = models.CharField(max_length=128, null=True, blank=True)
active = models.BooleanField(
default=True, editable=False,
help_text='Account is active within Active Directory.')
ad_deleted = models.BooleanField(
default=False, editable=False, verbose_name='AD deleted',
help_text='Account has been deleted in Active Directory.')
in_sync = models.BooleanField(
default=False, editable=False,
help_text='CMS data has been synchronised from AD data.')
vip = models.BooleanField(
default=False,
help_text="An individual who carries out a critical role for the department")
executive = models.BooleanField(
default=False, help_text="An individual who is an executive")
contractor = models.BooleanField(
default=False,
help_text="An individual who is an external contractor (does not include agency contract staff)")
photo = models.ImageField(blank=True, upload_to=get_photo_path)
photo_ad = models.ImageField(
blank=True, editable=False, upload_to=get_photo_ad_path)
sso_roles = models.TextField(
null=True, editable=False, help_text="Groups/roles separated by semicolon")
notes = models.TextField(
null=True, blank=True,
help_text='Records relevant to any AD account extension, expiry or deletion (e.g. ticket #).')
working_hours = models.TextField(
default="N/A", null=True, blank=True,
help_text="Description of normal working hours")
secondary_locations = models.ManyToManyField("organisation.Location", blank=True,
related_name='departmentuser_secondary',
help_text="Only to be used for staff working in additional loactions from their cost centre")
populate_primary_group = models.BooleanField(
default=True,
help_text="If unchecked, user will not be added to primary group email")
account_type = models.PositiveSmallIntegerField(
choices=ACCOUNT_TYPE_CHOICES, null=True, blank=True,
help_text='Employee account status (should match Alesco status)')
alesco_data = JSONField(
null=True, blank=True, help_text='Readonly data from Alesco')
security_clearance = models.BooleanField(
default=False, verbose_name='security clearance granted',
help_text='''Security clearance approved by CC Manager (confidentiality
agreement, referee check, police clearance, etc.).''')
o365_licence = models.NullBooleanField(
default=None, editable=False,
help_text='Account consumes an Office 365 licence.')
shared_account = models.BooleanField(
default=False, editable=False,
help_text='Automatically set from account type.')
class MPTTMeta:
order_insertion_by = ['name']
def __init__(self, *args, **kwargs):
super(DepartmentUser, self).__init__(*args, **kwargs)
# Store the pre-save values of some fields on object init.
self.__original_given_name = self.given_name
self.__original_surname = self.surname
self.__original_employee_id = self.employee_id
self.__original_cost_centre_id = self.cost_centre_id
self.__original_name = self.name
self.__original_org_unit_id = self.org_unit_id
self.__original_expiry_date = self.expiry_date
self.__original_photo = self.photo
def __str__(self):
return self.email
def save(self, *args, **kwargs):
"""Override the save method with additional business logic.
"""
if self.employee_id:
if (self.employee_id.lower() == "n/a") or (self.employee_id.strip() == ''):
self.employee_id = None
self.in_sync = True if self.date_ad_updated else False
# If the CC is set but not the OrgUnit, use the CC's OrgUnit.
if self.cost_centre and not self.org_unit:
self.org_unit = self.cost_centre.org_position
if self.cost_centre and self.org_unit:
self.org_data = self.org_data or {}
self.org_data["units"] = list(self.org_unit.get_ancestors(include_self=True).values(
"id", "name", "acronym", "unit_type", "costcentre__code",
"costcentre__name", "location__name"))
self.org_data["unit"] = self.org_data["units"][-1] if len(self.org_data["units"]) else None
if self.org_unit.location:
self.org_data["location"] = self.org_unit.location.as_dict()
for unit in self.org_data["units"]:
unit["unit_type"] = self.org_unit.TYPE_CHOICES_DICT[
unit["unit_type"]]
if self.cost_centre:
self.org_data["cost_centre"] = {
"name": self.cost_centre.org_position.name if self.cost_centre.org_position else '',
"code": self.cost_centre.code,
"cost_centre_manager": str(self.cost_centre.manager),
"business_manager": str(self.cost_centre.business_manager),
"admin": str(self.cost_centre.admin),
"tech_contact": str(self.cost_centre.tech_contact),
}
if self.cost_centres_secondary.exists():
self.org_data['cost_centres_secondary'] = [{
'name': i.name,
'code': i.code,
} for i in self.cost_centres_secondary.all()]
if self.org_units_secondary:
self.org_data['org_units_secondary'] = [{
'name': i.name,
'acronym': i.name,
'unit_type': i.get_unit_type_display(),
} for i in self.org_units_secondary.all()]
if self.account_type in [5, 9]: # Shared/role-based account types.
self.shared_account = True
super(DepartmentUser, self).save(*args, **kwargs)
def update_photo_ad(self):
# If the photo is set to blank, clear any AD thumbnail.
if not self.photo:
if self.photo_ad:
self.photo_ad.delete()
return
else:
# Account for missing media files.
try:
self.photo.file
except FileNotFoundError:
return
# Update self.photo_ad to a 240x240 thumbnail >10 kb in size.
if hasattr(self.photo.file, 'content_type'):
PHOTO_TYPE = self.photo.file.content_type
if PHOTO_TYPE == 'image/jpeg':
PIL_TYPE = 'jpeg'
elif PHOTO_TYPE == 'image/png':
PIL_TYPE = 'png'
else:
return
else:
PIL_TYPE = 'jpeg'
# Good defaults to get ~10kb JPEG images
PHOTO_AD_SIZE = (240, 240)
PIL_QUALITY = 75
# Remote file size limit
PHOTO_AD_FILESIZE = 10000
image = Image.open(BytesIO(self.photo.read()))
image.thumbnail(PHOTO_AD_SIZE, Image.LANCZOS)
# In case we miss 10kb, drop the quality and recompress
for i in range(12):
temp_buffer = BytesIO()
image.convert('RGB').save(temp_buffer, PIL_TYPE, quality=PIL_QUALITY, optimize=True)
length = temp_buffer.tell()
if length <= PHOTO_AD_FILESIZE:
break
if PIL_TYPE == 'png':
PIL_TYPE = 'jpeg'
else:
PIL_QUALITY -= 5
temp_buffer.seek(0)
self.photo_ad.save(os.path.basename(self.photo.name),
ContentFile(temp_buffer.read()), save=False)
def org_data_pretty(self):
if not self.org_data:
return self.org_data
return format_html(json2html.convert(json=self.org_data))
def ad_data_pretty(self):
if not self.ad_data:
return self.ad_data
return format_html(json2html.convert(json=self.ad_data))
def alesco_data_pretty(self):
if not self.alesco_data:
return self.alesco_data
return format_html(json2html.convert(json=self.alesco_data, clubbing=False))
@property
def password_age_days(self):
if self.ad_data and 'pwdLastSet' in self.ad_data:
try:
td = datetime.now() - convert_ad_timestamp(self.ad_data['pwdLastSet'])
return td.days
            except Exception:
pass
return None
@property
def ad_expired(self):
if self.expiry_date and self.expiry_date < timezone.now():
return True
return False
@property
def children_filtered(self):
return self.children.filter(**self.ACTIVE_FILTER).exclude(account_type__in=self.ACCOUNT_TYPE_EXCLUDE)
@property
def children_filtered_ids(self):
return self.children_filtered.values_list('id', flat=True)
@property
def org_unit_chain(self):
return self.org_unit.get_ancestors(ascending=True, include_self=True).values_list('id', flat=True)
@property
def group_unit(self):
"""Return the group-level org unit, as seen in the primary address book view.
"""
for org in self.org_unit.get_ancestors(ascending=True, include_self=True):
if org.unit_type in (0, 1):
return org
return self.org_unit
def get_gal_department(self):
"""Return a string to place into the "Department" field for the Global Address List.
"""
s = ''
if self.org_data and 'units' in self.org_data and len(self.org_data['units']) > 0:
s = self.org_data['units'][0]['acronym']
if len(self.org_data['units']) > 1:
s += ' - {}'.format(self.org_data['units'][1]['name'])
return s
def get_full_name(self):
# Return given_name and surname, with a space in between.
full_name = '{} {}'.format(self.given_name, self.surname)
return full_name.strip()
class Location(models.Model):
"""A model to represent a physical location.
"""
name = models.CharField(max_length=256, unique=True)
manager = models.ForeignKey(
DepartmentUser, on_delete=models.PROTECT, null=True, blank=True,
related_name='location_manager')
address = models.TextField(unique=True, blank=True)
pobox = models.TextField(blank=True, verbose_name='PO Box')
phone = models.CharField(max_length=128, null=True, blank=True)
fax = models.CharField(max_length=128, null=True, blank=True)
email = models.CharField(max_length=128, null=True, blank=True)
point = models.PointField(null=True, blank=True)
url = models.CharField(
max_length=2000,
help_text='URL to webpage with more information',
null=True,
blank=True)
bandwidth_url = models.CharField(
max_length=2000,
        help_text='URL to PRTG graph of bandwidth utilisation',
null=True,
blank=True)
ascender_code = models.CharField(max_length=16, null=True, blank=True, unique=True)
active = models.BooleanField(default=True)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
def as_dict(self):
return {k: getattr(self, k) for k in (
'id', 'name', 'address', 'pobox', 'phone', 'fax', 'email') if getattr(self, k)}
class OrgUnit(MPTTModel):
"""Represents an element within the Department organisational hierarchy.
"""
TYPE_CHOICES = (
(0, 'Department (Tier one)'),
(1, 'Division (Tier two)'),
(11, 'Division'),
(9, 'Group'),
(2, 'Branch'),
(7, 'Section'),
(3, 'Region'),
(6, 'District'),
(8, 'Unit'),
(5, 'Office'),
(10, 'Work centre'),
)
TYPE_CHOICES_DICT = dict(TYPE_CHOICES)
unit_type = models.PositiveSmallIntegerField(choices=TYPE_CHOICES)
ad_guid = models.CharField(
max_length=48, unique=True, null=True, editable=False)
ad_dn = models.CharField(
max_length=512, unique=True, null=True, editable=False)
name = models.CharField(max_length=256)
acronym = models.CharField(max_length=16, null=True, blank=True)
manager = models.ForeignKey(
DepartmentUser, on_delete=models.PROTECT, null=True, blank=True)
parent = TreeForeignKey(
'self', on_delete=models.PROTECT, null=True, blank=True,
related_name='children', db_index=True)
details = JSONField(null=True, blank=True)
location = models.ForeignKey(
Location, on_delete=models.PROTECT, null=True, blank=True)
sync_o365 = models.BooleanField(
default=True, help_text='Sync this to O365 (creates a security group).')
active = models.BooleanField(default=True)
class MPTTMeta:
order_insertion_by = ['name']
class Meta:
ordering = ('name',)
def cc(self):
return ', '.join([str(x) for x in self.costcentre_set.all()])
def __str__(self):
name = self.name
if self.acronym:
name = '{} - {}'.format(self.acronym, name)
#if self.cc():
# return '{} - CC {}'.format(name, self.cc())
return name
def members(self):
return DepartmentUser.objects.filter(org_unit__in=self.get_descendants(
include_self=True), **DepartmentUser.ACTIVE_FILTER)
def save(self, *args, **kwargs):
self.details = self.details or {}
self.details.update({
'type': self.get_unit_type_display(),
})
super(OrgUnit, self).save(*args, **kwargs)
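        # Unless the caller sets the ad-hoc `cheap_save` attribute, re-save every
        # active member so their cached org_data picks up this unit's changes.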
if not getattr(self, 'cheap_save', False):
for user in self.members():
user.save()
def get_descendants_active(self, *args, **kwargs):
"""Exclude 'inactive' OrgUnit objects from get_descendants() queryset.
Returns a list of OrgUnits.
"""
descendants = self.get_descendants(*args, **kwargs).exclude(active=False)
return descendants
class CostCentre(models.Model):
"""Models the details of a Department cost centre / chart of accounts.
"""
name = models.CharField(max_length=128, unique=True, editable=False)
code = models.CharField(max_length=16, unique=True)
chart_acct_name = models.CharField(
max_length=256, blank=True, null=True, verbose_name='chart of accounts name')
division = models.ForeignKey(
OrgUnit, on_delete=models.PROTECT, null=True, editable=False, related_name='costcentres_in_division')
org_position = models.ForeignKey(
OrgUnit, on_delete=models.PROTECT, blank=True, null=True)
manager = models.ForeignKey(
DepartmentUser, on_delete=models.PROTECT, related_name='manage_ccs',
null=True, blank=True)
business_manager = models.ForeignKey(
DepartmentUser, on_delete=models.PROTECT, related_name='bmanage_ccs',
help_text='Business Manager', null=True, blank=True)
admin = models.ForeignKey(
DepartmentUser, on_delete=models.PROTECT, related_name='admin_ccs',
        help_text='Administration Officer', null=True, blank=True)
tech_contact = models.ForeignKey(
DepartmentUser, on_delete=models.PROTECT, related_name='tech_ccs',
help_text='Technical Contact', null=True, blank=True)
ascender_code = models.CharField(max_length=16, null=True, blank=True, unique=True)
active = models.BooleanField(default=True)
class Meta:
ordering = ('code',)
def save(self, *args, **kwargs):
self.name = str(self)
# If the CC is linked to an OrgUnit, link it to that unit's Division.
if self.org_position:
division = self.org_position.get_ancestors(
include_self=True).filter(unit_type=1)
self.division = division.first()
else:
self.division = None
# Iterate through each DepartmentUser assigned to this CC to cache
        # any org structure/CC changes on that object.
for user in self.departmentuser_set.filter(active=True):
user.save()
super(CostCentre, self).save(*args, **kwargs)
def __str__(self):
return self.code
class CommonFields(models.Model):
"""Meta model class used by other apps
"""
date_created = models.DateTimeField(auto_now_add=True)
date_updated = models.DateTimeField(auto_now=True)
org_unit = models.ForeignKey(OrgUnit, on_delete=models.PROTECT, null=True, blank=True)
cost_centre = models.ForeignKey(CostCentre, on_delete=models.PROTECT, null=True, blank=True)
extra_data = JSONField(null=True, blank=True)
def extra_data_pretty(self):
if not self.extra_data:
return self.extra_data
try:
return format_html(json2html.convert(json=self.extra_data))
except Exception as e:
return repr(e)
def save(self, *args, **kwargs):
if self.cost_centre and not self.org_unit:
self.org_unit = self.cost_centre.org_position
#elif self.cost_centre and self.cost_centre.org_position and self.org_unit not in self.cost_centre.org_position.get_descendants(include_self=True):
# self.org_unit = self.cost_centre.org_position
super(CommonFields, self).save(*args, **kwargs)
class Meta:
abstract = True
|
py | b402586bf53b38ff4203bc18b430e9c8ffb5efe8 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Helio de Jesus and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('SERVICOS')
class TestSERVICOS(unittest.TestCase):
pass
|
py | b4025ae79139a0c4ecf796d8222ecc5c672f15dc | # coding=utf-8
"""
The Micro Service Base Framework.
Based on the Tornado Web Framework to build MSE services and integrate Aliyun MSE and SkyWalking.
"""
version = "0.0.1"
|
py | b4025e385cbdb80d1f265bf8255e6f0b42214071 | import json
from flask import Flask, request, abort, make_response, Response
from sqlalchemy.orm.exc import NoResultFound
from functools import wraps
from server.models import *
app = Flask(__name__)
app.config.update(svrcfg['flask'])
def loadData():
pass
def authenticate(role=None):
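    """Decorator factory enforcing HTTP Basic authentication on a view.
    Credentials are checked against the User table; when ``role`` is given, the
    authenticated user must also hold that role. The wrapped view receives the
    authenticated ``user`` object as a keyword argument.
    """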
def wrapper(func):
@wraps(func)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth:
abort(401, description='You must provide authentication information')
try:
user = session.query(User).filter(User.id == auth.username).one()
except NoResultFound:
                abort(401, description='The user {} does not exist!'.format(auth.username))
if user.password != auth.password:
abort(401, description='Password of user {} is incorrect!'.format(auth.username))
if role is not None and user.role != role:
abort(401, description='Unauthorized Access')
return func(*args, user=user, **kwargs)
return decorated
return wrapper
def errorjson(error, status):
responseobj = {'message': error.description, 'http_code': status}
return Response(to_json(responseobj), status=status, mimetype='application/json')
@app.errorhandler(400)
def badrequest(error):
return errorjson(error, 400)
@app.errorhandler(401)
def unauthenticated(error):
return make_response(error.description, 401, {'WWW-Authenticate': 'Basic realm="flask-chiasma"'})
@app.errorhandler(403)
def forbidden(error):
return errorjson(error, 403)
def to_json(obj):
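    """Serialize ``obj`` to JSON: prefer the object's own ``to_json`` method,
    hand-assemble list output from each element's ``to_json``, and fall back to
    ``json.dumps`` for anything else.
    """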
if hasattr(obj, 'to_json'):
return obj.to_json()
if isinstance(obj, list):
return '[\r\n' + ',\r\n'.join(elem.to_json() for elem in obj) + '\r\n]'
else:
return json.dumps(obj)
def jsonresp(func):
@wraps(func)
def decorated(*args, **kwargs):
respobj = func(*args, **kwargs)
return Response(to_json(respobj), status=200, mimetype='application/json')
return decorated
@app.cli.command('initdb')
def initdb_command():
print('Initializing the database...')
print('Creating tables...')
init_schema()
print('All tables created. Populating all data...')
user1 = User('cumuli', 'cumuli123', 'admin')
user2 = User('normal', 'normal123', 'common')
session.add(user1)
session.add(user2)
session.commit()
silo1 = Silo('silo1', 'cumuli')
silo2 = Silo('silo2', 'normal')
session.add(silo1)
session.add(silo2)
session.commit()
webserver = DnsRecord('silo1', 'webserver', '127.0.0.1')
dbserver = DnsRecord('silo1', 'dbserver', '127.0.0.1')
session.add(webserver)
session.add(dbserver)
webserver2 = DnsRecord('silo2', 'webserver', '127.0.0.1')
dbserver2 = DnsRecord('silo2', 'dbserver', '127.0.0.1')
session.add(webserver2)
session.add(dbserver2)
session.commit()
print('Initialized the database.')
@app.teardown_appcontext
def shutdown_session(exception=None):
session.remove()
@app.route('/', methods=['GET'])
@jsonresp
def getindex():
return {'message': 'Please refer to our api'}
@app.route('/ping', methods=['GET', 'POST', 'PUT'])
@jsonresp
def ping():
return {'message': 'PONG'}
@app.route('/ip', methods=['GET'])
@jsonresp
def getip():
"""
Echos client's IP address
"""
# request.META['REMOTE_ADDR']
return {'ip': request.remote_addr}
@app.route('/silos', methods=['GET'])
@jsonresp
@authenticate('admin')
def listsilos(user):
result = session.query(Silo).all()
return result
@app.route('/silos/<string:silo_id>', methods=['GET'])
@jsonresp
@authenticate()
def getsilo(silo_id, user):
try:
silo = session.query(Silo).filter(Silo.id==silo_id).one()
except NoResultFound:
abort(404, '%s not found' % silo_id)
if silo.user_id != user.id:
abort(401, description='You do not have the access to the silo')
return silo
@app.route('/silos/<string:silo_id>', methods=['PUT'])
@jsonresp
@authenticate()
def putsilo(silo_id, user):
try:
silo = session.query(Silo).filter(Silo.id==silo_id).one()
except NoResultFound:
abort(404, "%s not found" % silo_id)
if silo.user_id != user.id:
abort(401, description='You do not have the access to the silo')
try:
reqjson = request.get_json(force=True, silent=True)
if not reqjson:
abort(400, 'The request must be a valid json')
if not 'id' in reqjson:
            abort(400, 'The request body json must contain a valid "id" field')
if silo_id != reqjson['id']:
            abort(403, 'The silo\'s id in the request must match the one in the URL. "%s" vs. "%s"' % (silo_id, reqjson['id']))
if not 'dnsrecords' in reqjson:
abort(400, 'The request body json must contain a valid "dnsrecords" field')
dns_records = reqjson['dnsrecords']
for dns_record in dns_records:
if not 'hostname' in dns_record:
abort(400, 'The dnsrecord "%s" must have a valid "hostname" key' % dns_record)
if not 'ip' in dns_record:
abort(400, 'The dnsrecord "%s" must have a valid "ip" key' % dns_record)
session.query(DnsRecord).filter(DnsRecord.silo_id==silo_id).delete()
session.query(Silo).filter(Silo.id==silo_id).delete()
silo = Silo(silo_id)
session.add(silo)
for dns_record in dns_records:
dnsrecord = DnsRecord(silo_id, dns_record['hostname'], dns_record['ip'])
session.add(dnsrecord)
session.commit()
return session.query(Silo).filter(Silo.id==silo_id).one()
except:
session.rollback()
raise
@app.route('/silos/<string:silo_id>', methods=['DELETE'])
@jsonresp
@authenticate('admin')
def deletesilo(silo_id, user):
try:
silo = session.query(Silo).filter(Silo.id==silo_id).one()
except NoResultFound:
abort(404, "%s not found" % silo_id)
if silo.user_id != user.id:
abort(401, description='You do not have the access to the silo')
try:
session.query(DnsRecord).filter(DnsRecord.silo_id==silo_id).delete()
session.query(Silo).filter(Silo.id==silo_id).delete()
session.commit()
except:
session.rollback()
raise
if __name__ == '__main__':
loadData()
print("Running web service... Press CTRL-C to terminate")
app.run()
|
py | b4025f40921362dea4e247c0a4df1d217220b757 | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import os
from scipy.io.wavfile import write
import torch
from mel2samp import files_to_list, MAX_WAV_VALUE
from denoiser import Denoiser
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
def main(mel_files, waveglow_path, sigma, output_dir, sampling_rate, is_fp16,
denoiser_strength):
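    """Load a WaveGlow checkpoint and synthesise a waveform for every mel
    spectrogram file listed in ``mel_files``, writing ``<name>_synthesis.wav``
    files into ``output_dir``.
    """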
mel_files = files_to_list(mel_files)
waveglow = torch.load(waveglow_path)['model']
waveglow = waveglow.remove_weightnorm(waveglow)
waveglow.cuda().eval()
if is_fp16:
from apex import amp
waveglow, _ = amp.initialize(waveglow, [], opt_level="O3")
if denoiser_strength > 0:
denoiser = Denoiser(waveglow).cuda()
for i, file_path in enumerate(mel_files):
file_name = os.path.splitext(os.path.basename(file_path))[0]
mel = torch.load(file_path)
mel = torch.autograd.Variable(mel.cuda())
mel = torch.unsqueeze(mel, 0)
mel = mel.half() if is_fp16 else mel
with torch.no_grad():
audio = waveglow.infer(mel, sigma=sigma)
if denoiser_strength > 0:
audio = denoiser(audio, denoiser_strength)
audio = audio * MAX_WAV_VALUE
audio = audio.squeeze()
audio = audio.cpu().numpy()
audio = audio.astype('int16')
audio_path = os.path.join(
output_dir, "{}_synthesis.wav".format(file_name))
write(audio_path, sampling_rate, audio)
print(audio_path)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-f', "--filelist_path", required=True)
parser.add_argument('-w', '--waveglow_path',
help='Path to waveglow decoder checkpoint with model')
parser.add_argument('-o', "--output_dir", required=True)
parser.add_argument("-s", "--sigma", default=1.0, type=float)
parser.add_argument("--sampling_rate", default=16000, type=int)
parser.add_argument("--is_fp16", action="store_true")
parser.add_argument("-d", "--denoiser_strength", default=0.0, type=float,
help='Removes model bias. Start with 0.1 and adjust')
args = parser.parse_args()
main(args.filelist_path, args.waveglow_path, args.sigma, args.output_dir,
args.sampling_rate, args.is_fp16, args.denoiser_strength)
|
py | b40260b584d0d37829784676558010b1c6b394fb | #!/usr/bin/python
# coding:utf-8
# Author: ASU --<[email protected]>
# Purpose: utility library
"""
Python module that gives you a dictionary whose values are both gettable and settable using both attribute and getitem syntax
"""
import copy
import json
from typing import MutableMapping
from pyxtension.streams import *
__author__ = 'ASU'
supermethod = lambda self: super(self.__class__, self)
class JsonList(slist):
@classmethod
def __decide(cls, j):
if isinstance(j, dict):
return Json(j)
elif isinstance(j, (list, tuple)) and not isinstance(j, JsonList):
return JsonList(list(map(Json._toJ, j)))
elif isinstance(j, stream):
return JsonList(j.map(Json._toJ).toList())
else:
return j
def __init__(self, *args):
slist.__init__(self, stream(*args).map(lambda j: JsonList.__decide(j)))
def toOrig(self):
return [isinstance(t, (Json, JsonList)) and t.toOrig() or t for t in self]
def toString(self):
return json.dumps(self)
K = TypeVar('K')
V = TypeVar('V')
class Json(sdict, dict, MutableMapping[K, V]):
FORBIDEN_METHODS = ('__methods__', '__members__') # Introduced due to PyCharm debugging accessing these methods
@classmethod
def __myAttrs(cls):
return set(dir(cls))
@staticmethod
def load(fp, *args, **kwargs):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
If the contents of ``fp`` is encoded with an ASCII based encoding other
than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must
be specified. Encodings that are not ASCII based (such as UCS-2) are
not allowed, and should be wrapped with
``codecs.getreader(fp)(encoding)``, or simply decoded to a ``unicode``
object and passed to ``loads()``
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
``object_pairs_hook`` is an optional function that will be called with the
result of any object literal decoded with an ordered list of pairs. The
return value of ``object_pairs_hook`` will be used instead of the ``dict``.
This feature can be used to implement custom decoders that rely on the
order that the key and value pairs are decoded (for example,
collections.OrderedDict will remember the order of insertion). If
``object_hook`` is also defined, the ``object_pairs_hook`` takes priority.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg; otherwise ``JSONDecoder`` is used.
"""
return Json.loads(fp.read(), *args, **kwargs)
@staticmethod
def loads(*args, **kwargs):
"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
must be specified. Encodings that are not ASCII based (such as UCS-2)
are not allowed and should be decoded to ``unicode`` first.
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
``object_pairs_hook`` is an optional function that will be called with the
result of any object literal decoded with an ordered list of pairs. The
return value of ``object_pairs_hook`` will be used instead of the ``dict``.
This feature can be used to implement custom decoders that rely on the
order that the key and value pairs are decoded (for example,
collections.OrderedDict will remember the order of insertion). If
``object_hook`` is also defined, the ``object_pairs_hook`` takes priority.
``parse_float``, if specified, will be called with the string
of every JSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for JSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every JSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for JSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN, null, true, false.
This can be used to raise an exception if invalid JSON numbers
are encountered.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg; otherwise ``JSONDecoder`` is used.
"""
d = json.loads(*args, **kwargs)
if isinstance(d, dict):
return Json(d)
elif isinstance(d, list):
return JsonList(d)
else:
raise NotImplementedError("Unknown JSON format: {}".format(d.__class__))
@staticmethod
def fromString(s, *args, **kwargs):
return Json.loads(s, *args, **kwargs)
__decide = lambda self, j: isinstance(j, dict) and Json(j) or (isinstance(j, list) and slist(j) or j)
@classmethod
def _toJ(cls, j):
if isinstance(j, Json):
return j
elif isinstance(j, dict):
return Json(j)
elif isinstance(j, JsonList):
return j
elif isinstance(j, list):
return JsonList(j)
else:
return j
def __init__(self, *args, **kwargs):
if not kwargs and len(args) == 1 and isinstance(args[0], (str, bytes)):
d = json.loads(args[0])
assert isinstance(d, dict)
sdict.__init__(self, d)
elif len(args) >= 2 and isinstance(args[0], (tuple, list)):
sdict.__init__(self, args)
else:
sdict.__init__(self, *args, **kwargs)
def __getitem__(self, name):
"""
This is called when the Dict is accessed by []. E.g.
some_instance_of_Dict['a'];
If the name is in the dict, we return it. Otherwise we set both
the attr and item to a new instance of Dict.
"""
if name in self:
d = sdict.__getitem__(self, name)
if isinstance(d, dict) and not isinstance(d, Json):
j = Json(d)
sdict.__setitem__(self, name, j)
return j
elif isinstance(d, list) and not isinstance(d, JsonList):
j = JsonList(d)
sdict.__setitem__(self, name, j)
return j
elif isinstance(d, set) and not isinstance(d, sset):
j = sset(d)
sdict.__setitem__(self, name, j)
return j
else:
return d
else:
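            # Auto-vivification: a missing key creates, stores and returns an
            # empty nested Json so chained attribute assignment works.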
j = Json()
sdict.__setitem__(self, name, j)
return j
def __getattr__(self, item):
if item in self.FORBIDEN_METHODS:
raise AttributeError("Forbidden methods access to %s. Introduced due to PyCharm debugging problem." % str(
self.FORBIDEN_METHODS))
return self.__getitem__(item)
def __setattr__(self, key, value):
if key not in self.__myAttrs():
self[key] = value
else:
raise AttributeError("'%s' object attribute '%s' is read-only" % (str(self.__class__), key))
def __iter__(self):
return super(Json, self).__iter__()
def items(self):
return stream(dict.items(self)).map(lambda kv: (kv[0], Json._toJ(kv[1])))
def keys(self):
return stream(dict.keys(self))
def values(self):
return stream(dict.values(self)).map(Json._toJ)
def __str__(self):
return json.dumps(self.toOrig(), separators=(',', ':'), default=lambda k: str(k))
def dump(self, *args, **kwargs):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is true (the default), all non-ASCII characters in the
output are escaped with ``\\uXXXX`` sequences, and the result is a ``str``
instance consisting of ASCII characters only. If ``ensure_ascii`` is
``False``, some chunks written to ``fp`` may be ``unicode`` instances.
This usually happens because the input contains unicode strings or the
``encoding`` parameter is used. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter``) this is likely to
cause an error.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation. Since the default item separator is ``', '``, the
output might include trailing whitespace when ``indent`` is specified.
You can use ``separators=(',', ': ')`` to avoid this.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *sort_keys* is ``True`` (default: ``False``), then the output of
dictionaries will be sorted by key.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg; otherwise ``JSONEncoder`` is used.
"""
return json.dump(self.toOrig(), *args, **kwargs)
def dumps(self, *args, **kwargs):
"""Serialize ``self`` to a JSON formatted ``str``.
If ``skipkeys`` is false then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is false, all non-ASCII characters are not escaped, and
the return value may be a ``unicode`` instance. See ``dump`` for details.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation. Since the default item separator is ``', '``, the
output might include trailing whitespace when ``indent`` is specified.
You can use ``separators=(',', ': ')`` to avoid this.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *sort_keys* is ``True`` (default: ``False``), then the output of
dictionaries will be sorted by key.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg; otherwise ``JSONEncoder`` is used.
"""
return json.dumps(self.toOrig(), *args, **kwargs)
def toString(self):
"""
:return: deterministic sorted output string, that can be compared
:rtype: str
"""
return str(self)
"""To be removed and make Json serializable"""
def __eq__(self, y):
return super(Json, self).__eq__(y)
def __reduce__(self):
return self.__reduce_ex__(2)
def __reduce_ex__(self, protocol):
return str(self)
def copy(self):
return Json(super(Json, self).copy())
def __deepcopy__(self, memo):
return Json(copy.deepcopy(self.toOrig(), memo))
def __delattr__(self, name):
if name in self:
return supermethod(self).__delitem__(name)
else:
raise AttributeError("%s instance has no attribute %s" % (str(self.__class__), name))
def toOrig(self):
"""
Converts Json to a native dict
:return: stream dictionary
:rtype: sdict
"""
return sdict(
self.items()
.map(lambda kv: (kv[0], isinstance(kv[1], (Json, JsonList)) and kv[1].toOrig() or kv[1]))
)
class FrozenJson(Json):
def __init__(self, *args, **kwargs):
super(FrozenJson, self).__init__(*args, **kwargs)
def __setattr__(self, key, value):
raise TypeError("Can not update a FrozenJson instance by (key,value): ({},{})".format(key, value))
def __hash__(self):
return hash(self.toString())
|
py | b40261908173814658f223d5a36e4b9172be4e48 | import unittest
class TestStringMethods(unittest.TestCase):
def test_upper(self):
self.assertEqual('foo'.upper(), 'FOO')
def test_isupper(self):
self.assertTrue('FOO'.isupper())
        self.assertFalse('Foo'.isupper())
def test_split(self):
s = 'hello world'
self.assertEqual(s.split(), ['hello', 'world'])
# check that s.split fails when the separator is not a string
with self.assertRaises(TypeError):
s.split(2)
if __name__ == '__main__':
unittest.main()
|
py | b40261be0b32d53123304269f002195553a97ddf | from qulacs import QuantumState
from qulacs.gate import CNOT, H, RY
from skqulacs.circuit import show_blochsphere
def test_bloch():
n = 3
state = QuantumState(n)
state.set_computational_basis(0b000)
H(0).update_quantum_state(state)
show_blochsphere(state, 0)
RY(0, 0.1).update_quantum_state(state)
show_blochsphere(state, 0)
CNOT(0, 1).update_quantum_state(state)
show_blochsphere(state, 0)
show_blochsphere(state, 1)
show_blochsphere(state, 2)
|
py | b402622f12a1b6fbb5243cbe76cddb890117c4dc | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to create the Chrome Updater Installer archive.
This script is used to create an archive of all the files required for a
Chrome Updater install in appropriate directory structure. It reads
updater.release file as input, creates the uncompressed updater.7z archive, and
generates the updater.packed.7z compressed archive.
"""
import configparser
import glob
import optparse
import os
import shutil
import subprocess
import sys
# Directory name inside the uncompressed archive where all the files are.
UPDATER_DIR = "bin"
# Suffix to uncompressed full archive file, appended to options.output_name.
ARCHIVE_SUFFIX = ".7z"
# compressed full archive suffix, will be prefixed by options.output_name.
COMPRESSED_ARCHIVE_SUFFIX = ".packed.7z"
TEMP_ARCHIVE_DIR = "temp_installer_archive"
g_archive_inputs = []
def CompressUsingLZMA(build_dir, compressed_file, input_file, verbose):
lzma_exec = GetLZMAExec(build_dir)
cmd = [lzma_exec,
'a', '-t7z',
# Flags equivalent to -mx9 (ultra) but with the bcj2 turned on (exe
# pre-filter). These arguments are the similar to what the Chrome mini
# installer is using.
'-m0=BCJ2',
'-m1=LZMA:d27:fb128',
'-m2=LZMA:d22:fb128:mf=bt2',
'-m3=LZMA:d22:fb128:mf=bt2',
'-mb0:1',
'-mb0s1:2',
'-mb0s2:3',
os.path.abspath(compressed_file),
os.path.abspath(input_file),]
if os.path.exists(compressed_file):
os.remove(compressed_file)
RunSystemCommand(cmd, verbose)
def CopyAllFilesToStagingDir(config, staging_dir, build_dir, timestamp):
"""Copies the files required for installer archive.
"""
CopySectionFilesToStagingDir(config, 'GENERAL', staging_dir, build_dir,
timestamp)
def CopySectionFilesToStagingDir(config, section, staging_dir, src_dir,
timestamp):
"""Copies installer archive files specified in section from src_dir to
staging_dir. This method reads section from config and copies all the
files specified from src_dir to staging dir.
"""
for option in config.options(section):
src_subdir = option.replace('\\', os.sep)
dst_dir = os.path.join(staging_dir, config.get(section, option))
dst_dir = dst_dir.replace('\\', os.sep)
src_paths = glob.glob(os.path.join(src_dir, src_subdir))
for src_path in src_paths:
if dst_dir.endswith(os.sep):
dst_path = os.path.join(dst_dir, os.path.basename(src_path))
else:
dst_path = dst_dir
if not os.path.exists(dst_path):
if not os.path.exists(os.path.dirname(dst_path)):
os.makedirs(os.path.dirname(dst_dir))
g_archive_inputs.append(src_path)
shutil.copy(src_path, dst_path)
os.utime(dst_path, (os.stat(dst_path).st_atime, timestamp))
os.utime(dst_dir, (os.stat(dst_dir).st_atime, timestamp))
def GetLZMAExec(build_dir):
if sys.platform == 'win32':
lzma_exec = os.path.join(build_dir, "..", "..", "third_party",
"lzma_sdk", "Executable", "7za.exe")
else:
lzma_exec = '7zr' # Use system 7zr.
return lzma_exec
def MakeStagingDirectory(staging_dir):
"""Creates a staging path for installer archive. If directory exists already,
deletes the existing directory.
"""
file_path = os.path.join(staging_dir, TEMP_ARCHIVE_DIR)
if os.path.exists(file_path):
shutil.rmtree(file_path)
os.makedirs(file_path)
return file_path
def Readconfig(input_file):
"""Reads config information from input file after setting default value of
global variables.
"""
variables = {}
variables['UpdaterDir'] = UPDATER_DIR
config = configparser.ConfigParser(variables)
config.read(input_file)
return config
def RunSystemCommand(cmd, verbose):
"""Runs |cmd|, prints the |cmd| and its output if |verbose|; otherwise
captures its output and only emits it on failure.
"""
if verbose:
print('Running', cmd)
try:
# Run |cmd|, redirecting stderr to stdout in order for captured errors to be
# inline with corresponding stdout.
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
if verbose:
print(output)
except subprocess.CalledProcessError as e:
raise Exception("Error while running cmd: %s\n"
"Exit code: %s\n"
"Command output:\n%s" %
(e.cmd, e.returncode, e.output))
def CreateArchiveFile(options, staging_dir, timestamp):
"""Creates a new installer archive file after deleting any existing old file.
"""
# First create an uncompressed archive file for the current build (updater.7z)
lzma_exec = GetLZMAExec(options.build_dir)
archive_file = os.path.join(options.output_dir,
options.output_name + ARCHIVE_SUFFIX)
if options.depfile:
# If a depfile was requested, do the glob of the staging dir and generate
# a list of dependencies in .d format. We list the files that were copied
# into the staging dir, not the files that are actually in the staging dir
# because the ones in the staging dir will never be edited, and we want
# to have the build be triggered when the thing-that-was-copied-there
# changes.
def PathFixup(path):
"""Fixes path for depfile format: backslash to forward slash, and
backslash escaping for spaces."""
return path.replace('\\', '/').replace(' ', '\\ ')
# Gather the list of files in the staging dir that will be zipped up. We
# only gather this list to make sure that g_archive_inputs is complete (i.e.
    # that no file copies were missed).
staging_contents = []
    for root, dirs, files in os.walk(os.path.join(staging_dir, UPDATER_DIR)):
for filename in files:
staging_contents.append(PathFixup(os.path.join(root, filename)))
# Make sure there's an archive_input for each staging dir file.
for staging_file in staging_contents:
for archive_input in g_archive_inputs:
archive_rel = PathFixup(archive_input)
if (os.path.basename(staging_file).lower() ==
os.path.basename(archive_rel).lower()):
break
else:
raise Exception('Did not find an archive input file for "%s"' %
staging_file)
# Finally, write the depfile referencing the inputs.
    with open(options.depfile, 'w') as f:
f.write(PathFixup(os.path.relpath(archive_file, options.build_dir)) +
': \\\n')
f.write(' ' + ' \\\n '.join(PathFixup(x) for x in g_archive_inputs))
# It is important to use abspath to create the path to the directory because
# if you use a relative path without any .. sequences then 7za.exe uses the
# entire relative path as part of the file paths in the archive. If you have
# a .. sequence or an absolute path then only the last directory is stored as
# part of the file paths in the archive, which is what we want.
cmd = [lzma_exec,
'a',
'-t7z',
archive_file,
os.path.abspath(os.path.join(staging_dir, UPDATER_DIR)),
'-mx0',]
# There does not seem to be any way in 7za.exe to override existing file so
# we always delete before creating a new one.
if not os.path.exists(archive_file):
RunSystemCommand(cmd, options.verbose)
elif options.skip_rebuild_archive != "true":
os.remove(archive_file)
RunSystemCommand(cmd, options.verbose)
# Do not compress the archive when skip_archive_compression is specified.
if options.skip_archive_compression:
compressed_file = os.path.join(
options.output_dir, options.output_name + COMPRESSED_ARCHIVE_SUFFIX)
if os.path.exists(compressed_file):
os.remove(compressed_file)
return os.path.basename(archive_file)
compressed_archive_file = options.output_name + COMPRESSED_ARCHIVE_SUFFIX
compressed_archive_file_path = os.path.join(options.output_dir,
compressed_archive_file)
os.utime(archive_file, (os.stat(archive_file).st_atime, timestamp))
CompressUsingLZMA(options.build_dir, compressed_archive_file_path,
archive_file, options.verbose)
return compressed_archive_file
_RESOURCE_FILE_HEADER = """\
// This file is automatically generated by create_installer_archive.py.
// It contains the resource entries that are going to be linked inside the exe.
// For each file to be linked there should be two lines:
// - The first line contains the output filename (without path) and the
// type of the resource ('BN' - not compressed , 'BL' - LZ compressed,
// 'B7' - LZMA compressed)
// - The second line contains the path to the input file. Uses '/' to
// separate path components.
"""
def CreateResourceInputFile(
output_dir, archive_file, resource_file_path,
component_build, staging_dir):
"""Creates resource input file for installer target."""
# An array of (file, type, path) tuples of the files to be included.
resources = [(archive_file, 'B7',
os.path.join(output_dir, archive_file))]
with open(resource_file_path, 'w') as f:
f.write(_RESOURCE_FILE_HEADER)
for (file, type, path) in resources:
f.write('\n%s %s\n "%s"\n' % (file, type, path.replace("\\","/")))
def ParseDLLsFromDeps(build_dir, runtime_deps_file):
"""Parses the runtime_deps file and returns the set of DLLs in it, relative
to build_dir."""
build_dlls = set()
args = open(runtime_deps_file).read()
for l in args.splitlines():
if os.path.splitext(l)[1] == ".dll":
build_dlls.add(os.path.join(build_dir, l))
return build_dlls
# Copies component build DLLs for the setup to be able to find those DLLs at
# run-time.
# This is meant for developer builds only and should never be used to package
# an official build.
def DoComponentBuildTasks(staging_dir, build_dir, setup_runtime_deps):
installer_dir = os.path.join(staging_dir, UPDATER_DIR)
if not os.path.exists(installer_dir):
os.mkdir(installer_dir)
setup_component_dlls = ParseDLLsFromDeps(build_dir, setup_runtime_deps)
for setup_component_dll in setup_component_dlls:
g_archive_inputs.append(setup_component_dll)
shutil.copy(setup_component_dll, installer_dir)
def main(options):
"""Main method that reads input file, creates archive file and writes
resource input file.
"""
config = Readconfig(options.input_file)
staging_dir = MakeStagingDirectory(options.staging_dir)
# Copy the files from the build dir.
CopyAllFilesToStagingDir(config, staging_dir, options.build_dir,
options.timestamp)
if options.component_build == '1':
DoComponentBuildTasks(staging_dir, options.build_dir,
options.setup_runtime_deps)
# Name of the archive file built (for example - updater.7z)
archive_file = CreateArchiveFile(options, staging_dir, options.timestamp)
CreateResourceInputFile(options.output_dir,
archive_file, options.resource_file_path,
options.component_build == '1', staging_dir)
def _ParseOptions():
parser = optparse.OptionParser()
parser.add_option('-i', '--input_file',
help='Input file describing which files to archive.')
parser.add_option('-b', '--build_dir',
help='Build directory. The paths in input_file are relative to this.')
parser.add_option('--staging_dir',
help='Staging directory where intermediate files and directories '
'will be created')
parser.add_option('-o', '--output_dir',
help='The output directory where the archives will be written. '
'Defaults to the build_dir.')
parser.add_option('--resource_file_path',
help='The path where the resource file will be output. ')
parser.add_option('-s', '--skip_rebuild_archive',
default="False", help='Skip re-building updater.7z archive if it exists.')
parser.add_option('-n', '--output_name', default='updater',
help='Name used to prefix names of generated archives.')
parser.add_option('--component_build', default='0',
help='Whether this archive is packaging a component build.')
parser.add_option('--skip_archive_compression',
action='store_true', default=False,
help='Turn off compression of updater.7z into updater.packed.7z and '
'helpfully delete any old updater.packed.7z in |output_dir|.')
parser.add_option('--depfile',
help='Generate a depfile with the given name listing the implicit inputs '
'to the archive process that can be used with a build system.')
parser.add_option('--setup_runtime_deps',
help='A file listing runtime dependencies for setup.exe. This will be '
'used to get a list of DLLs to archive in a component build.')
parser.add_option('-v', '--verbose', action='store_true', dest='verbose',
default=False)
parser.add_option(
'--timestamp',
type='int',
help='Timestamp to set archive entry modified times to.')
options, _ = parser.parse_args()
if not options.build_dir:
parser.error('You must provide a build dir.')
options.build_dir = os.path.normpath(options.build_dir)
if not options.staging_dir:
parser.error('You must provide a staging dir.')
if not options.input_file:
parser.error('You must provide an input file')
is_component_build = options.component_build == '1'
if is_component_build and not options.setup_runtime_deps:
parser.error("updater_runtime_deps must be specified for a component build")
if not options.output_dir:
options.output_dir = options.build_dir
return options
if '__main__' == __name__:
options = _ParseOptions()
if options.verbose:
print(sys.argv)
sys.exit(main(options))
|
py | b402623afc5f4721f12bc15a0fcda9e3444cbb3d | import unittest
from obfuscator.strategy.workers_pool import WorkersPool
from obfuscator.strategy.utils import *
init_logger()
def multiply(x):
return x ** 5
def run_me(pool, list_num):
return list(pool.map(multiply, list_num))
class TestWorkersPool(unittest.TestCase):
def test_choices(self):
choices = WorkersPool.choices()
self.assertEqual(len(choices), 4)
        self.assertEqual(type(WorkersPool.default(5)), type(WorkersPool.thread_pool(5)))
def _test_pool_result(self):
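        # The leading underscore keeps this test out of unittest's default discovery.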
workers = 3
expected_result = [32, 243, 1024, 3125]
with WorkersPool.multiprocess(workers) as pool1, WorkersPool.thread_pool_executor(
workers
) as pool2, WorkersPool.thread_pool(workers) as pool3, WorkersPool.greenlets(workers) as pool4:
for index, pool in enumerate((pool1, pool2, pool3, pool4), 1):
info(f"Pool #{index}")
                self.assertEqual(run_me(pool, range(2, 6)), expected_result)
|
py | b40263f3c89c0562fd3f022f78d79cce9e7f3379 | # Generated by Django 2.1.5 on 2019-02-05 16:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('openbook_communities', '0004_communitymoderatoruseractionlog_target_user'),
]
operations = [
migrations.CreateModel(
name='CommunityAdministratorUserActionLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(editable=False)),
('action_type', models.CharField(choices=[('AM', 'Add Moderator'), ('RM', 'Remove Moderator'), ('AA', 'Add Administrator'), ('RA', 'Remove Administrator')], editable=False, max_length=2)),
('administrator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL)),
('community', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='administrators_user_actions_logs', to='openbook_communities.Community')),
('target_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterField(
model_name='communitymoderatoruseractionlog',
name='action_type',
field=models.CharField(choices=[('B', 'Ban'), ('U', 'Unban'), ('AM', 'Add Moderator'), ('RM', 'Remove Moderator'), ('AA', 'Add Administrator'), ('RA', 'Remove Administrator')], editable=False, max_length=2),
),
]
|
py | b402643d574ab09809451cdc72849f5ee538c9d3 | # --------------------------------------------------------
# DaSiamRPN
# Licensed under The MIT License
# Written by Qiang Wang (wangqiang2015 at ia.ac.cn)
# --------------------------------------------------------
#!/usr/bin/python
import argparse, cv2, torch, json
import numpy as np
from os import makedirs
from os.path import realpath, dirname, join, isdir, exists
from net import SiamRPNotb
from run_defense import SiamRPN_init, SiamRPN_track
from utils import rect_2_cxy_wh, cxy_wh_2_rect
parser = argparse.ArgumentParser(description='PyTorch SiamRPN OTB Test')
parser.add_argument('--dataset', dest='dataset', default='OTB2015', help='datasets')
parser.add_argument('-v', '--visualization', dest='visualization', action='store_true',
help='whether visualize result')
def track_video(model, video):
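    """Track a single OTB video with the SiamRPN model, applying the
    attack/defense perturbations, write the per-frame bounding boxes to disk
    and return the achieved frames-per-second.
    """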
image_save = 0
toc, regions = 0, []
image_files, gt = video['image_files'], video['gt']
for f, image_file in enumerate(image_files):
im = cv2.imread(image_file) # TODO: batch load
tic = cv2.getTickCount()
if f == 0: # init
target_pos, target_sz = rect_2_cxy_wh(gt[f])
state = SiamRPN_init(im, target_pos, target_sz, model) # init tracker
location = cxy_wh_2_rect(state['target_pos'], state['target_sz'])
regions.append(gt[f])
att_per = 0 # adversarial perturbation in attack
def_per = 0 # adversarial perturbation in defense
elif f > 0: # tracking
if f % 30 == 1: # clean the perturbation from last frame
att_per = 0
def_per = 0
state, att_per, def_per = SiamRPN_track(state, im, f, regions[f-1], att_per, def_per, image_save, iter=10) # gt_track
location = cxy_wh_2_rect(state['target_pos']+1, state['target_sz'])
regions.append(location)
else:
state, att_per, def_per = SiamRPN_track(state, im, f, regions[f-1], att_per, def_per, image_save, iter=5) # gt_track
location = cxy_wh_2_rect(state['target_pos']+1, state['target_sz'])
regions.append(location)
toc += cv2.getTickCount() - tic
if args.visualization and f >= 0: # visualization
if f == 0: cv2.destroyAllWindows()
if len(gt[f]) == 8:
cv2.polylines(im, [np.array(gt[f], np.int).reshape((-1, 1, 2))], True, (0, 255, 0), 2)
else:
cv2.rectangle(im, (gt[f, 0], gt[f, 1]), (gt[f, 0] + gt[f, 2], gt[f, 1] + gt[f, 3]), (0, 255, 0), 2)
if len(location) == 8:
cv2.polylines(im, [location.reshape((-1, 1, 2))], True, (0, 255, 255), 2)
else:
location = [int(l) for l in location] #
cv2.rectangle(im, (location[0], location[1]),
(location[0] + location[2], location[1] + location[3]), (0, 255, 255), 2)
cv2.putText(im, str(f), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
cv2.imshow(video['name'], im)
cv2.waitKey(1)
toc /= cv2.getTickFrequency()
# save result
video_path = join('test', args.dataset, 'DaSiamRPN_defense')
if not isdir(video_path): makedirs(video_path)
result_path = join(video_path, '{:s}.txt'.format(video['name']))
with open(result_path, "w") as fin:
for x in regions:
fin.write(','.join([str(i) for i in x])+'\n')
print('({:d}) Video: {:12s} Time: {:02.1f}s Speed: {:3.1f}fps'.format(
v_id, video['name'], toc, f / toc))
return f / toc
def load_dataset(dataset):
base_path = join(realpath(dirname(__file__)), 'data', dataset)
if not exists(base_path):
print("Please download OTB dataset into `data` folder!")
exit()
json_path = join(realpath(dirname(__file__)), 'data', dataset + '.json')
info = json.load(open(json_path, 'r'))
for v in info.keys():
path_name = info[v]['name']
info[v]['image_files'] = [join(base_path, path_name, 'img', im_f) for im_f in info[v]['image_files']]
info[v]['gt'] = np.array(info[v]['gt_rect'])-[1,1,0,0] # our tracker is 0-index
info[v]['name'] = v
return info
def main():
global args, v_id
args = parser.parse_args()
net = SiamRPNotb()
net.load_state_dict(torch.load(join(realpath(dirname(__file__)), 'SiamRPNOTB.model')))
#net.load_state_dict(torch.load(join(realpath(dirname(__file__)), 'SiamRPNVOT.model')))
net.eval().cuda()
dataset = load_dataset(args.dataset)
fps_list = []
for v_id, video in enumerate(dataset.keys()):
fps_list.append(track_video(net, dataset[video]))
print('Mean Running Speed {:.1f}fps'.format(np.mean(np.array(fps_list))))
if __name__ == '__main__':
main()
|
py | b40264d472454dbd799a2d7c3b92b9524efb4541 | import tensorflow as tf
import matplotlib.pyplot as plt
from tqdm import tqdm, trange
def train_step(model, input, target, loss_function, optimizer):
"""
Utility function to use with training of sub-classed models. Trains model on given data.
Args:
model (tf.keras.Model): model instance to train
input: training data
target: to training data corresponding labels
loss_function (tf.keras.losses object): loss function to calculate loss of the model
optimizer (tf.keras.optimizer object): optimizer to use for training the model
Returns:
loss (float): combined loss of the model after training on the given data
"""
with tf.GradientTape() as tape:
prediction = model(input, training=True)
loss = loss_function(target, prediction)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
return loss
def test_step(model, test_data, loss_function):
"""
Utility function to use with training of sub-classed models. Performs model evaluation on test data.
Args:
model (tf.keras.Model): model instance to test
test_data (tf.data.Dataset): dataset for testing
loss_function (tf.keras.losses object): loss function to calculate loss of the model
Returns:
test_loss (float): mean loss of the model on the test set
"""
#test_accuracy_aggregator = []
test_loss_aggregator = []
for (input, target) in test_data:
prediction = model(input)
sample_test_loss = loss_function(target, prediction)
#sample_test_accuracy =
test_loss_aggregator.append(sample_test_loss.numpy())
test_loss = tf.reduce_mean(test_loss_aggregator)
return test_loss
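# A minimal usage sketch of the helpers above (placeholder names: `my_model`,
# `train_ds` and `test_ds` are not defined in this module):
#   loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
#   optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
#   for x, y in train_ds:
#       train_step(my_model, x, y, loss_fn, optimizer)
#   print(test_step(my_model, test_ds, loss_fn))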
def training_loop(train_data, test_data, model, loss_function, optimizer, nr_epochs, plot=True):
"""
Utility function to train sub-classed models. Loops nr_epochs times over the given dataset.
Args:
train_data (tf.data.Dataset): dataset for training
test_data (tf.data.Dataset): dataset for testing
model (tf.keras.Model): model instance to train
loss_function (tf.keras.losses object): loss function to use for backpropagation
optimizer (tf.keras.optimizer object): optimizer to use for training the model
nr_epochs (int): number of epochs to train for
plot (Bool): whether to visualize results of the training process in a graph. Default: True
Returns:
None
"""
tf.keras.backend.clear_session()
train_losses = []
test_losses = []
# testing once before training
test_loss = test_step(model, test_data, loss_function)
test_losses.append(test_loss)
# training
for epoch in trange(nr_epochs, unit='epoch', desc='Training progress: ', postfix=f'Loss {test_losses[-1]}'):
epoch_loss_aggregator = []
for input, target in tqdm(train_data):
train_loss = train_step(model, input, target, loss_function, optimizer)
epoch_loss_aggregator.append(train_loss)
train_losses.append(tf.reduce_mean(epoch_loss_aggregator))
test_loss = test_step(model, test_data, loss_function)
test_losses.append(test_loss)
    # Visualize training metrics if desired.
if plot:
plt.figure()
plt.plot(train_losses, label='train loss')
plt.plot(test_losses, label='test loss')
plt.xlabel('epoch')
plt.ylabel(f'{loss_function.name}')
plt.legend() |
py | b4026500a4671c5ddf5b7e1264038e2722bb1c44 |
import os
os.chdir("U:\\GitHub\\scGEAToolbox\\+run\\thirdparty\\harmony\\old")
import pandas as pd
import numpy as np
from scipy.cluster.vq import kmeans
from scipy.stats.stats import pearsonr
#import harmonypy as hm
from ccc import Harmony, run_harmony
meta_data = pd.read_csv("meta.csv")
data_mat = pd.read_csv("pcs.csv",header=None)
data_mat = np.array(data_mat)
vars_use = ['dataset']
ho = run_harmony(data_mat, meta_data, vars_use)
# Write the adjusted PCs to a new file.
res = pd.DataFrame(ho.Z_corr)
res.columns = ['X{}'.format(i + 1) for i in range(res.shape[1])]
res.to_csv("adj.tsv", sep = "\t", index = False)
|
py | b402654fe2db3a8629ca9cbe5e730f4837663253 | # Gunicorn configuration file.
# This file can be used from the Gunicorn CLI with the ``-c`` parameter.
# Eg. ``gunicorn -c <config_file>``
import multiprocessing
bind = "0.0.0.0:5000"
workers = multiprocessing.cpu_count()
|
py | b40265bdab48d17e44a6bbc7a10df4e25536478d | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NotSupported
from ccxt.base.precise import Precise
class kuna(Exchange):
def describe(self):
return self.deep_extend(super(kuna, self).describe(), {
'id': 'kuna',
'name': 'Kuna',
'countries': ['UA'],
'rateLimit': 1000,
'version': 'v2',
'has': {
'CORS': None,
'spot': True,
'margin': None,
'swap': False,
'future': False,
'option': False,
'cancelOrder': True,
'createOrder': True,
'fetchBalance': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchIsolatedPositions': False,
'fetchL3OrderBook': True,
'fetchLeverage': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': 'emulated',
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'reduceMargin': False,
'setLeverage': False,
'setPositionMode': False,
'withdraw': None,
},
'timeframes': None,
'urls': {
'extension': '.json',
'referral': 'https://kuna.io?r=kunaid-gvfihe8az7o4',
'logo': 'https://user-images.githubusercontent.com/51840849/87153927-f0578b80-c2c0-11ea-84b6-74612568e9e1.jpg',
'api': {
'xreserve': 'https://api.xreserve.fund',
'v3': 'https://api.kuna.io',
'public': 'https://kuna.io', # v2
'private': 'https://kuna.io', # v2
},
'www': 'https://kuna.io',
'doc': 'https://kuna.io/documents/api',
'fees': 'https://kuna.io/documents/api',
},
'api': {
'xreserve': {
'get': {
'nonce': 1,
'fee': 1,
'delegated-transactions': 1,
},
'post': {
'delegate-transfer': 1,
},
},
'v3': {
'public': {
'get': {
'timestamp': 1,
'currencies': 1,
'markets': 1,
'tickers': 1,
'k': 1,
'trades_history': 1,
'fees': 1,
'exchange-rates': 1,
'exchange-rates/currency': 1,
'book/market': 1,
'kuna_codes/code/check': 1,
'landing_page_statistic': 1,
'translations/locale': 1,
'trades/market/hist': 1,
},
'post': {
'http_test': 1,
'deposit_channels': 1,
'withdraw_channels': 1,
'subscription_plans': 1,
'send_to': 1,
'confirm_token': 1,
'kunaid': 1,
'withdraw/prerequest': 1,
'deposit/prerequest': 1,
'deposit/exchange-rates': 1,
},
},
'sign': {
'get': {
'reset_password/token': 1,
},
'post': {
'signup/google': 1,
'signup/resend_confirmation': 1,
'signup': 1,
'signin': 1,
'signin/two_factor': 1,
'signin/resend_confirm_device': 1,
'signin/confirm_device': 1,
'reset_password': 1,
'cool-signin': 1,
},
'put': {
'reset_password/token': 1,
'signup/code/confirm': 1,
},
},
'private': {
'post': {
'auth/w/order/submit': 1,
'auth/r/orders': 1,
'auth/r/orders/market': 1,
'auth/r/orders/markets': 1,
'auth/api_tokens/delete': 1,
'auth/api_tokens/create': 1,
'auth/api_tokens': 1,
'auth/signin_history/uniq': 1,
'auth/signin_history': 1,
'auth/disable_withdraw_confirmation': 1,
'auth/change_password': 1,
'auth/deposit_address': 1,
'auth/announcements/accept': 1,
'auth/announcements/unaccepted': 1,
'auth/otp/deactivate': 1,
'auth/otp/activate': 1,
'auth/otp/secret': 1,
'auth/r/order/market/:order_id/trades': 1,
'auth/r/orders/market/hist': 1,
'auth/r/orders/hist': 1,
'auth/r/orders/hist/markets': 1,
'auth/r/orders/details': 1,
'auth/assets-history': 1,
'auth/assets-history/withdraws': 1,
'auth/assets-history/deposits': 1,
'auth/r/wallets': 1,
'auth/markets/favorites': 1,
'auth/markets/favorites/list': 1,
'auth/me/update': 1,
'auth/me': 1,
'auth/fund_sources': 1,
'auth/fund_sources/list': 1,
'auth/withdraw/resend_confirmation': 1,
'auth/withdraw': 1,
'auth/withdraw/details': 1,
'auth/withdraw/info': 1,
'auth/payment_addresses': 1,
'auth/deposit/prerequest': 1,
'auth/deposit/exchange-rates': 1,
'auth/deposit': 1,
'auth/deposit/details': 1,
'auth/deposit/info': 1,
'auth/kuna_codes/count': 1,
'auth/kuna_codes/details': 1,
'auth/kuna_codes/edit': 1,
'auth/kuna_codes/send-pdf': 1,
'auth/kuna_codes': 1,
'auth/kuna_codes/redeemed-by-me': 1,
'auth/kuna_codes/issued-by-me': 1,
'auth/payment_requests/invoice': 1,
'auth/payment_requests/type': 1,
'auth/referral_program/weekly_earnings': 1,
'auth/referral_program/stats': 1,
'auth/merchant/payout_services': 1,
'auth/merchant/withdraw': 1,
'auth/merchant/payment_services': 1,
'auth/merchant/deposit': 1,
'auth/verification/auth_token': 1,
'auth/kunaid_purchase/create': 1,
'auth/devices/list': 1,
'auth/sessions/list': 1,
'auth/subscriptions/reactivate': 1,
'auth/subscriptions/cancel': 1,
'auth/subscriptions/prolong': 1,
'auth/subscriptions/create': 1,
'auth/subscriptions/list': 1,
'auth/kuna_ids/list': 1,
'order/cancel/multi': 1,
'order/cancel': 1,
},
'put': {
'auth/fund_sources/id': 1,
'auth/kuna_codes/redeem': 1,
},
'delete': {
'auth/markets/favorites': 1,
'auth/fund_sources': 1,
'auth/devices': 1,
'auth/devices/list': 1,
'auth/sessions/list': 1,
'auth/sessions': 1,
},
},
},
'public': {
'get': [
'depth', # Get depth or specified market Both asks and bids are sorted from highest price to lowest.
'k_with_pending_trades', # Get K data with pending trades, which are the trades not included in K data yet, because there's delay between trade generated and processed by K data generator
'k', # Get OHLC(k line) of specific market
'markets', # Get all available markets
'order_book', # Get the order book of specified market
'order_book/{market}',
'tickers', # Get ticker of all markets
'tickers/{market}', # Get ticker of specific market
'timestamp', # Get server current time, in seconds since Unix epoch
'trades', # Get recent trades on market, each trade is included only once Trades are sorted in reverse creation order.
'trades/{market}',
],
},
'private': {
'get': [
'members/me', # Get your profile and accounts info
'deposits', # Get your deposits history
'deposit', # Get details of specific deposit
'deposit_address', # Where to deposit The address field could be empty when a new address is generating(e.g. for bitcoin), you should try again later in that case.
'orders', # Get your orders, results is paginated
'order', # Get information of specified order
'trades/my', # Get your executed trades Trades are sorted in reverse creation order.
'withdraws', # Get your cryptocurrency withdraws
'withdraw', # Get your cryptocurrency withdraw
],
'post': [
'orders', # Create a Sell/Buy order
'orders/multi', # Create multiple sell/buy orders
'orders/clear', # Cancel all my orders
'order/delete', # Cancel an order
'withdraw', # Create a withdraw
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'taker': 0.25 / 100,
'maker': 0.25 / 100,
},
'funding': {
'withdraw': {
'UAH': '1%',
'BTC': 0.001,
'BCH': 0.001,
'ETH': 0.01,
'WAVES': 0.01,
'GOL': 0.0,
'GBG': 0.0,
# 'RMC': 0.001 BTC
# 'ARN': 0.01 ETH
# 'R': 0.01 ETH
# 'EVR': 0.01 ETH
},
'deposit': {
# 'UAH': (amount) => amount * 0.001 + 5
},
},
},
'commonCurrencies': {
'PLA': 'Plair',
},
'exceptions': {
'2002': InsufficientFunds,
'2003': OrderNotFound,
},
})
async def fetch_time(self, params={}):
response = await self.publicGetTimestamp(params)
#
# 1594911427
#
return response * 1000
async def fetch_markets(self, params={}):
quotes = ['btc', 'rub', 'uah', 'usd', 'usdt', 'usdc']
markets = []
response = await self.publicGetTickers(params)
#
# {
# shibuah: {
# at: '1644463685',
# ticker: {
# buy: '0.000911',
# sell: '0.00092',
# low: '0.000872',
# high: '0.000963',
# last: '0.000911',
# vol: '1539278096.0',
# price: '1434244.211249'
# }
# }
# }
#
ids = list(response.keys())
for i in range(0, len(ids)):
id = ids[i]
for j in range(0, len(quotes)):
quoteId = quotes[j]
# usd gets matched before usdt in usdtusd USDT/USD
# https://github.com/ccxt/ccxt/issues/9868
slicedId = id[1:]
index = slicedId.find(quoteId)
slice = slicedId[index:]
if (index > 0) and (slice == quoteId):
# usd gets matched before usdt in usdtusd USDT/USD
# https://github.com/ccxt/ccxt/issues/9868
baseId = id[0] + slicedId.replace(quoteId, '')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
markets.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': None,
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': None,
'price': None,
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': None,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
},
'info': None,
})
return markets
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
if limit is not None:
request['limit'] = limit # default = 300
orderbook = await self.publicGetDepth(self.extend(request, params))
timestamp = self.safe_timestamp(orderbook, 'timestamp')
return self.parse_order_book(orderbook, symbol, timestamp)
def parse_ticker(self, ticker, market=None):
timestamp = self.safe_timestamp(ticker, 'at')
ticker = ticker['ticker']
symbol = self.safe_symbol(None, market)
last = self.safe_string(ticker, 'last')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'buy'),
'bidVolume': None,
'ask': self.safe_string(ticker, 'sell'),
'askVolume': None,
'vwap': None,
'open': self.safe_string(ticker, 'open'),
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_string(ticker, 'vol'),
'quoteVolume': None,
'info': ticker,
}, market, False)
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
response = await self.publicGetTickers(params)
ids = list(response.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
market = None
symbol = id
if id in self.markets_by_id:
market = self.markets_by_id[id]
symbol = market['symbol']
else:
base = id[0:3]
quote = id[3:6]
base = base.upper()
quote = quote.upper()
base = self.safe_currency_code(base)
quote = self.safe_currency_code(quote)
symbol = base + '/' + quote
result[symbol] = self.parse_ticker(response[id], market)
return self.filter_by_array(result, 'symbol', symbols)
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTickersMarket(self.extend(request, params))
return self.parse_ticker(response, market)
async def fetch_l3_order_book(self, symbol, limit=None, params={}):
return await self.fetch_order_book(symbol, limit, params)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
timestamp = self.parse8601(self.safe_string(trade, 'created_at'))
symbol = None
if market:
symbol = market['symbol']
side = self.safe_string_2(trade, 'side', 'trend')
if side is not None:
sideMap = {
'ask': 'sell',
'bid': 'buy',
}
side = self.safe_string(sideMap, side, side)
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'volume')
price = self.parse_number(priceString)
amount = self.parse_number(amountString)
cost = self.safe_number(trade, 'funds')
if cost is None:
cost = self.parse_number(Precise.string_mul(priceString, amountString))
orderId = self.safe_string(trade, 'order_id')
id = self.safe_string(trade, 'id')
return {
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': None,
'side': side,
'order': orderId,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
}
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
trades = await self.fetch_trades(symbol, since, limit, params)
ohlcvc = self.build_ohlcvc(trades, timeframe, since, limit)
result = []
for i in range(0, len(ohlcvc)):
ohlcv = ohlcvc[i]
result.append([
ohlcv[0],
ohlcv[1],
ohlcv[2],
ohlcv[3],
ohlcv[4],
ohlcv[5],
])
return result
def parse_balance(self, response):
balances = self.safe_value(response, 'accounts')
result = {'info': balances}
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'balance')
account['used'] = self.safe_string(balance, 'locked')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privateGetMembersMe(params)
return self.parse_balance(response)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
request = {
'market': self.market_id(symbol),
'side': side,
'volume': str(amount),
'ord_type': type,
}
if type == 'limit':
request['price'] = str(price)
response = await self.privatePostOrders(self.extend(request, params))
marketId = self.safe_value(response, 'market')
market = self.safe_value(self.markets_by_id, marketId)
return self.parse_order(response, market)
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'id': id,
}
response = await self.privatePostOrderDelete(self.extend(request, params))
order = self.parse_order(response)
status = order['status']
if status == 'closed' or status == 'canceled':
raise OrderNotFound(self.id + ' ' + self.json(order))
return order
def parse_order_status(self, status):
statuses = {
'done': 'closed',
'wait': 'open',
'cancel': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
marketId = self.safe_string(order, 'market')
symbol = self.safe_symbol(marketId, market)
timestamp = self.parse8601(self.safe_string(order, 'created_at'))
status = self.parse_order_status(self.safe_string(order, 'state'))
type = self.safe_string(order, 'type')
side = self.safe_string(order, 'side')
id = self.safe_string(order, 'id')
return self.safe_order({
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': self.safe_string(order, 'price'),
'stopPrice': None,
'amount': self.safe_string(order, 'volume'),
'filled': self.safe_string(order, 'executed_volume'),
'remaining': self.safe_string(order, 'remaining_volume'),
'trades': None,
'fee': None,
'info': order,
'cost': None,
'average': None,
}, market)
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'id': int(id),
}
response = await self.privateGetOrder(self.extend(request, params))
return self.parse_order(response)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.privateGetOrders(self.extend(request, params))
# todo emulation of fetchClosedOrders, fetchOrders, fetchOrder
# with order cache + fetchOpenOrders
# as in BTC-e, Liqui, Yobit, DSX, Tidex, WEX
return self.parse_orders(response, market, since, limit)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.privateGetTradesMy(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def nonce(self):
return self.milliseconds()
def encode_params(self, params):
if 'orders' in params:
orders = params['orders']
query = self.urlencode(self.keysort(self.omit(params, 'orders')))
for i in range(0, len(orders)):
order = orders[i]
keys = list(order.keys())
for k in range(0, len(keys)):
key = keys[k]
value = order[key]
query += '&orders%5B%5D%5B' + key + '%5D=' + str(value)
return query
return self.urlencode(self.keysort(params))
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = None
if isinstance(api, list):
version, access = api
url = self.urls['api'][version] + '/' + version + '/' + self.implode_params(path, params)
if access == 'public':
if method == 'GET':
if params:
url += '?' + self.urlencode(params)
elif (method == 'POST') or (method == 'PUT'):
headers = {'Content-Type': 'application/json'}
body = self.json(params)
elif access == 'private':
raise NotSupported(self.id + ' private v3 API is not supported yet')
else:
request = '/api/' + self.version + '/' + self.implode_params(path, params)
if 'extension' in self.urls:
request += self.urls['extension']
query = self.omit(params, self.extract_params(path))
url = self.urls['api'][api] + request
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
nonce = str(self.nonce())
query = self.encode_params(self.extend({
'access_key': self.apiKey,
'tonce': nonce,
}, params))
auth = method + '|' + request + '|' + query
signed = self.hmac(self.encode(auth), self.encode(self.secret))
suffix = query + '&signature=' + signed
if method == 'GET':
url += '?' + suffix
else:
body = suffix
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
if code == 400:
error = self.safe_value(response, 'error')
errorCode = self.safe_string(error, 'code')
feedback = self.id + ' ' + self.json(response)
self.throw_exactly_matched_exception(self.exceptions, errorCode, feedback)
# fallback to default error handler
|
py | b4026660daa3453c256df56a1fd355f0b4b9afa9 | # Select the column item from credit_records
# Use brackets and string notation
items = credit_records['item']
# Display the results
print(items)
# Select the column item from credit_records
# Use dot notation
items = credit_records.item
# Display the results
print(items)
# One or more lines of code contain errors.
# Fix the errors so that the code runs.
# Select the location column in credit_records
location = credit_records['location']
# Select the item column in credit_records
items = credit_records.item
# Display results
print(location)
# Use info() to inspect mpr
print(mpr.info())
# The following code contains one or more errors
# Correct the mistakes in the code so that it runs without errors
# Select column "Dog Name" from mpr
name = mpr['Dog Name']
# Select column "Missing?" from mpr
is_missing = mpr['Missing?']
# Display the columns
print(name)
print(is_missing)
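# Note (added for clarity): dot notation only works when the column name is a valid
# Python identifier; columns such as "Dog Name" or "Missing?" require bracket notation.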
|
py | b40266847930f35756d74cb71e0ece139ce50b68 | from __future__ import absolute_import
from sentry import features
from sentry.models import GroupSubscriptionReason, EventError
from sentry.utils.http import absolute_uri
from .base import ActivityEmail
def summarize_issues(issues):
rv = []
for issue in issues:
extra_info = None
msg_d = dict(issue['data'])
msg_d['type'] = issue['type']
if 'image_path' in issue['data']:
extra_info = issue['data']['image_path'].rsplit('/', 1)[-1]
if 'image_arch' in issue['data']:
extra_info = '%s (%s)' % (extra_info, issue['data']['image_arch'], )
rv.append({
'message': EventError.get_message(msg_d),
'extra_info': extra_info,
})
return rv
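# Illustrative sketch (hypothetical issue payload): an entry such as
#   {'type': 'native_missing_dsym', 'data': {'image_path': '/usr/lib/foo.dylib', 'image_arch': 'x86_64'}}
# would be summarized roughly as
#   {'message': EventError.get_message({...}), 'extra_info': 'foo.dylib (x86_64)'}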
class NewProcessingIssuesActivityEmail(ActivityEmail):
def __init__(self, activity):
ActivityEmail.__init__(self, activity)
self.issues = summarize_issues(self.activity.data['issues'])
def get_participants(self):
return dict(
(user, GroupSubscriptionReason.processing_issue)
for user in self.project.get_mail_alert_subscribers()
)
def get_context(self):
return {
'project':
self.project,
'issues':
self.issues,
'reprocessing_active':
self.activity.data['reprocessing_active'],
'info_url':
absolute_uri(
'/{}/{}/settings/processing-issues/'.format(
self.organization.slug,
self.project.slug,
)
),
}
def get_subject(self):
has_new_teams = features.has(
'organizations:new-teams',
self.organization,
)
return u'Processing Issues on {}'.format(
self.project.slug if has_new_teams else self.project.name,
)
def get_template(self):
return 'sentry/emails/activity/new_processing_issues.txt'
def get_html_template(self):
return 'sentry/emails/activity/new_processing_issues.html'
|
py | b40267b93653c02c6c6468a95f27927c8e20394b | from django.core.management.base import BaseCommand
from django.utils.translation import ugettext as _
from django_q.cluster import Cluster
class Command(BaseCommand):
# Translators: help text for qcluster management command
help = _("Starts a Django Q Cluster.")
def add_arguments(self, parser):
parser.add_argument(
'--run-once',
action='store_true',
dest='run_once',
default=False,
help='Run once and then stop.',
)
def handle(self, *args, **options):
q = Cluster()
q.start()
if options.get('run_once', False):
q.stop()
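# Usage sketch (assuming this module is installed as <app>/management/commands/qcluster.py):
#   python manage.py qcluster             # run the cluster until interrupted
#   python manage.py qcluster --run-once  # start, let it process once, then stop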
|
py | b4026876c5dfc02d57e300dddd289947c14c0bf3 | # -*- coding: utf-8 -*-
"""
Spyder Editor
"""
import networkx as nx
import matplotlib.pyplot as plt
import random
import threading
import time
#Global variables
G = nx.Graph()
T = nx.Graph()
path = []
block = 0
total_requests = 10000
lamda_var = 500
mu_var = 1
chosen_graph = G
"""
#test map
T.add_edges_from([('A','B'),('C','B'),('A','C'),('C','D')])
nx.draw(T, with_labels=True)
"""
#Real map
G.add_node('Hannover', pos=(0, 0))
G.add_node('Hamburg', pos=(1.5, 10))
G.add_node('Berlin', pos=(75, 1.5))
G.add_node('Bremen', pos=(-35, 5.5))
G.add_node('Norden', pos=(-72, 10.5))
G.add_node('Dortmund', pos=(-60, -10))
G.add_node('Essen', pos=(-72, -9))
G.add_node('Düsseldorf', pos=(-80, -17))
G.add_node('Köln', pos=(-73, -25))
G.add_node('Frankfurt', pos=(-37, -34))
G.add_node('Leipzig', pos=(55, -14))
G.add_node('Mannheim', pos=(-50, -42))
G.add_node('Karlsruhe', pos=(-56, -51))
G.add_node('Nürnberg', pos=(35, -43))
G.add_node('Stuttgart', pos=(-35, -58))
G.add_node('Ulm', pos=(-7, -68))
G.add_node('München', pos=(45, -75))
G.add_edges_from([('Hannover', 'Hamburg'), ('Hamburg', 'Bremen'), ('Hamburg', 'Berlin'), ('Berlin', 'Leipzig'), ('Leipzig', 'Hannover'), ('Leipzig', 'Nürnberg'), ('Nürnberg', 'München'), ('Hannover', 'Bremen'), ('Norden', 'Bremen'), ('Hannover', 'Dortmund'), ('Dortmund', 'Essen'), ('Norden', 'Dortmund'), ('Essen', 'Düsseldorf'), ('Köln', 'Düsseldorf'), ('Dortmund', 'Köln'), ('Köln', 'Frankfurt'), ('Frankfurt', 'Mannheim'), ('Karlsruhe', 'Mannheim'), ('Karlsruhe', 'Stuttgart'), ('Stuttgart', 'Ulm'), ('Ulm', 'München'), ('Stuttgart', 'Nürnberg'), ('Frankfurt', 'Nürnberg'), ('Frankfurt', 'Leipzig'), ('Frankfurt', 'Hannover'), ('Hannover', 'Berlin')])
pos=nx.get_node_attributes(G,'pos')
nx.draw(G, pos, with_labels=True)
#Show map
plt.show()
#Manage resources
#all_nodes_test = { 1 :'A', 2 :'B', 3 :'C', 4 :'D'}
#all_nodes = { 1 :'Hannover', 2 :'Hamburg', 3 :'Berlin', 4 :'Bremen', 5 :'Norden', 6 :'Dortmund', 7 :'Essen', 8 :'Düsseldorf', 9 :'Köln', 10 :'Frankfurt', 11 : 'Leipzig', 12 :'Mannheim', 13 :'Karlsruhe', 14 :'Nürnberg', 15 :'Stuttgart', 16 :'Ulm', 17 :'München'}
all_nodes = ['Hannover', 'Hamburg', 'Berlin', 'Bremen', 'Norden', 'Dortmund', 'Essen', 'Düsseldorf', 'Köln', 'Frankfurt', 'Leipzig', 'Mannheim', 'Karlsruhe', 'Nürnberg', 'Stuttgart', 'Ulm', 'München']
#all_links_test = {'A, B': 'mot', 'A, C': 'hai', 'B, C': 'ba', 'C, D': 'tu', 'B, A': 'mot', 'C, A': 'hai', 'C, B': 'ba', 'D, C': 'tu'}
all_links = {'Hannover, Hamburg': 'mot', 'Hamburg, Bremen': 'hai', 'Hamburg, Berlin': 'ba', 'Berlin, Leipzig': 'tu', 'Leipzig, Hannover': 'nam', 'Leipzig, Nürnberg': 'sau', 'Nürnberg, München': 'bay', 'Hannover, Bremen': 'tam', 'Norden, Bremen': 'chin', 'Hannover, Dortmund': 'muoi', 'Dortmund, Essen': 'eleven', 'Norden, Dortmund': 'twelve', 'Essen, Düsseldorf': 'thirteen', 'Köln, Düsseldorf': 'forteen', 'Dortmund, Köln': 'fifteen', 'Köln, Frankfurt': 'sixteen', 'Frankfurt, Mannheim': 'seventeen', 'Karlsruhe, Mannheim': 'eighteen', 'Karlsruhe, Stuttgart': 'ninteen', 'Stuttgart, Ulm': 'twenty', 'Ulm, München': 'twenty-one', 'Stuttgart, Nürnberg': 'twenty-two', 'Frankfurt, Nürnberg': 'twenty-three', 'Frankfurt, Leipzig': 'twenty-four', 'Frankfurt, Hannover': 'twenty-five', 'Hannover, Berlin': 'twenty-six', 'Hamburg, Hannover': 'mot', 'Bremen, Hamburg': 'hai', 'Berlin, Hamburg': 'ba', 'Leipzig, Berlin': 'tu', 'Hannover, Leipzig': 'nam', 'Nürnberg, Leipzig': 'sau', 'München, Nürnberg': 'bay', 'Bremen, Hannover': 'tam', 'Bremen, Norden': 'chin', 'Dortmund, Hannover': 'muoi', 'Essen, Dortmund': 'eleven', 'Dortmund, Norden': 'twelve', 'Düsseldorf, Essen': 'thirteen', 'Düsseldorf, Köln': 'forteen', 'Köln, Dortmund': 'fifteen', 'Frankfurt, Köln': 'sixteen', 'Mannheim, Frankfurt': 'seventeen', 'Mannheim, Karlsruhe': 'eighteen', 'Stuttgart, Karlsruhe': 'ninteen', 'Ulm, Stuttgart': 'twenty', 'München, Ulm': 'twenty-one', 'Nürnberg, Stuttgart': 'twenty-two', 'Nürnberg, Frankfurt': 'twenty-three', 'Leipzig, Frankfurt': 'twenty-four', 'Hannover, Frankfurt': 'twenty-five', 'Berlin, Hannover': 'twenty-six'}
"""
color1 = {'mot': 2 , 'hai': 2 , 'ba' : 2 , 'tu': 2}
color2 = {'mot': 2 , 'hai': 2 , 'ba' : 2 , 'tu': 2}
color3 = {'mot': 2 , 'hai': 2 , 'ba' : 2 , 'tu': 2}
color4 = {'mot': 2 , 'hai': 2 , 'ba' : 2 , 'tu': 2}
color5 = {'mot': 2 , 'hai': 2 , 'ba' : 2 , 'tu': 2}
color6 = {'mot': 2 , 'hai': 2 , 'ba' : 2 , 'tu': 2}
color7 = {'mot': 2 , 'hai': 2 , 'ba' : 2 , 'tu': 2}
color8 = {'mot': 2 , 'hai': 2 , 'ba' : 2 , 'tu': 2}
"""
#Each wavelength (color) starts with a capacity of 2 on every link
link_keys = ['mot', 'hai', 'ba', 'tu', 'nam', 'sau', 'bay', 'tam', 'chin', 'muoi', 'eleven', 'twelve', 'thirteen', 'forteen', 'fifteen', 'sixteen', 'seventeen', 'eighteen', 'ninteen', 'twenty', 'twenty-one', 'twenty-two', 'twenty-three', 'twenty-four', 'twenty-five', 'twenty-six']
color1 = {key: 2 for key in link_keys}
color2 = {key: 2 for key in link_keys}
color3 = {key: 2 for key in link_keys}
color4 = {key: 2 for key in link_keys}
color5 = {key: 2 for key in link_keys}
color6 = {key: 2 for key in link_keys}
color7 = {key: 2 for key in link_keys}
color8 = {key: 2 for key in link_keys}
#Show current resources
def show_current_resources():
print ('Current status of the resources:')
print ('Color1 =',color1)
print ('Color2 =',color2)
print ('Color3 =',color3)
print ('Color4 =',color4)
print ('Color5 =',color5)
print ('Color6 =',color6)
print ('Color7 =',color7)
print ('Color8 =',color8)
#Find all paths
def find_all_paths(graph, start, end, path=[]):
path = path + [start]
if (start == end):
return [path]
paths = []
for node in set(graph.neighbors(start)) - set(path):
paths.extend(find_all_paths(graph, node, end, path))
return paths
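# Illustrative example (uses the commented-out test map above; result order may vary):
#   T.add_edges_from([('A', 'B'), ('C', 'B'), ('A', 'C'), ('C', 'D')])
#   find_all_paths(T, 'A', 'D')  # -> [['A', 'B', 'C', 'D'], ['A', 'C', 'D']]
# Note that path=[] is a mutable default, but the function rebinds it (path = path + [start])
# rather than mutating it, so repeated calls stay independent.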
#Find all links for each path and check each link to find all possible paths
def find_optimum_path_and_assign_proper_wavelength(threadName,ttl,all_paths = []):
final_possible_wavelengths = {}
possible_paths = []
possible_costs = []
#check each path
for x in range(0, len(all_paths)):
#print ('---------')
temp_check_link = []
check_path = all_paths[x]
#print('Path',str(x+1),':', check_path)
possible = ['color1', 'color2', 'color3', 'color4', 'color5', 'color6', 'color7', 'color8']
#Check each link
for y in range(0, len(check_path)-1):
dep = check_path[y]
des = check_path[y+1]
check_link = dep+', '+des
#print('Link',y+1,':',check_link)
temp_check_link.append(check_link)
#Find number for each link in dict all_links
for key in all_links:
if (key == check_link):
#print ('Key found:',key)
link_number = all_links.get(key)
break
#determine all possible wavelengths
if (color1[link_number]==0):
if ('color1' in possible) == True:
possible.remove('color1')
#print ('remove color1')
if (color2[link_number]==0):
if ('color2' in possible) == True:
possible.remove('color2')
#print ('remove color2')
if (color3[link_number]==0):
if ('color3' in possible) == True:
possible.remove('color3')
#print ('remove color3')
if (color4[link_number]==0):
if ('color4' in possible) == True:
possible.remove('color4')
#print ('remove color4')
if (color5[link_number]==0):
if ('color5' in possible) == True:
possible.remove('color5')
#print ('remove color5')
if (color6[link_number]==0):
if ('color6' in possible) == True:
possible.remove('color6')
#print ('remove color6')
if (color7[link_number]==0):
if ('color7' in possible) == True:
possible.remove('color7')
#print ('remove color7')
if (color8[link_number]==0):
if ('color8' in possible) == True:
possible.remove('color8')
#print ('remove color8')
#If could find a possible path
if (len(possible) > 0):
possible_paths.append(check_path)
"""
print ('Possible wavelengths:',possible)
print ('Choose wavelength:',possible[0])
"""
#Assume that path is optimum by trying assigning it with one of possible wavelengths (possible[0])
for key in temp_check_link:
if (possible[0] == 'color1'):
color1[all_links.get(key)] -= 1
elif (possible[0] == 'color2'):
color2[all_links.get(key)] -= 1
elif (possible[0] == 'color3'):
color3[all_links.get(key)] -= 1
elif (possible[0] == 'color4'):
color4[all_links.get(key)] -= 1
elif (possible[0] == 'color5'):
color5[all_links.get(key)] -= 1
elif (possible[0] == 'color6'):
color6[all_links.get(key)] -= 1
elif (possible[0] == 'color7'):
color7[all_links.get(key)] -= 1
elif (possible[0] == 'color8'):
color8[all_links.get(key)] -= 1
#show the current status of the resources (debug)
#show_current_resources()
###Calculate total cost in each case
cost = 0
for key in all_links:
if (key == 'Hamburg, Hannover'):
break
else:
if (color1[all_links.get(key)] == 1):
cost += 4
elif (color1[all_links.get(key)] == 0):
cost += 16
if (color2[all_links.get(key)] == 1):
cost += 4
elif (color2[all_links.get(key)] == 0):
cost += 16
if (color3[all_links.get(key)] == 1):
cost += 4
elif (color3[all_links.get(key)] == 0):
cost += 16
if (color4[all_links.get(key)] == 1):
cost += 4
elif (color4[all_links.get(key)] == 0):
cost += 16
if (color5[all_links.get(key)] == 1):
cost += 4
elif (color5[all_links.get(key)] == 0):
cost += 16
if (color6[all_links.get(key)] == 1):
cost += 4
elif (color6[all_links.get(key)] == 0):
cost += 16
if (color7[all_links.get(key)] == 1):
cost += 4
elif (color7[all_links.get(key)] == 0):
cost += 16
if (color8[all_links.get(key)] == 1):
cost += 4
elif (color8[all_links.get(key)] == 0):
cost += 16
possible_costs.append(cost)
final_possible_wavelengths[tuple(check_path)] = tuple(possible)[0]
#Undo assigning to return to its initial status and prepare for other possbible assignings
for key in temp_check_link:
if (possible[0] == 'color1'):
color1[all_links.get(key)] += 1
elif (possible[0] == 'color2'):
color2[all_links.get(key)] += 1
elif (possible[0] == 'color3'):
color3[all_links.get(key)] += 1
elif (possible[0] == 'color4'):
color4[all_links.get(key)] += 1
elif (possible[0] == 'color5'):
color5[all_links.get(key)] += 1
elif (possible[0] == 'color6'):
color6[all_links.get(key)] += 1
elif (possible[0] == 'color7'):
color7[all_links.get(key)] += 1
elif (possible[0] == 'color8'):
color8[all_links.get(key)] += 1
"""
#Print final results with optimum path
print ('---------')
print ('IN SUMMARY:')
"""
if (len(possible_paths)>0):
optimum_path = possible_paths[possible_costs.index(min(possible_costs))]
#print ('Possible paths:',possible_paths)
"""
print ('\nPossible costs:',possible_costs)
print ('\nOptimum cost:',min(possible_costs))
print ('\nOptimum path:',optimum_path)
print ('\nChosen wavelength for assigning the optimum path:',final_possible_wavelengths[tuple(optimum_path)])
"""
#Officially assigning optimum path to the system
official_links = []
for y in range(0, len(optimum_path)-1):
dep = optimum_path[y]
des = optimum_path[y+1]
official_links.append(dep+', '+des)
for key in official_links:
chosen_wavelength = final_possible_wavelengths[tuple(optimum_path)]
if (chosen_wavelength == 'color1'):
color1[all_links.get(key)] -= 1
elif (chosen_wavelength == 'color2'):
color2[all_links.get(key)] -= 1
elif (chosen_wavelength == 'color3'):
color3[all_links.get(key)] -= 1
elif (chosen_wavelength == 'color4'):
color4[all_links.get(key)] -= 1
elif (chosen_wavelength == 'color5'):
color5[all_links.get(key)] -= 1
elif (chosen_wavelength == 'color6'):
color6[all_links.get(key)] -= 1
elif (chosen_wavelength == 'color7'):
color7[all_links.get(key)] -= 1
elif (chosen_wavelength == 'color8'):
color8[all_links.get(key)] -= 1
#print("\nStarting TTL: "+ str(ttl) + "s")
time.sleep(ttl)
#Release back the resources
for key in official_links:
chosen_wavelength = final_possible_wavelengths[tuple(optimum_path)]
if (chosen_wavelength == 'color1'):
color1[all_links.get(key)] += 1
elif (chosen_wavelength == 'color2'):
color2[all_links.get(key)] += 1
elif (chosen_wavelength == 'color3'):
color3[all_links.get(key)] += 1
elif (chosen_wavelength == 'color4'):
color4[all_links.get(key)] += 1
elif (chosen_wavelength == 'color5'):
color5[all_links.get(key)] += 1
elif (chosen_wavelength == 'color6'):
color6[all_links.get(key)] += 1
elif (chosen_wavelength == 'color7'):
color7[all_links.get(key)] += 1
elif (chosen_wavelength == 'color8'):
color8[all_links.get(key)] += 1
#show_current_resources()
else:
#print ('\nThere are not any possible paths for this request - This connection will be blocked.')
global block
block += 1
#print ('\nNumber of blocked connections: '+ str(block))
#Multithread
class myThread (threading.Thread):
def __init__(self, ID, name, start1, end, ttl):
threading.Thread.__init__(self)
self.ID = ID
self.name = name
self.start1 = start1
self.end = end
self.ttl = ttl
def run(self):
#print ("\nStarting " + self.name)
find_optimum_path_and_assign_proper_wavelength(self.name,self.ttl,find_all_paths(chosen_graph, self.start1, self.end, path))
#print ("\nExiting " + self.name)
#Create new threads pools
i=0
threads = []
#Initiator
while (i<total_requests):
lamda = random.expovariate(lamda_var)
mu = random.expovariate(mu_var)
start = random.choice(all_nodes)
all_nodes.remove(start)
#print (all_nodes1)
end = random.choice(all_nodes)
all_nodes.append(start)
#print (all_nodes1)
time.sleep(lamda)
#print("\nWaited " + str(lamda) + "s"+ " for request-" + str(i+1))
print ("\nIncoming request-" + str(i+1) + ": " + start + " -> " + end)
thread = myThread(i+1, "Request-"+str(i+1),start,end,mu)
threads.append(thread)
thread.start()
i += 1
#Summary
for x in threads:
x.join()
print ("\nDone!")
print ('\nNumber of blocked connections: '+ str(block))
#show_current_resources()
print ("\nTotal requests: " + str(total_requests))
print ("\nLamda: " + str(lamda_var) + "; Mu: " + str(mu_var))
"""
#find all paths
#start = 'C'
#end = 'D'
start = 'Hamburg'
end = 'Karlsruhe'
#all_paths = cp.copy(find_paths(chosen_graph, start, end, path))
print('FROM',start,'TO',end,':')
print('There are intuitively total',len(find_all_paths(chosen_graph, start, end, path)),'path(s) :')
find_optimum_path_and_assign_proper_wavelength("Thread",0.01,find_all_paths(chosen_graph, start, end, path))
"""
|
py | b40268c7b3ceb46ad91a61e9fea611edd2b6669a | # Created by MechAviv
# ID :: [924020000]
# Hidden Street : Demon Slayer's Childhood Home
sm.setTemporarySkillSet(0)
sm.setInGameDirectionMode(False, True, False, False)
sm.completeQuestNoRewards(23200)
sm.deleteQuest(23200)
sm.completeQuestNoRewards(23201)
sm.deleteQuest(23201)
sm.completeQuestNoRewards(23202)
sm.deleteQuest(23202)
|
py | b40269151e00d831f00a28448768cf6d003c9352 | # timer.py
# this will get some re-writes to handle numba
from contextlib import ContextDecorator
from dataclasses import dataclass, field
import time
from typing import Any, Callable, ClassVar, Dict, Optional
class TimerError(Exception):
"""A custom exception used to report errors in use of Timer class"""
@dataclass
class Timer(ContextDecorator):
"""Time your code using a class, context manager, or decorator"""
timers: ClassVar[Dict[str, float]] = dict()
name: Optional[str] = None
_start_time: Optional[float] = field(default=None, init=False, repr=False)
def __post_init__(self) -> None:
"""Initialization: add timer to dict of timers"""
if self.name:
self.timers.setdefault(self.name, 0)
def start(self) -> None:
"""Start a new timer"""
if self._start_time is not None:
raise TimerError(f"Timer is running. Use .stop() to stop it")
self._start_time = time.perf_counter()
def stop(self) -> float:
"""Stop the timer, and report the elapsed time"""
if self._start_time is None:
raise TimerError(f"Timer is not running. Use .start() to start it")
# Calculate elapsed time
elapsed_time = time.perf_counter() - self._start_time
self._start_time = None
# Report elapsed time
if self.name:
self.timers[self.name] += elapsed_time
return elapsed_time
def __enter__(self) -> "Timer":
"""Start a new timer as a context manager"""
self.start()
return self
def __exit__(self, *exc_info: Any) -> None:
"""Stop the context manager timer"""
self.stop()
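# Usage sketch (added for illustration; guarded so importing this module has no side effects):
if __name__ == "__main__":
    # 1) explicit start/stop
    t = Timer(name="demo")
    t.start()
    time.sleep(0.01)
    print(f"explicit: {t.stop():.4f}s")
    # 2) context manager
    with Timer(name="demo"):
        time.sleep(0.01)
    # 3) decorator (works because Timer subclasses ContextDecorator)
    @Timer(name="demo")
    def work():
        time.sleep(0.01)
    work()
    # accumulated totals per name
    print("accumulated:", Timer.timers)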
|
py | b4026b344b0d25712a24ca1a99452a4229aaa7a0 | import numpy as np
import pandas as pd
import pytest
from pypfopt import black_litterman
from pypfopt.black_litterman import BlackLittermanModel
from pypfopt import risk_models, expected_returns
from tests.utilities_for_tests import get_data, get_market_caps, resource
def test_input_errors():
df = get_data()
S = risk_models.sample_cov(df)
views = pd.Series(0.1, index=S.columns)
# Insufficient args
with pytest.raises(TypeError):
BlackLittermanModel(S)
assert BlackLittermanModel(S, Q=views)
with pytest.raises(ValueError):
BlackLittermanModel(S, Q=views, tau=-0.1)
# P and Q don't match dimensions
P = np.eye(len(S))[:, :-1]
with pytest.raises(AssertionError):
# This doesn't raise the error from the expected place!
# Because default_omega uses matrix mult on P
BlackLittermanModel(S, Q=views, P=P)
# P not an DataFrame or ndarray
with pytest.raises(TypeError):
BlackLittermanModel(S, Q=views[:-1], P=1.0)
with pytest.raises(AssertionError):
BlackLittermanModel(S, Q=views, P=P, omega=np.eye(len(views)))
# pi and S don't match dimensions
with pytest.raises(AssertionError):
BlackLittermanModel(S, Q=views, pi=df.pct_change().mean()[:-1])
# If pi=="market" then market_caps must be supplied
with pytest.raises(ValueError):
BlackLittermanModel(S, Q=views, pi="market")
# pi's valid numerical types are Series, DataFrame and ndarray
with pytest.raises(TypeError):
BlackLittermanModel(S, Q=views, pi=[0.1] * len(S))
# risk_aversion cannot be negative
with pytest.raises(ValueError):
BlackLittermanModel(S, Q=views, risk_aversion=-0.01)
# omega must be ndarray, DataFrame and string
with pytest.raises(TypeError):
BlackLittermanModel(S, Q=views, omega=1.0)
def test_parse_views():
df = get_data()
S = risk_models.sample_cov(df)
viewlist = ["AAPL", 0.20, "GOOG", -0.30, "XOM", 0.40] # incorrect type
viewdict = {"AAPL": 0.20, "GOOG": -0.30, "XOM": 0.40, "fail": 0.1}
with pytest.raises(TypeError):
bl = BlackLittermanModel(S, absolute_views=viewlist)
with pytest.raises(ValueError):
bl = BlackLittermanModel(S, absolute_views=viewdict)
del viewdict["fail"]
bl = BlackLittermanModel(S, absolute_views=viewdict)
# Check the picking matrix is correct
test_P = np.copy(bl.P)
test_P[0, 1] -= 1
test_P[1, 0] -= 1
test_P[2, 13] -= 1
np.testing.assert_array_equal(test_P, np.zeros((len(bl.Q), bl.n_assets)))
# Check views vector is correct
np.testing.assert_array_equal(bl.Q, np.array([0.20, -0.30, 0.40]).reshape(-1, 1))
def test_dataframe_input():
df = get_data()
S = risk_models.sample_cov(df)
view_df = pd.DataFrame(pd.Series(0.1, index=S.columns))
bl = BlackLittermanModel(S, Q=view_df)
np.testing.assert_array_equal(bl.P, np.eye(len(view_df)))
# views on the first 10 assets
view_df = pd.DataFrame(pd.Series(0.1, index=S.columns)[:10])
# P's index and columns labels are ignored when a DataFrame is used:
picking = pd.DataFrame(np.eye(len(S))[:10, :])
assert BlackLittermanModel(S, Q=view_df, P=picking)
prior_df = df.pct_change().mean()
assert BlackLittermanModel(S, pi=prior_df, Q=view_df, P=picking)
omega_df = S.iloc[:10, :10]
assert BlackLittermanModel(S, pi=prior_df, Q=view_df, P=picking, omega=omega_df)
def test_cov_ndarray():
df = get_data()
prior_df = df.pct_change().mean()
S = risk_models.sample_cov(df)
views = pd.Series(0.1, index=S.columns)
bl = BlackLittermanModel(S, pi=prior_df, Q=views)
bl_nd = BlackLittermanModel(S.to_numpy(), pi=prior_df.to_numpy(), Q=views)
# Compare without missing ticker index values.
np.testing.assert_equal(bl_nd.bl_returns().to_numpy(), bl.bl_returns().to_numpy())
np.testing.assert_equal(bl_nd.bl_cov().to_numpy(), bl.bl_cov().to_numpy())
assert list(bl_nd.bl_weights().values()) == list(bl.bl_weights().values())
def test_default_omega():
df = get_data()
S = risk_models.sample_cov(df)
views = pd.Series(0.1, index=S.columns)
bl = BlackLittermanModel(S, Q=views)
# Check square and diagonal
assert bl.omega.shape == (len(S), len(S))
np.testing.assert_array_equal(bl.omega, np.diag(np.diagonal(bl.omega)))
# In this case, we should have omega = tau * diag(S)
np.testing.assert_array_almost_equal(np.diagonal(bl.omega), bl.tau * np.diagonal(S))
def test_bl_returns_no_prior():
df = get_data()
S = risk_models.sample_cov(df)
viewdict = {"AAPL": 0.20, "BBY": -0.30, "BAC": 0, "SBUX": -0.2, "T": 0.131321}
bl = BlackLittermanModel(S, absolute_views=viewdict)
rets = bl.bl_returns()
# Make sure it gives the same answer as explicit inverse
test_rets = np.linalg.inv(
np.linalg.inv(bl.tau * bl.cov_matrix) + bl.P.T @ np.linalg.inv(bl.omega) @ bl.P
) @ (bl.P.T @ np.linalg.inv(bl.omega) @ bl.Q)
np.testing.assert_array_almost_equal(rets.values.reshape(-1, 1), test_rets)
def test_bl_equal_prior():
df = get_data()
S = risk_models.sample_cov(df)
viewdict = {"AAPL": 0.20, "BBY": -0.30, "BAC": 0, "SBUX": -0.2, "T": 0.131321}
bl = BlackLittermanModel(S, absolute_views=viewdict, pi="equal")
np.testing.assert_array_almost_equal(bl.pi, np.ones((20, 1)) * 0.05)
bl.bl_weights()
np.testing.assert_allclose(
bl.portfolio_performance(),
(0.1877432247395778, 0.3246889329226965, 0.5166274785827545),
)
def test_bl_returns_all_views():
df = get_data()
prior = expected_returns.ema_historical_return(df)
S = risk_models.CovarianceShrinkage(df).ledoit_wolf()
views = pd.Series(0.1, index=S.columns)
bl = BlackLittermanModel(S, pi=prior, Q=views)
posterior_rets = bl.bl_returns()
assert isinstance(posterior_rets, pd.Series)
assert list(posterior_rets.index) == list(df.columns)
assert posterior_rets.notnull().all()
assert posterior_rets.dtype == "float64"
np.testing.assert_array_almost_equal(
posterior_rets,
np.array(
[
0.11168648,
0.16782938,
0.12516799,
0.24067997,
0.32848296,
-0.22789895,
0.16311297,
0.11928542,
0.25414308,
0.11007738,
0.06282615,
-0.03140218,
-0.16977172,
0.05254821,
-0.10463884,
0.32173375,
0.26399864,
0.1118594,
0.22999558,
0.08977448,
]
),
)
def test_bl_relative_views():
df = get_data()
S = risk_models.CovarianceShrinkage(df).ledoit_wolf()
# 1. SBUX will drop by 20%
# 2. GOOG outperforms FB by 10%
# 3. BAC and JPM will outperform T and GE by 15%
views = np.array([-0.20, 0.10, 0.15]).reshape(-1, 1)
picking = np.array(
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, -0.5, 0, 0, 0.5, 0, -0.5, 0, 0, 0, 0, 0, 0, 0, 0.5, 0],
]
)
bl = BlackLittermanModel(S, Q=views, P=picking)
rets = bl.bl_returns()
assert rets["SBUX"] < 0
assert rets["GOOG"] > rets["FB"]
assert (rets["BAC"] > rets["T"]) and (rets["JPM"] > rets["GE"])
def test_bl_cov_default():
df = get_data()
cov_matrix = risk_models.CovarianceShrinkage(df).ledoit_wolf()
viewdict = {"AAPL": 0.20, "BBY": -0.30, "BAC": 0, "SBUX": -0.2, "T": 0.131321}
bl = BlackLittermanModel(cov_matrix, absolute_views=viewdict)
S = bl.bl_cov()
assert S.shape == (20, 20)
assert S.index.equals(df.columns)
assert S.index.equals(S.columns)
assert S.notnull().all().all()
def test_market_risk_aversion():
prices = pd.read_csv(
resource("spy_prices.csv"), parse_dates=True, index_col=0, squeeze=True
)
delta = black_litterman.market_implied_risk_aversion(prices)
assert np.round(delta, 5) == 2.68549
# check it works for df
prices = pd.read_csv(resource("spy_prices.csv"), parse_dates=True, index_col=0)
delta = black_litterman.market_implied_risk_aversion(prices)
assert np.round(delta.iloc[0], 5) == 2.68549
# Check it raises for other types.
list_invalid = [100.0, 110.0, 120.0, 130.0]
with pytest.raises(TypeError):
delta = black_litterman.market_implied_risk_aversion(list_invalid)
def test_bl_weights():
df = get_data()
S = risk_models.sample_cov(df)
viewdict = {"AAPL": 0.20, "BBY": -0.30, "BAC": 0, "SBUX": -0.2, "T": 0.131321}
bl = BlackLittermanModel(S, absolute_views=viewdict)
prices = pd.read_csv(
resource("spy_prices.csv"), parse_dates=True, index_col=0, squeeze=True
)
delta = black_litterman.market_implied_risk_aversion(prices)
bl.bl_weights(delta)
w = bl.clean_weights()
assert abs(sum(w.values()) - 1) < 1e-5
# check weights are allocated in same direction as views
# (in absence of priors)
assert all(viewdict[t] * w[t] >= 0 for t in viewdict)
# numerical check
test_weights = {
"GOOG": 0.0,
"AAPL": 1.40675,
"FB": 0.0,
"BABA": 0.0,
"AMZN": 0.0,
"GE": 0.0,
"AMD": 0.0,
"WMT": 0.0,
"BAC": 0.02651,
"GM": 0.0,
"T": 2.81117,
"UAA": 0.0,
"SHLD": 0.0,
"XOM": 0.0,
"RRC": 0.0,
"BBY": -1.44667,
"MA": 0.0,
"PFE": 0.0,
"JPM": 0.0,
"SBUX": -1.79776,
}
assert w == test_weights
bl = BlackLittermanModel(S, absolute_views=viewdict)
bl.optimize(delta)
w2 = bl.clean_weights()
assert w2 == w
bl = BlackLittermanModel(S, absolute_views=pd.Series(viewdict))
bl.optimize(delta)
w2 = bl.clean_weights()
assert w2 == w
def test_market_implied_prior():
df = get_data()
S = risk_models.sample_cov(df)
prices = pd.read_csv(
resource("spy_prices.csv"), parse_dates=True, index_col=0, squeeze=True
)
delta = black_litterman.market_implied_risk_aversion(prices)
mcaps = get_market_caps()
pi = black_litterman.market_implied_prior_returns(mcaps, delta, S)
assert isinstance(pi, pd.Series)
assert list(pi.index) == list(df.columns)
assert pi.notnull().all()
assert pi.dtype == "float64"
np.testing.assert_array_almost_equal(
pi.values,
np.array(
[
0.14933293,
0.2168623,
0.11219185,
0.10362374,
0.28416295,
0.12196098,
0.19036819,
0.08860159,
0.17724273,
0.08779627,
0.0791797,
0.16460474,
0.12854665,
0.08657863,
0.11230036,
0.13875465,
0.15017163,
0.09066484,
0.1696369,
0.13270213,
]
),
)
mcaps = pd.Series(mcaps)
pi2 = black_litterman.market_implied_prior_returns(mcaps, delta, S)
pd.testing.assert_series_equal(pi, pi2, check_exact=False)
# Test alternate syntax
bl = BlackLittermanModel(
S,
pi="market",
market_caps=mcaps,
absolute_views={"AAPL": 0.1},
risk_aversion=delta,
)
pi = black_litterman.market_implied_prior_returns(mcaps, delta, S, risk_free_rate=0)
np.testing.assert_array_almost_equal(bl.pi, pi.values.reshape(-1, 1))
def test_bl_market_prior():
df = get_data()
S = risk_models.sample_cov(df)
prices = pd.read_csv(
resource("spy_prices.csv"), parse_dates=True, index_col=0, squeeze=True
)
delta = black_litterman.market_implied_risk_aversion(prices)
mcaps = get_market_caps()
with pytest.warns(RuntimeWarning):
black_litterman.market_implied_prior_returns(mcaps, delta, S.values)
prior = black_litterman.market_implied_prior_returns(mcaps, delta, S)
viewdict = {"GOOG": 0.40, "AAPL": -0.30, "FB": 0.30, "BABA": 0}
bl = BlackLittermanModel(S, pi=prior, absolute_views=viewdict)
rets = bl.bl_returns()
# compare posterior with prior
for v in viewdict:
assert (prior[v] <= rets[v] <= viewdict[v]) or (
viewdict[v] <= rets[v] <= prior[v]
)
with pytest.raises(ValueError):
bl.portfolio_performance()
bl.bl_weights(delta)
np.testing.assert_allclose(
bl.portfolio_performance(),
(0.2580693114409672, 0.265445955488424, 0.8968654692926723),
)
# Check that bl.cov() has been called and used
assert bl.posterior_cov is not None
def test_bl_market_automatic():
df = get_data()
S = risk_models.sample_cov(df)
mcaps = get_market_caps()
viewdict = {"GOOG": 0.40, "AAPL": -0.30, "FB": 0.30, "BABA": 0}
bl = BlackLittermanModel(S, pi="market", absolute_views=viewdict, market_caps=mcaps)
rets = bl.bl_returns()
# Compare with explicit
prior = black_litterman.market_implied_prior_returns(mcaps, 1, S, 0)
bl2 = BlackLittermanModel(S, pi=prior, absolute_views=viewdict)
rets2 = bl2.bl_returns()
pd.testing.assert_series_equal(rets, rets2)
# def test_bl_mcap_order_invariance():
# df = get_data()
# S = risk_models.sample_cov(df)
# mcaps = get_market_caps()
# mcaps2 = {k: v for k, v in list(mcaps.items())[::-1]}
# # mcaps = pd.Series(mcaps)
# market_prior1 = black_litterman.market_implied_prior_returns(mcaps, 2, S.values, 0)
# market_prior2 = black_litterman.market_implied_prior_returns(mcaps2, 2, S.values, 0)
# market_prior1 == market_prior2
# mcaps = pd.Series(mcaps)
# mcaps2 = pd.Series(mcaps2)
# mkt_weights1 = mcaps / mcaps.sum()
# mkt_weights2 = mcaps2 / mcaps2.sum()
# # Pi is excess returns so must add risk_free_rate to get return.
# pd.testing.assert_series_equal(
# S.dot(mkt_weights1), S.dot(mkt_weights2), check_exact=False
# )
# S.values.dot(mkt_weights1)
# S.values.dot(mkt_weights2)
# return risk_aversion * cov_matrix.dot(mkt_weights) + risk_free_rate
# viewdict = {"BABA": 0, "AAPL": -0.30, "GOOG": 0.40, "FB": 0.30}
# bl = BlackLittermanModel(
# S,
# pi="market",
# absolute_views=viewdict,
# market_caps=mcaps,
# risk_aversion=2,
# risk_free_rate=0,
# )
# bl.pi.ravel() == market_prior
# # bl2 = BlackLittermanModel(S, pi=market_prior, absolute_views=viewdict)
# # bl2.pi
def test_bl_tau():
df = get_data()
S = risk_models.sample_cov(df)
prices = pd.read_csv(
resource("spy_prices.csv"), parse_dates=True, index_col=0, squeeze=True
)
delta = black_litterman.market_implied_risk_aversion(prices)
mcaps = get_market_caps()
prior = black_litterman.market_implied_prior_returns(mcaps, delta, S)
viewdict = {"GOOG": 0.40, "AAPL": -0.30, "FB": 0.30, "BABA": 0}
# Need to change omega for this test to work
omega = np.diag([0.01, 0.01, 0.01, 0.01])
bl0 = BlackLittermanModel(
S, pi=prior, absolute_views=viewdict, tau=1e-10, omega=omega
)
bl1 = BlackLittermanModel(
S, pi=prior, absolute_views=viewdict, tau=0.01, omega=omega
)
bl2 = BlackLittermanModel(
S, pi=prior, absolute_views=viewdict, tau=0.1, omega=omega
)
# For tiny tau, posterior should roughly equal prior
np.testing.assert_allclose(bl0.bl_returns(), bl0.pi.flatten(), rtol=1e-5)
# For bigger tau, GOOG should be given more weight
assert bl1.bl_returns()["GOOG"] > bl0.bl_returns()["GOOG"]
assert bl2.bl_returns()["GOOG"] > bl1.bl_returns()["GOOG"]
def test_bl_no_uncertainty():
df = get_data()
S = risk_models.sample_cov(df)
omega = np.diag([0, 0, 0, 0])
viewdict = {"GOOG": 0.40, "AAPL": -0.30, "FB": 0.30, "BABA": 0}
bl = BlackLittermanModel(S, absolute_views=viewdict, omega=omega)
rets = bl.bl_returns()
# For 100% confidence, posterior return should equal view return.
for k, v in viewdict.items():
assert np.abs(rets[k] - v) < 1e-5
    # If only one view has 100% confidence, only that asset will have post = prior.
omega = np.diag([0, 0.2, 0.2, 0.2])
bl = BlackLittermanModel(S, absolute_views=viewdict, omega=omega)
rets = bl.bl_returns()
assert np.abs(bl.bl_returns()["GOOG"] - viewdict["GOOG"]) < 1e-5
assert np.abs(rets["AAPL"] - viewdict["AAPL"]) > 0.01
def test_idzorek_confidences_error():
# if no confidences have been passed
S = pd.DataFrame(np.diag(np.ones((5,))), index=range(5), columns=range(5))
# Constant view of 0.3 return
views = {k: 0.3 for k in range(5)}
# Prior
pi = pd.Series(0.1, index=range(5))
with pytest.raises(ValueError):
BlackLittermanModel(S, pi=pi, absolute_views=views, omega="idzorek")
with pytest.raises(ValueError):
# Wrong number of views
BlackLittermanModel(
S, pi=pi, absolute_views=views, omega="idzorek", view_confidences=[0.2] * 4
)
with pytest.raises(ValueError):
# Conf greater than 1
BlackLittermanModel(
S, pi=pi, absolute_views=views, omega="idzorek", view_confidences=[1.1] * 5
)
with pytest.raises(ValueError):
# Conf less than zero
BlackLittermanModel(
S, pi=pi, absolute_views=views, omega="idzorek", view_confidences=[-0.1] * 5
)
def test_idzorek_basic():
# Identity covariance
S = pd.DataFrame(np.diag(np.ones((5,))), index=range(5), columns=range(5))
# Constant view of 0.3 return
views = {k: 0.3 for k in range(5)}
# Prior
pi = pd.Series(0.1, index=range(5))
# Perfect confidence - should equal views
bl = BlackLittermanModel(
S, pi=pi, absolute_views=views, omega=np.diag(np.zeros(5)) # perfect confidence
)
pd.testing.assert_series_equal(bl.bl_returns(), pd.Series([0.3] * 5))
# No confidence - should equal priors
bl = BlackLittermanModel(S, pi=pi, absolute_views=views, omega=S * 1e6)
pd.testing.assert_series_equal(bl.bl_returns(), pi)
# Idzorek 100% confidence
bl = BlackLittermanModel(
S, pi=pi, absolute_views=views, omega="idzorek", view_confidences=[1] * 5
)
np.testing.assert_array_almost_equal(bl.omega, np.zeros((5, 5)))
pd.testing.assert_series_equal(bl.bl_returns(), pd.Series(0.3, index=range(5)))
# Idzorek 0% confidence
bl = BlackLittermanModel(
S, pi=pi, absolute_views=views, omega="idzorek", view_confidences=[0] * 5
)
np.testing.assert_array_almost_equal(bl.omega, np.diag([1e6] * 5))
pd.testing.assert_series_equal(bl.bl_returns(), pi)
# Idzorek confidence range
for i, conf in enumerate(np.arange(0, 1.2, 0.2)):
bl = BlackLittermanModel(
S, pi=pi, absolute_views=views, omega="idzorek", view_confidences=[conf] * 5
)
# Linear spacing
np.testing.assert_almost_equal(bl.bl_returns()[0], 0.1 + i * 0.2 / 5)
def test_idzorek_input_formats():
# Identity covariance
S = pd.DataFrame(np.diag(np.ones((5,))), index=range(5), columns=range(5))
# Constant view of 0.3 return
views = {k: 0.3 for k in range(5)}
# Prior
pi = pd.Series(0.1, index=range(5))
test_result = pd.Series(0.2, index=range(5))
bl = BlackLittermanModel(
S, pi=pi, absolute_views=views, omega="idzorek", view_confidences=[0.5] * 5
)
pd.testing.assert_series_equal(bl.bl_returns(), test_result)
bl = BlackLittermanModel(
S,
pi=pi,
absolute_views=views,
omega="idzorek",
view_confidences=(0.5, 0.5, 0.5, 0.5, 0.5),
)
pd.testing.assert_series_equal(bl.bl_returns(), test_result)
bl = BlackLittermanModel(
S,
pi=pi,
absolute_views=views,
omega="idzorek",
view_confidences=np.array([0.5] * 5),
)
pd.testing.assert_series_equal(bl.bl_returns(), test_result)
bl = BlackLittermanModel(
S,
pi=pi,
absolute_views=views,
omega="idzorek",
view_confidences=np.array([0.5] * 5).reshape(-1, 1),
)
pd.testing.assert_series_equal(bl.bl_returns(), test_result)
def test_idzorek_with_priors():
df = get_data()
S = risk_models.sample_cov(df)
mcaps = get_market_caps()
viewdict = {"GOOG": 0.40, "AAPL": -0.30, "FB": 0.30, "BABA": 0}
bl = BlackLittermanModel(
S,
pi="market",
market_caps=mcaps,
absolute_views=viewdict,
omega="idzorek",
view_confidences=[1, 1, 0.25, 0.25],
)
rets = bl.bl_returns()
assert bl.omega[0, 0] == 0
np.testing.assert_almost_equal(rets["AAPL"], -0.3)
with pytest.raises(ValueError):
bl.portfolio_performance()
bl.bl_weights()
np.testing.assert_allclose(
bl.portfolio_performance(),
(0.943431295405105, 0.5361412623208567, 1.722365653051476),
)
# Check that bl.cov() has been called and used
assert bl.posterior_cov is not None
|
py | b4026b4f55ad2d32c7469aa79ebf908eea307161 | # Program that approves or rejects the loan based on:
# the house price, the buyer's salary, and how many years to pay
# the installment value cannot exceed 30% of the salary
valor_casa = float(input("Informe o valor da casa: "))
salario = float(input("Qual é o seu salário R$: "))
prazo = int(input("Qual é o prazo em anos para pagar? "))
prestacao = valor_casa / (prazo * 12)
if prestacao > (salario * 0.3):
print("Infelizmente não é possível fazer esse empréstimo")
else:
print("É possível fazer o empréstimo")
|
py | b4026ba5051794a0ae2b701c57afe22449081ad0 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Store interface is heavily based on Kubeflow/pipelines metadata_helpers.py; see original code at:
# https://github.com/kubeflow/pipelines/blob/master/backend/metadata_writer/src/metadata_helpers.py
import logging
import time
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
def value_to_mlmd_value(value) -> metadata_store_pb2.Value:
if value is None:
return metadata_store_pb2.Value()
if isinstance(value, int):
return metadata_store_pb2.Value(int_value=value)
if isinstance(value, float):
return metadata_store_pb2.Value(double_value=value)
return metadata_store_pb2.Value(string_value=str(value))
def connect(address: str = None) -> mlmd.metadata_store.MetadataStore:
if address is None:
# If address is None we will use a fake in memory database for testing
logging.info("Using in memory database.")
connection_config = metadata_store_pb2.ConnectionConfig()
connection_config.fake_database.SetInParent()
return mlmd.metadata_store.MetadataStore(connection_config)
else:
logging.info("Connecting to database at {address}")
host, port = address.split(":")
mlmd_connection_config = metadata_store_pb2.MetadataStoreClientConfig(
host=host,
port=int(port),
)
# Checking the connection to the Metadata store.
for _ in range(100):
try:
return mlmd.metadata_store.MetadataStore(mlmd_connection_config)
except Exception as e:
logging.error(f'Failed to access the Metadata store. Exception: "{e}"')
time.sleep(1)
raise RuntimeError("Could not connect to the Metadata store.")
def get_or_create_artifact_type(
store: mlmd.metadata_store.MetadataStore, type_name: str, properties: dict = None
) -> metadata_store_pb2.ArtifactType:
try:
artifact_type = store.get_artifact_type(type_name=type_name)
return artifact_type
    except Exception:  # The type is not registered yet; create it below.
artifact_type = metadata_store_pb2.ArtifactType(
name=type_name,
properties=properties,
)
artifact_type.id = store.put_artifact_type(artifact_type) # Returns ID
return artifact_type
def update_or_create_artifact(
store: mlmd.metadata_store.MetadataStore,
type_name: str,
artifact: metadata_store_pb2.Artifact,
) -> metadata_store_pb2.Artifact:
# We assume that type already exists in database
artifact_type = store.get_artifact_type(type_name=type_name)
# This will be None if artifact does not exist
existing_artifact = store.get_artifact_by_type_and_name(
artifact_type.name, artifact.name
)
if existing_artifact is not None:
artifact.id = existing_artifact.id
artifact.type_id = artifact_type.id
artifact.id = store.put_artifacts([artifact])[0]
return artifact
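if __name__ == "__main__":
    # Minimal usage sketch (added for illustration; not part of the original
    # module). The "Model" type and its "framework" property are assumptions
    # made for this example; it uses the in-memory fake database from connect().
    store = connect()
    get_or_create_artifact_type(
        store, "Model", properties={"framework": metadata_store_pb2.STRING}
    )
    model = metadata_store_pb2.Artifact(name="my-model", uri="/tmp/model")
    model.properties["framework"].CopyFrom(value_to_mlmd_value("tensorflow"))
    print(update_or_create_artifact(store, "Model", model))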
|
py | b4026ccecf8aafa17a8b93722b3be421a01ad563 | import abc
from time import sleep
from office365.runtime.client_request_exception import ClientRequestException
from office365.runtime.queries.read_entity_query import ReadEntityQuery
class ClientRuntimeContext(object):
def __init__(self):
"""
Client runtime context
"""
@property
def current_query(self):
return self.pending_request().current_query
def execute_query_retry(self, max_retry=5, timeout_secs=5, success_callback=None, failure_callback=None):
"""
Executes the current set of data retrieval queries and method invocations and retries it if needed.
:param int max_retry: Number of times to retry the request
:param int timeout_secs: Seconds to wait before retrying the request.
:param (ClientObject)-> None success_callback:
:param (int)-> None failure_callback:
"""
for retry in range(1, max_retry):
try:
self.execute_query()
if callable(success_callback):
success_callback(self.pending_request().current_query.return_type)
break
except ClientRequestException:
self.add_query(self.pending_request().current_query, True)
sleep(timeout_secs)
if callable(failure_callback):
failure_callback(retry)
@abc.abstractmethod
def pending_request(self):
"""
:rtype: office365.runtime.client_request.ClientRequest
"""
pass
@abc.abstractmethod
def service_root_url(self):
pass
@abc.abstractmethod
def authenticate_request(self, request):
"""
:type request: office365.runtime.http.request_options.RequestOptions
"""
pass
def build_request(self):
return self.pending_request().build_request()
def load(self, client_object, properties_to_retrieve=None):
"""Prepare retrieval query
:type properties_to_retrieve: list[str] or None
:type client_object: office365.runtime.client_object.ClientObject
"""
qry = ReadEntityQuery(client_object, properties_to_retrieve)
self.pending_request().add_query(qry)
return qry
def before_execute(self, action, once=True):
"""
Attach an event handler which is triggered before request is submitted to server
:param (RequestOptions) -> None action:
:param bool once:
:return: None
"""
def _process_request(request):
if once:
self.pending_request().beforeExecute -= _process_request
action(request)
self.pending_request().beforeExecute += _process_request
def before_execute_query(self, action):
"""
Attach an event handler which is triggered before query is submitted to server
:param (RequestOptions) -> None action:
:return: None
"""
def _prepare_request(request):
qry = self.pending_request().current_query
action(qry)
self.pending_request().beforeExecute += _prepare_request
def after_execute_query(self, action):
"""
Attach an event handler which is triggered after query is submitted to server
:param (RequestOptions) -> None action:
:return: None
"""
def _process_response(response):
qry = self.pending_request().current_query
action(qry)
self.pending_request().afterExecute += _process_response
def after_execute(self, action, once=True):
"""
Attach an event handler which is triggered after request is submitted to server
:param (RequestOptions) -> None action:
:param bool once:
:return: None
"""
def _process_response(response):
if once:
self.pending_request().afterExecute -= _process_response
action(response)
self.pending_request().afterExecute += _process_response
def execute_request_direct(self, request):
"""
:type request: RequestOptions
"""
return self.pending_request().execute_request_direct(request)
def execute_query(self):
self.pending_request().execute_query()
def add_query(self, query, to_begin=False):
"""
Adds query to internal queue
:type to_begin: bool
:type query: ClientQuery
"""
self.pending_request().add_query(query, to_begin)
def remove_query(self, query):
self.pending_request().remove_query(query)
def clear_queries(self):
self.pending_request().queries.clear()
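# Usage sketch (illustrative only; not part of this module). It assumes a
# concrete subclass such as office365.sharepoint.client_context.ClientContext
# and valid credentials:
#
#   ctx = ClientContext(site_url).with_credentials(credentials)
#   web = ctx.web
#   ctx.load(web, ["Title"])
#   ctx.execute_query_retry(
#       max_retry=3,
#       timeout_secs=2,
#       success_callback=lambda obj: print(obj.properties),
#       failure_callback=lambda retry: print("retry #", retry),
#   )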
|
py | b4026cf470a2e1bff1e037a3abc48339ace03ef9 | ##########################################################################
#
# Copyright (c) 2019, Cinesite VFX Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferUI
import GafferScene
import GafferSceneUI
def addPruningActions( editor ) :
if isinstance( editor, GafferUI.Viewer ) :
editor.keyPressSignal().connect( __pruningKeyPress, scoped = False )
def __pruningKeyPress( viewer, event ) :
if event.key not in ( "Backspace", "Delete" ) :
return False
if event.modifiers != event.Modifiers.Control :
# We require a modifier for now, because being able to delete
# directly in the Viewer is a significant change, and we're
# worried it could happen unnoticed by someone trying to
# delete a _node_ instead. But we swallow the event anyway, to
# reserve the unmodified keypress for our use in a future where
# a Gaffer viewer with rich interaction might not be so
# unexpected.
return True
if not isinstance( viewer.view(), GafferSceneUI.SceneView ) :
return False
editScope = viewer.view().editScope()
if editScope is None or Gaffer.MetadataAlgo.readOnly( editScope ) :
# We return True even when we don't do anything, so the keypress doesn't
# leak out and get used to delete nodes in the node graph.
## \todo Add a discreet notification system to the Viewer so we can
# prompt the user to select a scope etc when necessary. Maybe we might
# also want to ask them if we can prune a common ancestor in the case
# that all its descendants are selected?
return True
viewedNode = viewer.view()["in"].getInput().node()
if editScope != viewedNode and editScope not in Gaffer.NodeAlgo.upstreamNodes( viewedNode ) :
# Spare folks from deleting things in a downstream EditScope.
## \todo When we have a nice Viewer notification system we
# should emit a warning here.
return True
if GafferScene.EditScopeAlgo.prunedReadOnlyReason( editScope ) is not None :
return True
	# \todo This needs encapsulating in EditScopeAlgo somehow, so we don't need
# to interact with processors directly.
with viewer.getContext() :
if not editScope["enabled"].getValue() :
# Spare folks from deleting something when it won't be
# apparent what they've done until they reenable the
# EditScope.
return True
pruningProcessor = editScope.acquireProcessor( "PruningEdits", createIfNecessary = False )
if pruningProcessor is not None and not pruningProcessor["enabled"].getValue() :
return True
sceneGadget = viewer.view().viewportGadget().getPrimaryChild()
selection = sceneGadget.getSelection()
if not selection.isEmpty() :
with Gaffer.UndoScope( editScope.ancestor( Gaffer.ScriptNode ) ) :
GafferScene.EditScopeAlgo.setPruned( editScope, selection, True )
return True
|
py | b4026d3ce331fe4f59ce916373bdba47f2245906 | # Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
# This modules disables the Pytype analyzer, see
# https://github.com/tensorflow/federated/blob/main/docs/pytype.md for more
# information.
"""A library of classes representing computations in a deserialized form."""
import abc
import enum
from typing import Any, Iterable, List, Optional, Tuple, Type
import zlib
from tensorflow_federated.proto.v0 import computation_pb2 as pb
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.core.impl.compiler import intrinsic_defs
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import placements
from tensorflow_federated.python.core.impl.types import type_analysis
from tensorflow_federated.python.core.impl.types import type_serialization
from tensorflow_federated.python.core.impl.types import typed_object
def _check_computation_oneof(
computation_proto: pb.Computation,
expected_computation_oneof: Optional[str],
):
"""Checks that `computation_proto` is a oneof of the expected variant."""
computation_oneof = computation_proto.WhichOneof('computation')
if computation_oneof != expected_computation_oneof:
raise TypeError('Expected a {} computation, found {}.'.format(
expected_computation_oneof, computation_oneof))
class UnexpectedBlockError(TypeError):
def __init__(self, expected: Type['ComputationBuildingBlock'],
actual: 'ComputationBuildingBlock'):
message = f'Expected block of kind {expected}, found block {actual}'
super().__init__(message)
self.actual = actual
self.expected = expected
class ComputationBuildingBlock(typed_object.TypedObject, metaclass=abc.ABCMeta):
"""The abstract base class for abstractions in the TFF's internal language.
Instances of this class correspond roughly one-to-one to the abstractions
defined in the `Computation` message in TFF's `computation.proto`, and are
intended primarily for the ease of manipulating the abstract syntax trees
(AST) of federated computations as they are transformed by TFF's compiler
pipeline to mold into the needs of a particular execution backend. The only
abstraction that does not have a dedicated Python equivalent is a section
of TensorFlow code (it's represented by `tff.framework.CompiledComputation`).
"""
_deserializer_dict = None # Defined at the end of this file.
@classmethod
def from_proto(
cls: Type['ComputationBuildingBlock'],
computation_proto: pb.Computation,
) -> 'ComputationBuildingBlock':
"""Returns an instance of a derived class based on 'computation_proto'.
Args:
computation_proto: An instance of pb.Computation.
Returns:
An instance of a class that implements 'ComputationBuildingBlock' and
that contains the deserialized logic from in 'computation_proto'.
Raises:
NotImplementedError: if computation_proto contains a kind of computation
for which deserialization has not been implemented yet.
ValueError: if deserialization failed due to the argument being invalid.
"""
py_typecheck.check_type(computation_proto, pb.Computation)
computation_oneof = computation_proto.WhichOneof('computation')
deserializer = cls._deserializer_dict.get(computation_oneof)
if deserializer is not None:
deserialized = deserializer(computation_proto)
type_spec = type_serialization.deserialize_type(computation_proto.type)
if not deserialized.type_signature.is_equivalent_to(type_spec):
raise ValueError(
'The type {} derived from the computation structure does not '
'match the type {} declared in its signature'.format(
deserialized.type_signature, type_spec))
return deserialized
else:
raise NotImplementedError(
'Deserialization for computations of type {} has not been '
'implemented yet.'.format(computation_oneof))
def __init__(self, type_spec):
"""Constructs a computation building block with the given TFF type.
Args:
type_spec: An instance of types.Type, or something convertible to it via
types.to_type().
"""
type_signature = computation_types.to_type(type_spec)
self._type_signature = type_signature
self._cached_hash = None
self._cached_proto = None
@property
def type_signature(self) -> computation_types.Type:
return self._type_signature
def compact_representation(self):
"""Returns the compact string representation of this building block."""
return _string_representation(self, formatted=False)
def formatted_representation(self):
"""Returns the formatted string representation of this building block."""
return _string_representation(self, formatted=True)
def structural_representation(self):
"""Returns the structural string representation of this building block."""
return _structural_representation(self)
def check_reference(self):
"""Check that this is a 'Reference'."""
if not self.is_reference():
raise UnexpectedBlockError(Reference, self)
def is_reference(self):
"""Returns whether or not this block is a `Reference`."""
return False
def check_selection(self):
"""Check that this is a 'Selection'."""
if not self.is_selection():
raise UnexpectedBlockError(Selection, self)
def is_selection(self):
"""Returns whether or not this block is a `Selection`."""
return False
def check_struct(self):
"""Check that this is a `Struct`."""
if not self.is_struct():
raise UnexpectedBlockError(Struct, self)
def is_struct(self):
"""Returns whether or not this block is a `Struct`."""
return False
def check_call(self):
"""Check that this is a 'Call'."""
if not self.is_call():
raise UnexpectedBlockError(Call, self)
def is_call(self):
"""Returns whether or not this block is a `Call`."""
return False
def check_lambda(self):
"""Check that this is a 'Lambda'."""
if not self.is_lambda():
raise UnexpectedBlockError(Lambda, self)
def is_lambda(self):
"""Returns whether or not this block is a `Lambda`."""
return False
def check_block(self):
"""Check that this is a 'Block'."""
if not self.is_block():
raise UnexpectedBlockError(Block, self)
def is_block(self):
"""Returns whether or not this block is a `Block`."""
return False
def check_intrinsic(self):
"""Check that this is an 'Intrinsic'."""
if not self.is_intrinsic():
raise UnexpectedBlockError(Intrinsic, self)
def is_intrinsic(self):
"""Returns whether or not this block is an `Intrinsic`."""
return False
def check_data(self):
"""Check that this is a 'Data'."""
if not self.is_data():
raise UnexpectedBlockError(Data, self)
def is_data(self):
"""Returns whether or not this block is a `Data`."""
return False
def check_compiled_computation(self):
"""Check that this is a 'CompiledComputation'."""
if not self.is_compiled_computation():
raise UnexpectedBlockError(CompiledComputation, self)
def is_compiled_computation(self):
"""Returns whether or not this block is a `CompiledComputation`."""
return False
def check_placement(self):
"""Check that this is a 'Placement'."""
if not self.is_placement():
raise UnexpectedBlockError(Placement, self)
def is_placement(self):
"""Returns whether or not this block is a `Placement`."""
return False
@property
def proto(self):
"""Returns a serialized form of this object as a pb.Computation instance."""
if self._cached_proto is None:
self._cached_proto = self._proto()
return self._cached_proto
@abc.abstractmethod
def _proto(self):
"""Uncached, internal version of `proto`."""
raise NotImplementedError
# TODO(b/113112885): Add memoization after identifying a suitable externally
# available standard library that works in Python 2/3.
@abc.abstractmethod
def __repr__(self):
"""Returns a full-form representation of this computation building block."""
raise NotImplementedError
def __str__(self):
"""Returns a concise representation of this computation building block."""
return self.compact_representation()
def __hash__(self):
if self._cached_hash is None:
self._cached_hash = self._uncached_hash()
return self._cached_hash
@abc.abstractmethod
def _uncached_hash(self):
raise NotImplementedError
class Reference(ComputationBuildingBlock):
"""A reference to a name defined earlier in TFF's internal language.
Names are defined by lambda expressions (which have formal named parameters),
and block structures (which can have one or more locals). The reference
construct is used to refer to those parameters or locals by a string name.
The usual hiding rules apply. A reference binds to the closest definition of
the given name in the most deeply nested surrounding lambda or block.
A concise notation for a reference to name `foo` is `foo`. For example, in
a lambda expression `(x -> f(x))` there are two references, one to `x` that
  is defined as the formal parameter of the lambda expression, and one to `f`
that must have been defined somewhere in the surrounding context.
"""
@classmethod
def from_proto(
cls: Type['Reference'],
computation_proto: pb.Computation,
) -> 'Reference':
_check_computation_oneof(computation_proto, 'reference')
return cls(
str(computation_proto.reference.name),
type_serialization.deserialize_type(computation_proto.type))
def __init__(self, name, type_spec, context=None):
"""Creates a reference to 'name' of type 'type_spec' in context 'context'.
Args:
name: The name of the referenced entity.
type_spec: The type spec of the referenced entity.
context: The optional context in which the referenced entity is defined.
This class does not prescribe what Python type the 'context' needs to be
and merely exposes it as a property (see below). The only requirement is
that the context implements str() and repr().
Raises:
TypeError: if the arguments are of the wrong types.
"""
py_typecheck.check_type(name, str)
super().__init__(type_spec)
self._name = name
self._context = context
def _proto(self):
return pb.Computation(
type=type_serialization.serialize_type(self.type_signature),
reference=pb.Reference(name=self._name))
def is_reference(self):
return True
@property
def name(self):
return self._name
@property
def context(self):
return self._context
def _uncached_hash(self):
return hash((self._name, self.type_signature))
def __repr__(self):
return 'Reference(\'{}\', {!r}{})'.format(
self._name, self.type_signature,
', {!r}'.format(self._context) if self._context is not None else '')
class Selection(ComputationBuildingBlock):
"""A selection by name or index from a struct-typed value in TFF's language.
The concise syntax for selections is `foo.bar` (selecting a named `bar` from
the value of expression `foo`), and `foo[n]` (selecting element at index `n`
from the value of `foo`).
"""
@classmethod
def from_proto(
cls: Type['Selection'],
computation_proto: pb.Computation,
) -> 'Selection':
_check_computation_oneof(computation_proto, 'selection')
selection = ComputationBuildingBlock.from_proto(
computation_proto.selection.source)
return cls(selection, index=computation_proto.selection.index)
def __init__(self, source, name=None, index=None):
"""A selection from 'source' by a string or numeric 'name_or_index'.
Exactly one of 'name' or 'index' must be specified (not None).
Args:
source: The source value to select from (an instance of
ComputationBuildingBlock).
name: A string name of the element to be selected.
index: A numeric index of the element to be selected.
Raises:
TypeError: if arguments are of the wrong types.
ValueError: if the name is empty or index is negative, or the name/index
is not compatible with the type signature of the source, or neither or
both are defined (not None).
"""
py_typecheck.check_type(source, ComputationBuildingBlock)
if name is None and index is None:
raise ValueError(
'Must define either a name or index, and neither was specified.')
if name is not None and index is not None:
raise ValueError(
'Cannot simultaneously specify a name and an index, choose one.')
source_type = source.type_signature
if not source_type.is_struct():
raise TypeError('Expected the source of selection to be a TFF struct, '
'instead found it to be of type {}.'.format(source_type))
if name is not None:
py_typecheck.check_type(name, str)
if not name:
raise ValueError('The name of the selected element cannot be empty.')
# Normalize, in case we are dealing with a Unicode type or some such.
name = str(name)
if not structure.has_field(source_type, name):
raise ValueError(
f'Error selecting named field `{name}` from type `{source_type}`, '
f'whose only named fields are {structure.name_list(source_type)}.')
type_signature = source_type[name]
else:
py_typecheck.check_type(index, int)
length = len(source_type)
if index < 0 or index >= length:
raise ValueError(
f'The index `{index}` does not fit into the valid range in the '
f'struct type: 0..{length}')
type_signature = source_type[index]
super().__init__(type_signature)
self._source = source
self._name = name
self._index = index
def _proto(self):
selection = pb.Selection(source=self._source.proto, index=self.as_index())
return pb.Computation(
type=type_serialization.serialize_type(self.type_signature),
selection=selection)
def _uncached_hash(self):
return hash((self._source, self._name, self._index))
def is_selection(self):
return True
@property
def source(self) -> ComputationBuildingBlock:
return self._source
@property
def name(self) -> Optional[str]:
return self._name
@property
def index(self) -> Optional[int]:
return self._index
def as_index(self) -> int:
if self._index is not None:
return self._index
else:
field_to_index = structure.name_to_index_map(self.source.type_signature)
return field_to_index[self._name]
def __repr__(self):
if self._name is not None:
return 'Selection({!r}, name=\'{}\')'.format(self._source, self._name)
else:
return 'Selection({!r}, index={})'.format(self._source, self._index)
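# Illustrative sketch (not part of the original module): selecting from a
# struct-typed reference. The argument type below is an assumption and `tf`
# refers to an assumed `import tensorflow as tf`.
#
#   arg = Reference('arg', [('a', tf.int32), ('b', tf.bool)])
#   Selection(arg, name='a').compact_representation()  # 'arg.a'
#   Selection(arg, index=1).compact_representation()   # 'arg[1]'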
class Struct(ComputationBuildingBlock, structure.Struct):
"""A struct with named or unnamed elements in TFF's internal language.
The concise notation for structs is `<name_1=value_1, ...., name_n=value_n>`
for structs with named elements, `<value_1, ..., value_n>` for structs with
unnamed elements, or a mixture of these for structs with some named and some
unnamed elements, where `name_k` are the names, and `value_k` are the value
expressions.
For example, a lambda expression that applies `fn` to elements of 2-structs
pointwise could be represented as `(arg -> <fn(arg[0]),fn(arg[1])>)`.
"""
@classmethod
def from_proto(
cls: Type['Struct'],
computation_proto: pb.Computation,
) -> 'Struct':
_check_computation_oneof(computation_proto, 'struct')
return cls([(str(e.name) if e.name else None,
ComputationBuildingBlock.from_proto(e.value))
for e in computation_proto.struct.element])
def __init__(self, elements, container_type=None):
"""Constructs a struct from the given list of elements.
Args:
elements: The elements of the struct, supplied as a list of (name, value)
pairs, where 'name' can be None in case the corresponding element is not
named and only accessible via an index (see also `structure.Struct`).
container_type: An optional Python container type to associate with the
struct.
Raises:
TypeError: if arguments are of the wrong types.
"""
# Not using super() here and below, as the two base classes have different
# signatures of their constructors, and the struct implementation
# of selection interfaces should override that in the generic class 'Value'
# to favor simplified expressions where simplification is possible.
def _map_element(e):
"""Returns a named or unnamed element."""
if isinstance(e, ComputationBuildingBlock):
return (None, e)
elif py_typecheck.is_name_value_pair(
e, name_required=False, value_type=ComputationBuildingBlock):
if e[0] is not None and not e[0]:
raise ValueError('Unexpected struct element with empty string name.')
return (e[0], e[1])
else:
raise TypeError('Unexpected struct element: {}.'.format(e))
elements = [_map_element(e) for e in elements]
element_pairs = [((e[0],
e[1].type_signature) if e[0] else e[1].type_signature)
for e in elements]
if container_type is None:
type_signature = computation_types.StructType(element_pairs)
else:
type_signature = computation_types.StructWithPythonType(
element_pairs, container_type)
ComputationBuildingBlock.__init__(self, type_signature)
structure.Struct.__init__(self, elements)
def _proto(self):
elements = []
for k, v in structure.iter_elements(self):
if k is not None:
element = pb.Struct.Element(name=k, value=v.proto)
else:
element = pb.Struct.Element(value=v.proto)
elements.append(element)
return pb.Computation(
type=type_serialization.serialize_type(self.type_signature),
struct=pb.Struct(element=elements))
def _uncached_hash(self):
return structure.Struct.__hash__(self)
def is_struct(self):
return True
def __repr__(self):
def _element_repr(element):
name, value = element
name_repr = '\'{}\''.format(name) if name is not None else 'None'
return '({}, {!r})'.format(name_repr, value)
return 'Struct([{}])'.format(', '.join(
_element_repr(e) for e in structure.iter_elements(self)))
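# Illustrative sketch (not part of the original module): a named struct such as
# `<a=..., b=...>` could be assembled as
#
#   Struct([('a', block_a), ('b', block_b)])
#
# where `block_a` and `block_b` are assumed to be existing
# ComputationBuildingBlock instances (e.g. References or Calls).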
class Call(ComputationBuildingBlock):
"""A representation of a function invocation in TFF's internal language.
The call construct takes an argument struct with two elements, the first being
the function to invoke (represented as a computation with a functional result
type), and the second being the argument to feed to that function. Typically,
  the function is either a TFF intrinsic or a lambda expression.
The concise notation for calls is `foo(bar)`, where `foo` is the function,
and `bar` is the argument.
"""
@classmethod
def from_proto(
cls: Type['Call'],
computation_proto: pb.Computation,
) -> 'Call':
_check_computation_oneof(computation_proto, 'call')
fn = ComputationBuildingBlock.from_proto(computation_proto.call.function)
arg_proto = computation_proto.call.argument
if arg_proto.WhichOneof('computation') is not None:
arg = ComputationBuildingBlock.from_proto(arg_proto)
else:
arg = None
return cls(fn, arg)
def __init__(self, fn, arg=None):
"""Creates a call to 'fn' with argument 'arg'.
Args:
fn: A value of a functional type that represents the function to invoke.
arg: The optional argument, present iff 'fn' expects one, of a type that
matches the type of 'fn'.
Raises:
TypeError: if the arguments are of the wrong types.
"""
py_typecheck.check_type(fn, ComputationBuildingBlock)
if arg is not None:
py_typecheck.check_type(arg, ComputationBuildingBlock)
if not fn.type_signature.is_function():
raise TypeError('Expected fn to be of a functional type, '
'but found that its type is {}.'.format(
fn.type_signature))
if fn.type_signature.parameter is not None:
if arg is None:
raise TypeError('The invoked function expects an argument of type {}, '
'but got None instead.'.format(
fn.type_signature.parameter))
if not fn.type_signature.parameter.is_assignable_from(arg.type_signature):
raise TypeError(
'The parameter of the invoked function is expected to be of '
'type {}, but the supplied argument is of an incompatible '
'type {}.'.format(fn.type_signature.parameter, arg.type_signature))
elif arg is not None:
raise TypeError(
'The invoked function does not expect any parameters, but got '
'an argument of type {}.'.format(py_typecheck.type_string(type(arg))))
super().__init__(fn.type_signature.result)
# By now, this condition should hold, so we only double-check in debug mode.
assert (arg is not None) == (fn.type_signature.parameter is not None)
self._function = fn
self._argument = arg
def _proto(self):
if self._argument is not None:
call = pb.Call(
function=self._function.proto, argument=self._argument.proto)
else:
call = pb.Call(function=self._function.proto)
return pb.Computation(
type=type_serialization.serialize_type(self.type_signature), call=call)
def _uncached_hash(self):
return hash((self._function, self._argument))
def is_call(self):
return True
@property
def function(self):
return self._function
@property
def argument(self):
return self._argument
def __repr__(self):
if self._argument is not None:
return 'Call({!r}, {!r})'.format(self._function, self._argument)
else:
return 'Call({!r})'.format(self._function)
class Lambda(ComputationBuildingBlock):
"""A representation of a lambda expression in TFF's internal language.
A lambda expression consists of a string formal parameter name, and a result
expression that can contain references by name to that formal parameter. A
concise notation for lambdas is `(foo -> bar)`, where `foo` is the name of
the formal parameter, and `bar` is the result expression.
"""
@classmethod
def from_proto(
cls: Type['Lambda'],
computation_proto: pb.Computation,
) -> 'Lambda':
_check_computation_oneof(computation_proto, 'lambda')
the_lambda = getattr(computation_proto, 'lambda')
return cls(
str(the_lambda.parameter_name),
type_serialization.deserialize_type(
computation_proto.type.function.parameter),
ComputationBuildingBlock.from_proto(the_lambda.result))
def __init__(
self,
parameter_name: Optional[str],
parameter_type: Optional[Any],
result: ComputationBuildingBlock,
):
"""Creates a lambda expression.
Args:
parameter_name: The (string) name of the parameter accepted by the lambda.
This name can be used by Reference() instances in the body of the lambda
to refer to the parameter. Note that an empty parameter name shall be
treated as equivalent to no parameter.
parameter_type: The type of the parameter, an instance of types.Type or
something convertible to it by types.to_type().
result: The resulting value produced by the expression that forms the body
of the lambda. Must be an instance of ComputationBuildingBlock.
Raises:
TypeError: if the arguments are of the wrong types.
"""
if parameter_name == '': # pylint: disable=g-explicit-bool-comparison
parameter_name = None
if (parameter_name is None) != (parameter_type is None):
raise TypeError(
'A lambda expression must have either a valid parameter name and type '
'or both parameter name and type must be `None`. '
'`parameter_name` was {} but `parameter_type` was {}.'.format(
parameter_name, parameter_type))
if parameter_name is not None:
py_typecheck.check_type(parameter_name, str)
parameter_type = computation_types.to_type(parameter_type)
py_typecheck.check_type(result, ComputationBuildingBlock)
super().__init__(
computation_types.FunctionType(parameter_type, result.type_signature))
self._parameter_name = parameter_name
self._parameter_type = parameter_type
self._result = result
def _proto(self) -> pb.Computation:
type_signature = type_serialization.serialize_type(self.type_signature)
fn = pb.Lambda(
parameter_name=self._parameter_name, result=self._result.proto)
# We are unpacking the lambda argument here because `lambda` is a reserved
# keyword in Python, but it is also the name of the parameter for a
# `pb.Computation`.
# https://developers.google.com/protocol-buffers/docs/reference/python-generated#keyword-conflicts
return pb.Computation(type=type_signature, **{'lambda': fn}) # pytype: disable=wrong-keyword-args
def _uncached_hash(self):
return hash((self._parameter_name, self._parameter_type, self._result))
def is_lambda(self):
return True
@property
def parameter_name(self) -> Optional[str]:
return self._parameter_name
@property
def parameter_type(self) -> Optional[computation_types.Type]:
return self._parameter_type
@property
def result(self) -> ComputationBuildingBlock:
return self._result
def __repr__(self) -> str:
return 'Lambda(\'{}\', {!r}, {!r})'.format(self._parameter_name,
self._parameter_type,
self._result)
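# Illustrative sketch (not part of the original module): the identity lambda
# `(x -> x)` over a 32-bit integer could be built and printed as
#
#   arg_type = computation_types.TensorType(tf.int32)
#   identity = Lambda('x', arg_type, Reference('x', arg_type))
#   identity.compact_representation()  # '(x -> x)'
#
# where `tf` refers to an assumed `import tensorflow as tf`.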
class Block(ComputationBuildingBlock):
"""A representation of a block of code in TFF's internal language.
A block is a syntactic structure that consists of a sequence of local name
bindings followed by a result. The bindings are interpreted sequentially,
with bindings later in the sequence in the scope of those listed earlier,
and the result in the scope of the entire sequence. The usual hiding rules
apply.
An informal concise notation for blocks is the following, with `name_k`
representing the names defined locally for the block, `value_k` the values
  associated with them, and `result` being the expression that represents the
value of the block construct.
```
let name_1=value_1, name_2=value_2, ..., name_n=value_n in result
```
Blocks are technically a redundant abstraction, as they can be equally well
represented by lambda expressions. A block of the form `let x=y in z` is
roughly equivalent to `(x -> z)(y)`. Although redundant, blocks have a use
as a way to reduce TFF computation ASTs to a simpler, less nested and more
readable form, and are helpful in AST transformations as a mechanism that
prevents possible naming conflicts.
An example use of a block expression to flatten a nested structure below:
```
z = federated_sum(federated_map(x, federated_broadcast(y)))
```
An equivalent form in a more sequential notation using a block expression:
```
let
v1 = federated_broadcast(y),
v2 = federated_map(x, v1)
in
federated_sum(v2)
```
"""
@classmethod
def from_proto(
cls: Type['Block'],
computation_proto: pb.Computation,
) -> 'Block':
_check_computation_oneof(computation_proto, 'block')
return cls([(str(loc.name), ComputationBuildingBlock.from_proto(loc.value))
for loc in computation_proto.block.local],
ComputationBuildingBlock.from_proto(
computation_proto.block.result))
def __init__(
self,
local_symbols: Iterable[Tuple[str, ComputationBuildingBlock]],
result: ComputationBuildingBlock,
):
"""Creates a block of TFF code.
Args:
local_symbols: The list of one or more local declarations, each of which
is a 2-tuple (name, value), with 'name' being the string name of a local
symbol being defined, and 'value' being the instance of
ComputationBuildingBlock, the output of which will be locally bound to
that name.
result: An instance of ComputationBuildingBlock that computes the result.
Raises:
TypeError: if the arguments are of the wrong types.
"""
updated_locals = []
for index, element in enumerate(local_symbols):
if (not isinstance(element, tuple) or (len(element) != 2) or
not isinstance(element[0], str)):
raise TypeError(
'Expected the locals to be a list of 2-element structs with string '
'name as their first element, but this is not the case for the '
'local at position {} in the sequence: {}.'.format(index, element))
name = element[0]
value = element[1]
py_typecheck.check_type(value, ComputationBuildingBlock)
updated_locals.append((name, value))
py_typecheck.check_type(result, ComputationBuildingBlock)
super().__init__(result.type_signature)
self._locals = updated_locals
self._result = result
def _proto(self) -> pb.Computation:
return pb.Computation(
type=type_serialization.serialize_type(self.type_signature),
block=pb.Block(
**{
'local': [
pb.Block.Local(name=k, value=v.proto)
for k, v in self._locals
],
'result': self._result.proto
}))
def _uncached_hash(self):
return hash((tuple(self._locals), self._result))
def is_block(self):
return True
@property
def locals(self) -> List[Tuple[str, ComputationBuildingBlock]]:
return list(self._locals)
@property
def result(self) -> ComputationBuildingBlock:
return self._result
def __repr__(self) -> str:
return 'Block([{}], {!r})'.format(
', '.join('(\'{}\', {!r})'.format(k, v) for k, v in self._locals),
self._result)
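# Illustrative sketch (not part of the original module): the informal
# `let x=y in z` form from the docstring corresponds to
#
#   Block([('x', y)], z)
#
# where `y` and `z` are assumed ComputationBuildingBlocks, `z` typically
# containing Reference('x', y.type_signature); it behaves roughly like the
# lambda form `Call(Lambda('x', y.type_signature, z), y)`.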
class Intrinsic(ComputationBuildingBlock):
"""A representation of an intrinsic in TFF's internal language.
  An intrinsic is a symbol known to TFF's compiler pipeline, represented
  as a known URI. It generally appears in expressions with a concrete type,
  although all intrinsics are defined with template types. This class does not
deal with parsing intrinsic URIs and verifying their types, it is only a
container. Parsing and type analysis are a responsibility of the components
that manipulate ASTs. See intrinsic_defs.py for the list of known intrinsics.
"""
@classmethod
def from_proto(
cls: Type['Intrinsic'],
computation_proto: pb.Computation,
) -> 'Intrinsic':
_check_computation_oneof(computation_proto, 'intrinsic')
return cls(computation_proto.intrinsic.uri,
type_serialization.deserialize_type(computation_proto.type))
def __init__(self, uri: str, type_signature: computation_types.Type):
"""Creates an intrinsic.
Args:
uri: The URI of the intrinsic.
type_signature: A `tff.Type`, the type of the intrinsic.
Raises:
TypeError: if the arguments are of the wrong types.
"""
py_typecheck.check_type(uri, str)
py_typecheck.check_type(type_signature, computation_types.Type)
intrinsic_def = intrinsic_defs.uri_to_intrinsic_def(uri)
if intrinsic_def is not None:
# Note: this is really expensive.
type_analysis.check_concrete_instance_of(type_signature,
intrinsic_def.type_signature)
super().__init__(type_signature)
self._uri = uri
def _proto(self) -> pb.Computation:
return pb.Computation(
type=type_serialization.serialize_type(self.type_signature),
intrinsic=pb.Intrinsic(uri=self._uri))
def intrinsic_def(self) -> intrinsic_defs.IntrinsicDef:
intrinsic_def = intrinsic_defs.uri_to_intrinsic_def(self._uri)
if intrinsic_def is None:
raise ValueError(
'Failed to retrieve definition of intrinsic with URI '
f'`{self._uri}`. Perhaps a definition needs to be added to '
'`intrinsic_defs.py`?')
return intrinsic_def
def _uncached_hash(self):
return hash((self._uri, self.type_signature))
def is_intrinsic(self):
return True
@property
def uri(self) -> str:
return self._uri
def __repr__(self) -> str:
return 'Intrinsic(\'{}\', {!r})'.format(self._uri, self.type_signature)
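# Illustrative sketch (not part of the original module): a concrete
# `federated_sum` intrinsic might be constructed as
#
#   int32 = computation_types.TensorType(tf.int32)
#   Intrinsic(
#       'federated_sum',
#       computation_types.FunctionType(
#           computation_types.FederatedType(int32, placements.CLIENTS),
#           computation_types.FederatedType(int32, placements.SERVER)))
#
# where `tf` refers to an assumed `import tensorflow as tf`; the URI and type
# here follow the pattern in intrinsic_defs.py.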
class Data(ComputationBuildingBlock):
"""A representation of data (an input pipeline).
This class does not deal with parsing data URIs and verifying correctness,
  it is only a container. Parsing and type analysis are the responsibility
  of a component external to this module.
"""
@classmethod
def from_proto(
cls: Type['Data'],
computation_proto: pb.Computation,
) -> 'Data':
_check_computation_oneof(computation_proto, 'data')
return cls(computation_proto.data.uri,
type_serialization.deserialize_type(computation_proto.type))
def __init__(self, uri: str, type_spec: Any):
"""Creates a representation of data.
Args:
uri: The URI that characterizes the data.
type_spec: Either the types.Type that represents the type of this data, or
something convertible to it by types.to_type().
Raises:
TypeError: if the arguments are of the wrong types.
ValueError: if the user tries to specify an empty URI.
"""
py_typecheck.check_type(uri, str)
if not uri:
raise ValueError('Empty string cannot be passed as URI to Data.')
if type_spec is None:
raise TypeError(
          'Data {} cannot be created without a TFF type.'.format(uri))
type_spec = computation_types.to_type(type_spec)
super().__init__(type_spec)
self._uri = uri
def _proto(self) -> pb.Computation:
return pb.Computation(
type=type_serialization.serialize_type(self.type_signature),
data=pb.Data(uri=self._uri))
def _uncached_hash(self):
return hash((self._uri, self.type_signature))
def is_data(self):
return True
@property
def uri(self) -> str:
return self._uri
def __repr__(self) -> str:
return 'Data(\'{}\', {!r})'.format(self._uri, self.type_signature)
class CompiledComputation(ComputationBuildingBlock):
"""A representation of a fully constructed and serialized computation.
  A compiled computation is one that has not been parsed into constituents, and
is simply represented as an embedded `Computation` protocol buffer. Whereas
technically, any computation can be represented and passed around this way,
this structure is generally only used to represent TensorFlow sections, for
which otherwise there isn't any dedicated structure.
"""
def __init__(self,
proto: pb.Computation,
name: Optional[str] = None,
type_signature: Optional[computation_types.Type] = None):
"""Creates a representation of a fully constructed computation.
Args:
proto: An instance of pb.Computation with the computation logic.
name: An optional string name to associate with this computation, used
only for debugging purposes. If the name is not specified (None), it is
autogenerated as a hexadecimal string from the hash of the proto.
type_signature: An optional type signature to associate with this
computation rather than the serialized one.
Raises:
TypeError: if the arguments are of the wrong types.
"""
py_typecheck.check_type(proto, pb.Computation)
if name is not None:
py_typecheck.check_type(name, str)
if type_signature is None:
type_signature = type_serialization.deserialize_type(proto.type)
py_typecheck.check_type(type_signature, computation_types.Type)
super().__init__(type_signature)
self._proto_representation = proto
if name is not None:
self._name = name
else:
self._name = '{:x}'.format(
zlib.adler32(self._proto_representation.SerializeToString()))
def _proto(self) -> pb.Computation:
return self._proto_representation
def _uncached_hash(self):
return hash(self._proto_representation.SerializeToString())
def is_compiled_computation(self):
return True
@property
def name(self) -> str:
return self._name
def __repr__(self) -> str:
return 'CompiledComputation(\'{}\', {!r})'.format(self._name,
self.type_signature)
class Placement(ComputationBuildingBlock):
"""A representation of a placement literal in TFF's internal language.
Currently this can only be `tff.SERVER` or `tff.CLIENTS`.
"""
@classmethod
def from_proto(
cls: Type['Placement'],
computation_proto: pb.Computation,
) -> 'Placement':
_check_computation_oneof(computation_proto, 'placement')
return cls(
placements.uri_to_placement_literal(
str(computation_proto.placement.uri)))
def __init__(self, literal: placements.PlacementLiteral):
"""Constructs a new placement instance for the given placement literal.
Args:
literal: The placement literal.
Raises:
TypeError: if the arguments are of the wrong types.
"""
py_typecheck.check_type(literal, placements.PlacementLiteral)
super().__init__(computation_types.PlacementType())
self._literal = literal
def _proto(self) -> pb.Computation:
return pb.Computation(
type=type_serialization.serialize_type(self.type_signature),
placement=pb.Placement(uri=self._literal.uri))
def _uncached_hash(self):
return hash(self._literal)
def is_placement(self):
return True
@property
def uri(self) -> str:
return self._literal.uri
def __repr__(self) -> str:
return 'Placement(\'{}\')'.format(self.uri)
def _string_representation(
comp: ComputationBuildingBlock,
formatted: bool,
) -> str:
"""Returns the string representation of a `ComputationBuildingBlock`.
  This function creates a `list` of strings representing the given `comp`,
  combines the strings into either a formatted or an unformatted representation,
  and returns the resulting string representation.
Args:
comp: An instance of a `ComputationBuildingBlock`.
formatted: A boolean indicating if the returned string should be formatted.
Raises:
    TypeError: If `comp` has an unexpected type.
"""
py_typecheck.check_type(comp, ComputationBuildingBlock)
def _join(components: Iterable[List[str]]) -> List[str]:
"""Returns a `list` of strings by combining each component in `components`.
>>> _join([['a'], ['b'], ['c']])
['abc']
>>> _join([['a', 'b', 'c'], ['d', 'e', 'f']])
['abcd', 'ef']
This function is used to help track where new-lines should be inserted into
the string representation if the lines are formatted.
Args:
components: A `list` where each element is a `list` of strings
representing a part of the string of a `ComputationBuildingBlock`.
"""
lines = ['']
for component in components:
lines[-1] = '{}{}'.format(lines[-1], component[0])
lines.extend(component[1:])
return lines
def _indent(lines, indent_chars=' '):
"""Returns a `list` of strings indented across a slice."""
return ['{}{}'.format(indent_chars, e) for e in lines]
def _lines_for_named_comps(named_comps, formatted):
"""Returns a `list` of strings representing the given `named_comps`.
Args:
      named_comps: A `list` of named computations, each being a pair consisting
of a name (either a string, or `None`) and a `ComputationBuildingBlock`.
formatted: A boolean indicating if the returned string should be
formatted.
"""
lines = []
for index, (name, comp) in enumerate(named_comps):
if index != 0:
if formatted:
lines.append([',', ''])
else:
lines.append([','])
element_lines = _lines_for_comp(comp, formatted)
if name is not None:
element_lines = _join([
['{}='.format(name)],
element_lines,
])
lines.append(element_lines)
return _join(lines)
def _lines_for_comp(comp, formatted):
"""Returns a `list` of strings representing the given `comp`.
Args:
comp: An instance of a `ComputationBuildingBlock`.
formatted: A boolean indicating if the returned string should be
formatted.
"""
if comp.is_block():
lines = []
variables_lines = _lines_for_named_comps(comp.locals, formatted)
if formatted:
variables_lines = _indent(variables_lines)
lines.extend([['(let ', ''], variables_lines, ['', ' in ']])
else:
lines.extend([['(let '], variables_lines, [' in ']])
result_lines = _lines_for_comp(comp.result, formatted)
lines.append(result_lines)
lines.append([')'])
return _join(lines)
elif comp.is_reference():
if comp.context is not None:
return ['{}@{}'.format(comp.name, comp.context)]
else:
return [comp.name]
elif comp.is_selection():
source_lines = _lines_for_comp(comp.source, formatted)
if comp.name is not None:
return _join([source_lines, ['.{}'.format(comp.name)]])
else:
return _join([source_lines, ['[{}]'.format(comp.index)]])
elif comp.is_call():
function_lines = _lines_for_comp(comp.function, formatted)
if comp.argument is not None:
argument_lines = _lines_for_comp(comp.argument, formatted)
return _join([function_lines, ['('], argument_lines, [')']])
else:
return _join([function_lines, ['()']])
elif comp.is_compiled_computation():
return ['comp#{}'.format(comp.name)]
elif comp.is_data():
return [comp.uri]
elif comp.is_intrinsic():
return [comp.uri]
elif comp.is_lambda():
result_lines = _lines_for_comp(comp.result, formatted)
if comp.parameter_type is None:
param_name = ''
else:
param_name = comp.parameter_name
lines = [['({} -> '.format(param_name)], result_lines, [')']]
return _join(lines)
elif comp.is_placement():
return [comp._literal.name] # pylint: disable=protected-access
elif comp.is_struct():
if len(comp) == 0: # pylint: disable=g-explicit-length-test
return ['<>']
elements = structure.to_elements(comp)
elements_lines = _lines_for_named_comps(elements, formatted)
if formatted:
elements_lines = _indent(elements_lines)
lines = [['<', ''], elements_lines, ['', '>']]
else:
lines = [['<'], elements_lines, ['>']]
return _join(lines)
else:
raise NotImplementedError('Unexpected type found: {}.'.format(type(comp)))
lines = _lines_for_comp(comp, formatted)
lines = [line.rstrip() for line in lines]
if formatted:
return '\n'.join(lines)
else:
return ''.join(lines)
def _structural_representation(comp):
"""Returns the structural string representation of the given `comp`.
  This function creates and returns a string representing the structure of the
abstract syntax tree for the given `comp`.
Args:
comp: An instance of a `ComputationBuildingBlock`.
Raises:
    TypeError: If `comp` has an unexpected type.
"""
py_typecheck.check_type(comp, ComputationBuildingBlock)
padding_char = ' '
def _get_leading_padding(string):
"""Returns the length of the leading padding for the given `string`."""
for index, character in enumerate(string):
if character != padding_char:
return index
return len(string)
def _get_trailing_padding(string):
"""Returns the length of the trailing padding for the given `string`."""
for index, character in enumerate(reversed(string)):
if character != padding_char:
return index
return len(string)
def _pad_left(lines, total_width):
"""Pads the beginning of each line in `lines` to the given `total_width`.
>>>_pad_left(['aa', 'bb'], 4)
[' aa', ' bb',]
Args:
lines: A `list` of strings to pad.
total_width: The length that each line in `lines` should be padded to.
Returns:
A `list` of lines with padding applied.
"""
def _pad_line_left(line, total_width):
current_width = len(line)
assert current_width <= total_width
padding = total_width - current_width
return '{}{}'.format(padding_char * padding, line)
return [_pad_line_left(line, total_width) for line in lines]
def _pad_right(lines, total_width):
"""Pads the end of each line in `lines` to the given `total_width`.
>>>_pad_right(['aa', 'bb'], 4)
['aa ', 'bb ']
Args:
lines: A `list` of strings to pad.
total_width: The length that each line in `lines` should be padded to.
Returns:
A `list` of lines with padding applied.
"""
def _pad_line_right(line, total_width):
current_width = len(line)
assert current_width <= total_width
padding = total_width - current_width
return '{}{}'.format(line, padding_char * padding)
return [_pad_line_right(line, total_width) for line in lines]
class Alignment(enum.Enum):
LEFT = 1
RIGHT = 2
def _concatenate(lines_1, lines_2, align):
"""Concatenates two `list`s of strings.
Concatenates two `list`s of strings by appending one list of strings to the
other and then aligning lines of different widths by either padding the left
or padding the right of each line to the width of the longest line.
>>>_concatenate(['aa', 'bb'], ['ccc'], Alignment.LEFT)
['aa ', 'bb ', 'ccc']
Args:
lines_1: A `list` of strings.
lines_2: A `list` of strings.
align: An enum indicating how to align lines of different widths.
Returns:
A `list` of lines.
"""
lines = lines_1 + lines_2
longest_line = max(lines, key=len)
longest_width = len(longest_line)
if align is Alignment.LEFT:
return _pad_right(lines, longest_width)
elif align is Alignment.RIGHT:
return _pad_left(lines, longest_width)
def _calculate_inset_from_padding(left, right, preferred_padding,
minimum_content_padding):
"""Calculates the inset for the given padding.
Note: This function is intended to only be called from `_fit_with_padding`.
Args:
left: A `list` of strings.
right: A `list` of strings.
preferred_padding: The preferred amount of non-negative padding between
the lines in the fitted `list` of strings.
minimum_content_padding: The minimum amount of non-negative padding
allowed between the lines in the fitted `list` of strings.
Returns:
An integer.
"""
assert preferred_padding >= 0
assert minimum_content_padding >= 0
trailing_padding = _get_trailing_padding(left[0])
leading_padding = _get_leading_padding(right[0])
inset = trailing_padding + leading_padding - preferred_padding
for left_line, right_line in zip(left[1:], right[1:]):
trailing_padding = _get_trailing_padding(left_line)
leading_padding = _get_leading_padding(right_line)
minimum_inset = trailing_padding + leading_padding - minimum_content_padding
inset = min(inset, minimum_inset)
return inset
def _fit_with_inset(left, right, inset):
r"""Concatenates the lines of two `list`s of strings.
Note: This function is intended to only be called from `_fit_with_padding`.
Args:
left: A `list` of strings.
right: A `list` of strings.
inset: The amount of padding to remove or add when concatenating the
lines.
Returns:
A `list` of lines.
"""
lines = []
for left_line, right_line in zip(left, right):
if inset > 0:
left_inset = 0
right_inset = 0
trailing_padding = _get_trailing_padding(left_line)
if trailing_padding > 0:
left_inset = min(trailing_padding, inset)
left_line = left_line[:-left_inset]
if inset - left_inset > 0:
leading_padding = _get_leading_padding(right_line)
if leading_padding > 0:
right_inset = min(leading_padding, inset - left_inset)
right_line = right_line[right_inset:]
padding = abs(inset) if inset < 0 else 0
line = ''.join([left_line, padding_char * padding, right_line])
lines.append(line)
left_height = len(left)
right_height = len(right)
if left_height > right_height:
lines.extend(left[right_height:])
elif right_height > left_height:
lines.extend(right[left_height:])
longest_line = max(lines, key=len)
longest_width = len(longest_line)
shortest_line = min(lines, key=len)
shortest_width = len(shortest_line)
if shortest_width != longest_width:
if left_height > right_height:
lines = _pad_right(lines, longest_width)
else:
lines = _pad_left(lines, longest_width)
return lines
def _fit_with_padding(left,
right,
preferred_padding,
minimum_content_padding=4):
r"""Concatenates the lines of two `list`s of strings.
Concatenates the lines of two `list`s of strings by appending each line
together using a padding. The same padding is used to append each line and
the padding is calculated starting from the `preferred_padding` without
going below `minimum_content_padding` on any of the lines. If the two
`list`s of strings have different lengths, padding will be applied to
maintain the length of each string in the resulting `list` of strings.
>>>_fit_with_padding(['aa', 'bb'], ['ccc'])
['aa cccc', 'bb ']
>>>_fit_with_padding(['aa ', 'bb '], [' ccc'])
['aa cccc', 'bb ']
Args:
left: A `list` of strings.
right: A `list` of strings.
preferred_padding: The preferred amount of non-negative padding between
the lines in the fitted `list` of strings.
minimum_content_padding: The minimum amount of non-negative padding
allowed between the lines in the fitted `list` of strings.
Returns:
A `list` of lines.
"""
inset = _calculate_inset_from_padding(left, right, preferred_padding,
minimum_content_padding)
return _fit_with_inset(left, right, inset)
def _get_node_label(comp):
"""Returns a string for node in the structure of the given `comp`."""
if comp.is_block():
return 'Block'
elif comp.is_call():
return 'Call'
elif comp.is_compiled_computation():
return 'Compiled({})'.format(comp.name)
elif comp.is_data():
return comp.uri
elif comp.is_intrinsic():
return comp.uri
elif comp.is_lambda():
return 'Lambda({})'.format(comp.parameter_name)
elif comp.is_reference():
return 'Ref({})'.format(comp.name)
elif comp.is_placement():
return 'Placement'
elif comp.is_selection():
key = comp.name if comp.name is not None else comp.index
return 'Sel({})'.format(key)
elif comp.is_struct():
return 'Struct'
else:
raise TypeError('Unexpected type found: {}.'.format(type(comp)))
def _lines_for_named_comps(named_comps):
"""Returns a `list` of strings representing the given `named_comps`.
Args:
      named_comps: A `list` of named computations, each being a pair consisting
of a name (either a string, or `None`) and a `ComputationBuildingBlock`.
"""
lines = ['[']
for index, (name, comp) in enumerate(named_comps):
comp_lines = _lines_for_comp(comp)
if name is not None:
label = '{}='.format(name)
comp_lines = _fit_with_padding([label], comp_lines, 0, 0)
if index == 0:
lines = _fit_with_padding(lines, comp_lines, 0, 0)
else:
lines = _fit_with_padding(lines, [','], 0, 0)
lines = _fit_with_padding(lines, comp_lines, 1)
lines = _fit_with_padding(lines, [']'], 0, 0)
return lines
def _lines_for_comp(comp):
"""Returns a `list` of strings representing the given `comp`.
Args:
comp: An instance of a `ComputationBuildingBlock`.
"""
node_label = _get_node_label(comp)
if (comp.is_compiled_computation() or comp.is_data() or
comp.is_intrinsic() or comp.is_placement() or comp.is_reference()):
return [node_label]
elif comp.is_block():
variables_lines = _lines_for_named_comps(comp.locals)
variables_width = len(variables_lines[0])
variables_trailing_padding = _get_trailing_padding(variables_lines[0])
leading_padding = variables_width - variables_trailing_padding
edge_line = '{}/'.format(padding_char * leading_padding)
variables_lines = _concatenate([edge_line], variables_lines,
Alignment.LEFT)
result_lines = _lines_for_comp(comp.result)
result_width = len(result_lines[0])
leading_padding = _get_leading_padding(result_lines[0]) - 1
trailing_padding = result_width - leading_padding - 1
edge_line = '\\{}'.format(padding_char * trailing_padding)
result_lines = _concatenate([edge_line], result_lines, Alignment.RIGHT)
preferred_padding = len(node_label)
lines = _fit_with_padding(variables_lines, result_lines,
preferred_padding)
leading_padding = _get_leading_padding(lines[0]) + 1
node_line = '{}{}'.format(padding_char * leading_padding, node_label)
return _concatenate([node_line], lines, Alignment.LEFT)
elif comp.is_call():
function_lines = _lines_for_comp(comp.function)
function_width = len(function_lines[0])
function_trailing_padding = _get_trailing_padding(function_lines[0])
leading_padding = function_width - function_trailing_padding
edge_line = '{}/'.format(padding_char * leading_padding)
function_lines = _concatenate([edge_line], function_lines, Alignment.LEFT)
if comp.argument is not None:
argument_lines = _lines_for_comp(comp.argument)
argument_width = len(argument_lines[0])
leading_padding = _get_leading_padding(argument_lines[0]) - 1
trailing_padding = argument_width - leading_padding - 1
edge_line = '\\{}'.format(padding_char * trailing_padding)
argument_lines = _concatenate([edge_line], argument_lines,
Alignment.RIGHT)
preferred_padding = len(node_label)
lines = _fit_with_padding(function_lines, argument_lines,
preferred_padding)
else:
lines = function_lines
leading_padding = _get_leading_padding(lines[0]) + 1
node_line = '{}{}'.format(padding_char * leading_padding, node_label)
return _concatenate([node_line], lines, Alignment.LEFT)
elif comp.is_lambda():
result_lines = _lines_for_comp(comp.result)
leading_padding = _get_leading_padding(result_lines[0])
node_line = '{}{}'.format(padding_char * leading_padding, node_label)
edge_line = '{}|'.format(padding_char * leading_padding)
return _concatenate([node_line, edge_line], result_lines, Alignment.LEFT)
elif comp.is_selection():
source_lines = _lines_for_comp(comp.source)
leading_padding = _get_leading_padding(source_lines[0])
node_line = '{}{}'.format(padding_char * leading_padding, node_label)
edge_line = '{}|'.format(padding_char * leading_padding)
return _concatenate([node_line, edge_line], source_lines, Alignment.LEFT)
elif comp.is_struct():
elements = structure.to_elements(comp)
elements_lines = _lines_for_named_comps(elements)
leading_padding = _get_leading_padding(elements_lines[0])
node_line = '{}{}'.format(padding_char * leading_padding, node_label)
edge_line = '{}|'.format(padding_char * leading_padding)
return _concatenate([node_line, edge_line], elements_lines,
Alignment.LEFT)
else:
raise NotImplementedError('Unexpected type found: {}.'.format(type(comp)))
lines = _lines_for_comp(comp)
lines = [line.rstrip() for line in lines]
return '\n'.join(lines)
# pylint: disable=protected-access
ComputationBuildingBlock._deserializer_dict = {
'reference': Reference.from_proto,
'selection': Selection.from_proto,
'struct': Struct.from_proto,
'call': Call.from_proto,
'lambda': Lambda.from_proto,
'block': Block.from_proto,
'intrinsic': Intrinsic.from_proto,
'data': Data.from_proto,
'placement': Placement.from_proto,
'tensorflow': CompiledComputation,
'xla': CompiledComputation,
}
# pylint: enable=protected-access
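# Illustrative sketch (not from the original module): a stripped-down version
# of the "fit with padding" idea used by the ASCII-art printer above. The name
# `_demo_fit_lines` is hypothetical; the real helpers additionally compute an
# inset from existing leading/trailing whitespace and handle blocks of
# different heights and alignments.
def _demo_fit_lines(left, right, padding=2):
  """Joins two equal-height blocks of text line by line with fixed padding."""
  width = max(len(line) for line in left)
  return [l.ljust(width) + ' ' * padding + r for l, r in zip(left, right)]

# _demo_fit_lines(['aa', 'bb'], ['ccc', 'd']) == ['aa  ccc', 'bb  d']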
|
py | b4026d69a72df202c20740b613ecc0f3d5674cac | # coding: utf-8
"""
Wavefront REST API
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import wavefront_api_client
from wavefront_api_client.models.response_container_derived_metric_definition import ResponseContainerDerivedMetricDefinition # noqa: E501
from wavefront_api_client.rest import ApiException
class TestResponseContainerDerivedMetricDefinition(unittest.TestCase):
"""ResponseContainerDerivedMetricDefinition unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testResponseContainerDerivedMetricDefinition(self):
"""Test ResponseContainerDerivedMetricDefinition"""
# FIXME: construct object with mandatory attributes with example values
# model = wavefront_api_client.models.response_container_derived_metric_definition.ResponseContainerDerivedMetricDefinition() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | b4026d6adeb479dc9b4a948491839105cc59ecbe | # This file is generated by /nfs/software/galaxy_dev/tools/intogen/runtime/pyenv/build/scipy/-c
# It contains system_info results at the time of building this package.
__all__ = ["get_info","show"]
blas_info={'libraries': ['blas'], 'library_dirs': ['/usr/lib'], 'language': 'f77'}
lapack_info={'libraries': ['lapack'], 'library_dirs': ['/usr/lib'], 'language': 'f77'}
atlas_threads_info={}
blas_opt_info={'libraries': ['blas'], 'library_dirs': ['/usr/lib'], 'define_macros': [('NO_ATLAS_INFO', 1)], 'language': 'f77'}
openblas_info={}
umfpack_info={}
atlas_blas_threads_info={}
lapack_opt_info={'libraries': ['lapack', 'blas'], 'library_dirs': ['/usr/lib'], 'define_macros': [('NO_ATLAS_INFO', 1)], 'language': 'f77'}
openblas_lapack_info={}
atlas_info={}
lapack_mkl_info={}
blas_mkl_info={}
atlas_blas_info={}
mkl_info={}
def get_info(name):
g = globals()
return g.get(name, g.get(name + "_info", {}))
def show():
for name,info_dict in globals().items():
if name[0] == "_" or type(info_dict) is not type({}): continue
print(name + ":")
if not info_dict:
print(" NOT AVAILABLE")
for k,v in info_dict.items():
v = str(v)
if k == "sources" and len(v) > 200:
v = v[:60] + " ...\n... " + v[-60:]
print(" %s = %s" % (k,v))
|
py | b4026e0d50c3dc0d6b85567a1518afd4c62a3f51 |
import typing
import zipstream
import math as m
import time
TG_MAX_FILE_SIZE = 2000*1024*1024
class Reader(typing.BinaryIO):
def write(self, s: typing.Union[bytes, bytearray]) -> int:
pass
def mode(self) -> str:
pass
def name(self) -> str:
pass
def close(self) -> None:
pass
def closed(self) -> bool:
pass
def fileno(self) -> int:
pass
def flush(self) -> None:
pass
def isatty(self) -> bool:
pass
def readable(self) -> bool:
pass
def readline(self, limit: int = -1) -> typing.AnyStr:
pass
def readlines(self, hint: int = -1) -> typing.List[typing.AnyStr]:
pass
def seek(self, offset: int, whence: int = 0) -> int:
pass
def seekable(self) -> bool:
pass
def tell(self) -> int:
pass
def truncate(self, size: int = None) -> int:
pass
def writable(self) -> bool:
pass
def write(self, s: typing.AnyStr) -> int:
pass
def writelines(self, lines: typing.List[typing.AnyStr]) -> None:
pass
def __enter__(self) -> 'typing.IO[typing.AnyStr]':
pass
def __exit__(self, type, value, traceback) -> None:
pass
class ZipTorrentContentFile(Reader):
def __init__(self, file_iter, name, size):
self.buf = bytes()
self.processed_size = 0
# self.progress_text = None
self.files_size_sum = 0
file_names_sum = 0
self.zipstream = zipstream.ZipFile(mode='w', compression=zipstream.ZIP_STORED, allowZip64=True)
self.zipstream.write_iter(name, file_iter)
self.files_size_sum += size if size != 0 else 100 * 1024 * 1024 * 1024
file_names_sum += len(name.encode('utf'))
#self.real_size = 21438417 + 205 + 6 #len(files) * (30 + 16 + 46) + 2 * file_names_sum + files_size_sum + 22 + 512
self.real_size = (30 + 16 + 46) + 2 * file_names_sum + self.files_size_sum + 22 + 5120
self.big = self.real_size > TG_MAX_FILE_SIZE
self._size = TG_MAX_FILE_SIZE if self.big else self.real_size
last_repl = False
f_name = ''
for i in name:
if not i.isalnum():
f_name += '_' if last_repl == False else ''
last_repl = True
else:
f_name += i
last_repl = False
self._name = f_name
self.zip_num = 1
self.must_next_file = False
self.zip_parts = m.ceil(self.real_size / TG_MAX_FILE_SIZE)
self.downloaded_bytes_count = 0
self.last_percent = -1
self.should_close = False
self.zipiter = self.zipstream.__aiter__()
self.is_finished = False
self.last_progress_update = time.time()
@property
def size(self):
if self.big:
data_left = self.real_size - (self.zip_num - 1) * TG_MAX_FILE_SIZE
if data_left > TG_MAX_FILE_SIZE:
return TG_MAX_FILE_SIZE
else:
return data_left
else:
return self._size
def close(self):
self.zipstream.close()
def closed(self):
return False
def __enter__(self):
pass
def __exit__(self):
pass
def flush(self):
pass
def isatty(self):
return False
def readable(self):
return True
def readline(self, size=-1):
# future_data = asyncio.run_coroutine_threadsafe(self.read(), client.loop)
# data = future_data.result()
return None
def readlines(self, hint=-1):
# future_data = asyncio.run_coroutine_threadsafe(self.read(), client.loop)
# data = future_data.result()
return None
def seekable(self):
return False
def tell(self):
return 0
def writable(self):
return False
def writelines(self, lines):
return
def __aiter__(self):
return self
async def __anext__(self):
if self.must_next_file:
self.must_next_file = False
raise StopAsyncIteration
data = await self.read(512*1024)
if len(data) == 0 or self.processed_size == 0:
raise StopAsyncIteration
return data
@property
def name(self):
if self.big:
return self._name[:20]+'.zip'+'.{:03d}'.format(self.zip_num)
else:
return self._name + '.zip'
async def read(self, n=-1):
resp = bytes()
if len(self.buf) != 0:
resp = self.buf
self.buf = bytes()
if n == -1:
n = self.size
if n + self.processed_size > TG_MAX_FILE_SIZE:
n = TG_MAX_FILE_SIZE - self.processed_size
elif n + self.processed_size > self.size:
n = self.size - self.processed_size
async for data in self.zipiter:
if data is None:
break
resp += data
if not (len(resp) < n and self.processed_size < TG_MAX_FILE_SIZE):
break
#if time.time() - self.last_progress_update > 2:
# await self.event.edit(self.progress_text.format(str(m.floor((self.downloaded_bytes_count*100) / self.size))))
# self.last_progress_update = time.time()
#resp += await self.zipstream.__aiter__().__next__()
#if len(resp) == 0 and self.should_close == False:
# print("\nSHOULD CLOSE CALL\n")
# self.zipiter = iter(self.zipstream)
# self.should_close = True
# continue
if len(resp) > n:
self.buf = resp[n:]
resp = resp[0:n]
if len(resp) != 0 and n == 0:
# send last piece
self.processed_size += len(resp)
return resp
self.processed_size += len(resp)
if self.processed_size >= TG_MAX_FILE_SIZE:
#if self.is_finished == False and self.real_size - TG_MAX_FILE_SIZE <= 0:
# self.real_size += 1024
# TG_MAX_FILE_SIZE = TG_MAX_FILE_SIZE if self.should_split else self.real_size
# self.big = self.real_size > TG_MAX_FILE_SIZE
# self.size = TG_MAX_FILE_SIZE if self.big else self.real_size
#else:
self.processed_size = 0
self.must_next_file = True
#self.real_size -= TG_MAX_FILE_SIZE
# self._size = TG_MAX_FILE_SIZE if self.real_size > TG_MAX_FILE_SIZE else self.real_size
return resp
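# Illustrative sketch (an assumption, not from the original code): one way a
# caller might consume the archive part by part. `upload_part` is a
# hypothetical coroutine; the real bot code driving this class may differ.
async def _demo_upload(content, upload_part):
    for _ in range(content.zip_parts):
        chunks = []
        async for chunk in content:  # iteration stops at each part boundary
            chunks.append(chunk)
        await upload_part(content.name, b''.join(chunks))  # e.g. "<name>.zip.001"
        content.zip_num += 1  # advance to the next numbered part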
|
py | b4026ed6305887a343d9905267d1779a17a0fd76 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import time
from genshi.template import TemplateLoader
def test():
base_path = os.path.dirname(os.path.abspath(__file__))
loader = TemplateLoader([base_path], auto_reload=True)
start = time.clock()
tmpl = loader.load('test.html')
print ' --> parse stage: %.4f ms' % ((time.clock() - start) * 1000)
data = dict(hello='<world>', skin='default', hey='ZYX', bozz=None,
items=['Number %d' % num for num in range(1, 15)],
prefix='#')
print tmpl.generate(**data).render(method='html')
times = []
for i in range(1000):
start = time.clock()
list(tmpl.generate(**data))
times.append(time.clock() - start)
sys.stdout.write('.')
sys.stdout.flush()
print
print ' --> render stage: %s ms (average)' % (
(sum(times) / len(times) * 1000))
if __name__ == '__main__':
if '-p' in sys.argv:
import hotshot, hotshot.stats
prof = hotshot.Profile("template.prof")
benchtime = prof.runcall(test)
stats = hotshot.stats.load("template.prof")
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats()
else:
test()
|
py | b4027043c1acaf5a0cc71c8566acadda46c64eb6 | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for movinet_layers.py."""
from absl.testing import parameterized
import tensorflow as tf
from official.projects.movinet.modeling import movinet_layers
from official.vision.modeling.layers import nn_layers
class MovinetLayersTest(parameterized.TestCase, tf.test.TestCase):
def test_squeeze3d(self):
squeeze = movinet_layers.Squeeze3D()
inputs = tf.ones([5, 1, 1, 1, 3])
predicted = squeeze(inputs)
expected = tf.ones([5, 3])
self.assertEqual(predicted.shape, expected.shape)
self.assertAllEqual(predicted, expected)
def test_mobile_conv2d(self):
conv2d = movinet_layers.MobileConv2D(
filters=3,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
kernel_initializer='ones',
use_bias=False,
use_depthwise=False,
use_temporal=False,
use_buffered_input=True,
)
inputs = tf.ones([1, 2, 2, 2, 3])
predicted = conv2d(inputs)
expected = tf.constant(
[[[[[12., 12., 12.],
[12., 12., 12.]],
[[12., 12., 12.],
[12., 12., 12.]]],
[[[12., 12., 12.],
[12., 12., 12.]],
[[12., 12., 12.],
[12., 12., 12.]]]]])
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
def test_mobile_conv2d_bn(self):
batch_norm_op = tf.keras.layers.BatchNormalization(
momentum=0.9,
epsilon=1.,
name='bn')
conv2d = movinet_layers.MobileConv2D(
filters=3,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
kernel_initializer='ones',
use_bias=False,
use_depthwise=False,
use_temporal=False,
use_buffered_input=True,
batch_norm_op=batch_norm_op,
)
inputs = tf.ones([1, 2, 2, 2, 3])
predicted = conv2d(inputs)
expected = tf.constant(
[[[[[8.48528, 8.48528, 8.48528],
[8.48528, 8.48528, 8.48528]],
[[8.48528, 8.48528, 8.48528],
[8.48528, 8.48528, 8.48528]]],
[[[8.48528, 8.48528, 8.48528],
[8.48528, 8.48528, 8.48528]],
[[8.48528, 8.48528, 8.48528],
[8.48528, 8.48528, 8.48528]]]]])
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
def test_mobile_conv2d_activation(self):
conv2d = movinet_layers.MobileConv2D(
filters=3,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
kernel_initializer='ones',
use_bias=False,
use_depthwise=False,
use_temporal=False,
use_buffered_input=True,
activation_op=tf.nn.relu6,
)
inputs = tf.ones([1, 2, 2, 2, 3])
predicted = conv2d(inputs)
expected = tf.constant(
[[[[[6., 6., 6.],
[6., 6., 6.]],
[[6., 6., 6.],
[6., 6., 6.]]],
[[[6., 6., 6.],
[6., 6., 6.]],
[[6., 6., 6.],
[6., 6., 6.]]]]])
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
def test_mobile_conv2d_temporal(self):
conv2d = movinet_layers.MobileConv2D(
filters=3,
kernel_size=(3, 1),
strides=(1, 1),
padding='causal',
kernel_initializer='ones',
use_bias=False,
use_depthwise=True,
use_temporal=True,
use_buffered_input=True,
)
inputs = tf.ones([1, 2, 2, 1, 3])
paddings = [[0, 0], [2, 0], [0, 0], [0, 0], [0, 0]]
padded_inputs = tf.pad(inputs, paddings)
predicted = conv2d(padded_inputs)
expected = tf.constant(
[[[[[1., 1., 1.]],
[[1., 1., 1.]]],
[[[2., 2., 2.]],
[[2., 2., 2.]]]]])
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
def test_stream_buffer(self):
conv3d_stream = nn_layers.Conv3D(
filters=3,
kernel_size=(3, 3, 3),
strides=(1, 2, 2),
padding='causal',
kernel_initializer='ones',
use_bias=False,
use_buffered_input=True,
)
buffer = movinet_layers.StreamBuffer(buffer_size=2)
conv3d = nn_layers.Conv3D(
filters=3,
kernel_size=(3, 3, 3),
strides=(1, 2, 2),
padding='causal',
kernel_initializer='ones',
use_bias=False,
use_buffered_input=False,
)
inputs = tf.ones([1, 4, 2, 2, 3])
expected = conv3d(inputs)
for num_splits in [1, 2, 4]:
frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1)
states = {}
predicted = []
for frame in frames:
x, states = buffer(frame, states=states)
x = conv3d_stream(x)
predicted.append(x)
predicted = tf.concat(predicted, axis=1)
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
self.assertAllClose(
predicted,
[[[[[12., 12., 12.]]],
[[[24., 24., 24.]]],
[[[36., 36., 36.]]],
[[[36., 36., 36.]]]]])
def test_stream_conv_block_2plus1d(self):
conv_block = movinet_layers.ConvBlock(
filters=3,
kernel_size=(3, 3, 3),
strides=(1, 2, 2),
causal=True,
kernel_initializer='ones',
use_bias=False,
activation='relu',
conv_type='2plus1d',
)
stream_conv_block = movinet_layers.StreamConvBlock(
filters=3,
kernel_size=(3, 3, 3),
strides=(1, 2, 2),
causal=True,
kernel_initializer='ones',
use_bias=False,
activation='relu',
conv_type='2plus1d',
)
inputs = tf.ones([1, 4, 2, 2, 3])
expected = conv_block(inputs)
predicted_disabled, _ = stream_conv_block(inputs)
self.assertEqual(predicted_disabled.shape, expected.shape)
self.assertAllClose(predicted_disabled, expected)
for num_splits in [1, 2, 4]:
frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1)
states = {}
predicted = []
for frame in frames:
x, states = stream_conv_block(frame, states=states)
predicted.append(x)
predicted = tf.concat(predicted, axis=1)
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
self.assertAllClose(
predicted,
[[[[[35.9640400, 35.9640400, 35.9640400]]],
[[[71.9280700, 71.9280700, 71.9280700]]],
[[[107.892105, 107.892105, 107.892105]]],
[[[107.892105, 107.892105, 107.892105]]]]])
def test_stream_conv_block_3d_2plus1d(self):
conv_block = movinet_layers.ConvBlock(
filters=3,
kernel_size=(3, 3, 3),
strides=(1, 2, 2),
causal=True,
kernel_initializer='ones',
use_bias=False,
activation='relu',
conv_type='3d_2plus1d',
)
stream_conv_block = movinet_layers.StreamConvBlock(
filters=3,
kernel_size=(3, 3, 3),
strides=(1, 2, 2),
causal=True,
kernel_initializer='ones',
use_bias=False,
activation='relu',
conv_type='3d_2plus1d',
)
inputs = tf.ones([1, 4, 2, 2, 3])
expected = conv_block(inputs)
predicted_disabled, _ = stream_conv_block(inputs)
self.assertEqual(predicted_disabled.shape, expected.shape)
self.assertAllClose(predicted_disabled, expected)
for num_splits in [1, 2, 4]:
frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1)
states = {}
predicted = []
for frame in frames:
x, states = stream_conv_block(frame, states=states)
predicted.append(x)
predicted = tf.concat(predicted, axis=1)
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
self.assertAllClose(
predicted,
[[[[[35.9640400, 35.9640400, 35.9640400]]],
[[[71.9280700, 71.9280700, 71.9280700]]],
[[[107.892105, 107.892105, 107.892105]]],
[[[107.892105, 107.892105, 107.892105]]]]])
def test_stream_conv_block(self):
conv_block = movinet_layers.ConvBlock(
filters=3,
kernel_size=(3, 3, 3),
strides=(1, 2, 2),
causal=True,
kernel_initializer='ones',
use_bias=False,
activation='relu',
)
stream_conv_block = movinet_layers.StreamConvBlock(
filters=3,
kernel_size=(3, 3, 3),
strides=(1, 2, 2),
causal=True,
kernel_initializer='ones',
use_bias=False,
activation='relu',
)
inputs = tf.ones([1, 4, 2, 2, 3])
expected = conv_block(inputs)
predicted_disabled, _ = stream_conv_block(inputs)
self.assertEqual(predicted_disabled.shape, expected.shape)
self.assertAllClose(predicted_disabled, expected)
for num_splits in [1, 2, 4]:
frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1)
states = {}
predicted = []
for frame in frames:
x, states = stream_conv_block(frame, states=states)
predicted.append(x)
predicted = tf.concat(predicted, axis=1)
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
self.assertAllClose(
predicted,
[[[[[11.994005, 11.994005, 11.994005]]],
[[[23.988010, 23.988010, 23.988010]]],
[[[35.982014, 35.982014, 35.982014]]],
[[[35.982014, 35.982014, 35.982014]]]]])
def test_stream_squeeze_excitation(self):
se = movinet_layers.StreamSqueezeExcitation(
3, causal=True, kernel_initializer='ones')
inputs = tf.range(4, dtype=tf.float32) + 1.
inputs = tf.reshape(inputs, [1, 4, 1, 1, 1])
inputs = tf.tile(inputs, [1, 1, 2, 1, 3])
expected, _ = se(inputs)
for num_splits in [1, 2, 4]:
frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1)
states = {}
predicted = []
for frame in frames:
x, states = se(frame, states=states)
predicted.append(x)
predicted = tf.concat(predicted, axis=1)
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected, 1e-5, 1e-5)
self.assertAllClose(
predicted,
[[[[[0.9998109, 0.9998109, 0.9998109]],
[[0.9998109, 0.9998109, 0.9998109]]],
[[[1.9999969, 1.9999969, 1.9999969]],
[[1.9999969, 1.9999969, 1.9999969]]],
[[[3., 3., 3.]],
[[3., 3., 3.]]],
[[[4., 4., 4.]],
[[4., 4., 4.]]]]],
1e-5, 1e-5)
def test_stream_squeeze_excitation_2plus3d(self):
se = movinet_layers.StreamSqueezeExcitation(
3,
se_type='2plus3d',
causal=True,
activation='hard_swish',
gating_activation='hard_sigmoid',
kernel_initializer='ones')
inputs = tf.range(4, dtype=tf.float32) + 1.
inputs = tf.reshape(inputs, [1, 4, 1, 1, 1])
inputs = tf.tile(inputs, [1, 1, 2, 1, 3])
expected, _ = se(inputs)
for num_splits in [1, 2, 4]:
frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1)
states = {}
predicted = []
for frame in frames:
x, states = se(frame, states=states)
predicted.append(x)
predicted = tf.concat(predicted, axis=1)
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected, atol=1e-4)
self.assertAllClose(
predicted,
[[[[[1., 1., 1.]],
[[1., 1., 1.]]],
[[[2., 2., 2.]],
[[2., 2., 2.]]],
[[[3., 3., 3.]],
[[3., 3., 3.]]],
[[[4., 4., 4.]],
[[4., 4., 4.]]]]],
atol=1e-4)
def test_stream_movinet_block(self):
block = movinet_layers.MovinetBlock(
out_filters=3,
expand_filters=6,
kernel_size=(3, 3, 3),
strides=(1, 2, 2),
causal=True,
)
inputs = tf.range(4, dtype=tf.float32) + 1.
inputs = tf.reshape(inputs, [1, 4, 1, 1, 1])
inputs = tf.tile(inputs, [1, 1, 2, 1, 3])
expected, _ = block(inputs)
for num_splits in [1, 2, 4]:
frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1)
states = {}
predicted = []
for frame in frames:
x, states = block(frame, states=states)
predicted.append(x)
predicted = tf.concat(predicted, axis=1)
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
def test_stream_movinet_block_none_se(self):
block = movinet_layers.MovinetBlock(
out_filters=3,
expand_filters=6,
kernel_size=(3, 3, 3),
strides=(1, 2, 2),
causal=True,
se_type='none',
state_prefix='test',
)
inputs = tf.range(4, dtype=tf.float32) + 1.
inputs = tf.reshape(inputs, [1, 4, 1, 1, 1])
inputs = tf.tile(inputs, [1, 1, 2, 1, 3])
expected, expected_states = block(inputs)
for num_splits in [1, 2, 4]:
frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1)
states = {}
predicted = []
for frame in frames:
x, states = block(frame, states=states)
predicted.append(x)
predicted = tf.concat(predicted, axis=1)
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
self.assertAllEqual(list(expected_states.keys()), ['test_stream_buffer'])
def test_stream_classifier_head(self):
head = movinet_layers.Head(project_filters=5)
classifier_head = movinet_layers.ClassifierHead(
head_filters=10, num_classes=4)
inputs = tf.range(4, dtype=tf.float32) + 1.
inputs = tf.reshape(inputs, [1, 4, 1, 1, 1])
inputs = tf.tile(inputs, [1, 1, 2, 1, 3])
x, _ = head(inputs)
expected = classifier_head(x)
for num_splits in [1, 2, 4]:
frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1)
states = {}
for frame in frames:
x, states = head(frame, states=states)
predicted = classifier_head(x)
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
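# Illustrative helper (not part of the original test file): the streaming
# pattern the tests above exercise, factored out for clarity. A stateful
# MoViNet layer is fed chunks of frames while threading the `states` dict
# between calls; concatenating the per-chunk outputs should match a single
# call on the full clip.
def _run_streaming(layer, clip, frames_per_chunk=1):
  states = {}
  outputs = []
  for chunk in tf.split(clip, clip.shape[1] // frames_per_chunk, axis=1):
    x, states = layer(chunk, states=states)
    outputs.append(x)
  return tf.concat(outputs, axis=1)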
if __name__ == '__main__':
tf.test.main()
|
py | b40271c51a7ecec6d556caa880271d649dbf2aaf | import sys
import numpy
from struct import pack
from .constants import bit_depth, NUMPY_DTYPE, SAMPLE_RATE
from .utils import percent_to_db, dbfs_as_percent
import pyaudio
import wave
CHUNK_SIZE = 1024
NUM_CHANNELS = 2
PA_FORMAT = pyaudio.paInt16 if bit_depth == 16 else pyaudio.paInt32
FORMAT_CHAR = 'h' if bit_depth == 16 else 'i'
if not sys.platform == "win32":
GO_UP = "\033[F"
ERASE = "\033[2K"
else:
GO_UP = "\n"
ERASE = ""
def is_silent(snd_data, threshold):
maxval = max(
abs(numpy.amax(snd_data)),
abs(numpy.amin(snd_data))
) / float(2 ** (bit_depth - 1))
return maxval < threshold
def get_input_device_names(py_audio, info):
input_interface_names = {}
for i in range(0, info.get('deviceCount')):
device_info = py_audio.get_device_info_by_host_api_device_index(0, i)
if device_info.get('maxInputChannels') > 0:
input_interface_names[i] = device_info.get('name')
return input_interface_names
def get_input_device_index(py_audio, audio_interface_name=None):
info = py_audio.get_host_api_info_by_index(0)
input_interface_names = get_input_device_names(py_audio, info)
if audio_interface_name:
for index, name in input_interface_names.items():
if audio_interface_name.lower() in name.lower():
return index
else:
raise Exception(
"Could not find audio input '%s' in inputs:\n%s" % (
audio_interface_name,
list_input_devices(input_interface_names)))
def get_input_device_name_by_index(audio_interface_index):
py_audio = pyaudio.PyAudio()
info = py_audio.get_host_api_info_by_index(0)
input_interface_names = get_input_device_names(py_audio, info)
for index, name in input_interface_names.items():
if index == audio_interface_index:
return name
else:
raise Exception(
"Could not find audio input index %s in inputs:\n%s" % (
audio_interface_index,
list_input_devices(input_interface_names)))
def list_input_devices(device_names):
lines = []
for index, name in sorted(device_names.items()):
lines.append("{:3d}. {}".format(index, name))
return "\n".join(lines).encode("ascii", "ignore")
def record(
limit=None,
after_start=None,
on_time_up=None,
threshold=0.00025,
print_progress=True,
allow_empty_return=False,
audio_interface_name=None,
sample_rate=SAMPLE_RATE,
):
p = pyaudio.PyAudio()
input_device_index = get_input_device_index(p, audio_interface_name)
stream = p.open(
format=PA_FORMAT,
channels=NUM_CHANNELS,
rate=sample_rate,
input=True,
output=False,
frames_per_buffer=CHUNK_SIZE,
input_device_index=input_device_index
)
num_silent = 0
silence_timeout = sample_rate * 2.0
snd_started = False
in_tail = False
release_time = None
if print_progress:
sys.stderr.write("\n")
peak_value = None
peak_index = None
data = []
total_length = 0
while 1:
if total_length > 0 and after_start is not None:
after_start()
after_start = None # don't call back again
array = stream.read(CHUNK_SIZE)
        snd_data = numpy.frombuffer(array, dtype=NUMPY_DTYPE)  # frombuffer replaces the deprecated numpy.fromstring
snd_data = numpy.reshape(snd_data, (2, -1), 'F')
peak_in_buffer = numpy.amax(numpy.absolute(snd_data), 1)
peak_in_buffer_idx = numpy.argmax(numpy.absolute(snd_data))
mono_peak_in_buffer = max(peak_in_buffer)
if peak_value is None or peak_value < mono_peak_in_buffer:
peak_value = mono_peak_in_buffer
peak_index = total_length + peak_in_buffer_idx
data.append(snd_data)
total_length += len(snd_data[0])
total_duration_seconds = float(total_length) / sample_rate
time_since_peak = total_length - peak_index
peak_pct = mono_peak_in_buffer / peak_value
if time_since_peak:
estimated_remaining_duration = peak_pct / time_since_peak
else:
estimated_remaining_duration = 1
if print_progress:
raw_percentages = (
                peak_in_buffer.astype(numpy.float64) /
float(2 ** (bit_depth - 1))
)
dbfs = [percent_to_db(x) for x in raw_percentages]
pct_loudness = [dbfs_as_percent(db) for db in dbfs]
sys.stderr.write(ERASE)
sys.stderr.write("\t%2.2f secs\t" % total_duration_seconds)
sys.stderr.write("% 7.2f dBFS\t\t|%s%s|\n" % (
dbfs[0],
int(40 * pct_loudness[0]) * '=',
int(40 * (1 - pct_loudness[0])) * ' ',
))
sys.stderr.write(ERASE)
sys.stderr.write("\t\t\t% 7.2f dBFS\t\t|%s%s|\n" % (
dbfs[1],
int(40 * pct_loudness[1]) * '=',
int(40 * (1 - pct_loudness[1])) * ' ',
))
pct_silence_end = float(num_silent) / silence_timeout
estimated_remaining_duration_string = \
"est. remaining duration: %2.2f secs" % (
estimated_remaining_duration
)
if in_tail:
sys.stderr.write(ERASE)
sys.stderr.write("\t\treleasing\t\tsilence:|%s%s| %s" % (
int(40 * pct_silence_end) * '=',
int(40 * (1 - pct_silence_end)) * ' ',
estimated_remaining_duration_string,
))
else:
sys.stderr.write(ERASE)
sys.stderr.write("\t\t\t\t\tsilence:|%s%s| %s" % (
int(40 * pct_silence_end) * '=',
int(40 * (1 - pct_silence_end)) * ' ',
estimated_remaining_duration_string,
))
sys.stderr.write(GO_UP)
sys.stderr.write(GO_UP)
silent = is_silent(snd_data, threshold)
if silent:
num_silent += CHUNK_SIZE
elif not snd_started:
snd_started = True
else:
num_silent = 0
if num_silent > silence_timeout:
if on_time_up is not None:
on_time_up()
break
elif not in_tail \
and limit is not None \
and total_duration_seconds >= limit:
if on_time_up is not None:
if on_time_up():
num_silent = 0
in_tail = True
release_time = total_duration_seconds
else:
break
else:
break
if print_progress:
sys.stderr.write("\n\n\n")
# TODO this is inefficient, should preallocate a huge
# array up front and then just copy into it maybe?
# but not in the tight loop, what if that causes the clicks?
r = numpy.empty([NUM_CHANNELS, 0], dtype=NUMPY_DTYPE)
for chunk in data:
r = numpy.concatenate((r, chunk), axis=1)
sample_width = p.get_sample_size(PA_FORMAT)
stream.stop_stream()
stream.close()
p.terminate()
if snd_started or allow_empty_return:
return sample_width, r, release_time
else:
return sample_width, None, release_time
def record_to_file(
path,
limit,
after_start=None,
on_time_up=None,
sample_rate=SAMPLE_RATE
):
    sample_width, data, release_time = record(
        limit,
        after_start,
        on_time_up,
        # Pass sample_rate by keyword: the fourth positional parameter of
        # record() is `threshold`, so passing it positionally would be a bug.
        sample_rate=sample_rate,
    )
if data is not None:
save_to_file(path, sample_width, data, sample_rate)
return path
else:
return None
# TODO: WAVE files with bit depths > 16 should use extensible format:
# http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/WAVE.html
def save_to_file(path, sample_width, data, sample_rate=SAMPLE_RATE):
wf = wave.open(path, 'wb')
wf.setnchannels(NUM_CHANNELS)
wf.setsampwidth(sample_width)
wf.setframerate(sample_rate)
flattened = numpy.asarray(data.flatten('F'), dtype=NUMPY_DTYPE)
write_chunk_size = 512
for chunk_start in range(0, len(flattened), write_chunk_size):
chunk = flattened[chunk_start:chunk_start + write_chunk_size]
packstring = '<' + (FORMAT_CHAR * len(chunk))
wf.writeframes(pack(packstring, *chunk))
wf.close()
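# Illustrative sketch (not part of the original module): a minimal end-to-end
# capture using keyword arguments, which avoids mis-binding `threshold` in
# record()'s long signature. The default input device is used; pass a
# substring of a device name via audio_interface_name to select another.
def demo_record(path='demo_take.wav', seconds=5):
    sample_width, samples, _release = record(
        limit=seconds,
        print_progress=True,
        audio_interface_name=None,
        sample_rate=SAMPLE_RATE,
    )
    if samples is not None:
        save_to_file(path, sample_width, samples, SAMPLE_RATE)
        return path
    return None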
if __name__ == '__main__':
    print(record_to_file('./demo.wav', float(sys.argv[1]) if len(sys.argv) > 1 else None))
print("done - result written to demo.wav")
|
py | b40272f823edc462e39f2422c36afd5feb16abcd | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
def get_pytext_home():
internal_home = os.path.realpath(os.path.join(__file__, "../../"))
oss_home = os.path.realpath(os.path.join(__file__, "../../../"))
default_home = ""
if os.path.exists(os.path.join(internal_home, "tests")):
default_home = internal_home
elif os.path.exists(os.path.join(oss_home, "tests")):
default_home = oss_home
else:
raise Exception("Can't find PYTEXT_HOME")
pytext_home = os.environ.get("PYTEXT_HOME", default_home)
print(f"PYTEXT_HOME: {pytext_home}")
return pytext_home
PYTEXT_HOME = get_pytext_home()
def get_absolute_path(path):
return (
path
if path.startswith("/")
else os.path.realpath(os.path.join(PYTEXT_HOME, path))
)
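# Illustrative usage (not part of the original module): relative paths are
# resolved against PYTEXT_HOME, absolute paths are returned unchanged.
if __name__ == "__main__":
    print(get_absolute_path("tests"))       # -> <PYTEXT_HOME>/tests
    print(get_absolute_path("/tmp/model"))  # -> /tmp/model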
|
py | b402735670d7a8b4944a86640eebb14cab203ea2 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 8 15:20:21 2020
@author: miyazakishinichi
"""
import os
import sys
import pandas as pd
from tkinter import messagebox
def csv_file_read(filepath):
file_dir, file_name = os.path.split(filepath)
base, ext = os.path.splitext(file_name)
if ext == '.csv':
data = pd.read_csv(filepath, index_col = 0)
return data
else:
return messagebox.showinfo('error',
'selected file is not csv file')
def image_list_extraction(directory_path):
filelist = os.listdir(directory_path)
    filelist = [i for i in filelist if os.path.splitext(i)[1] == '.jpg'
                or os.path.splitext(i)[1] == '.png']
    # The source appears to be cut off here; returning the filtered list is
    # the assumed intent of this helper.
    return filelist
|