id | text | dataset_id
---|---|---|
1840691
|
# -*- coding: utf-8 -*-
from django import forms
from .models import Example
class VideoForm(forms.ModelForm):
class Meta:
model = Example
exclude = []
widgets = {
'video': forms.ClearableFileInput,
}
# class Media:
# css = {
# "all": [
# "css/bootstrap.css",
# "css/bootstrap-progressbar-2.3.2.css",
# ]
# }
# js = [
# 'js/jquery.js',
# 'js/bootstrap.js',
# 'js/bootstrap-progressbar.js',
# ]
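
# --- Usage sketch (not part of the original module) -------------------------
# A minimal, hypothetical view showing how VideoForm consumes an uploaded
# file; the 'example_form.html' template name is an assumption.
def upload_video(request):
    from django.shortcuts import render  # local import: sketch only
    form = VideoForm(request.POST or None, request.FILES or None)
    if request.method == 'POST' and form.is_valid():
        form.save()
    return render(request, 'example_form.html', {'form': form})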
|
StarcoderdataPython
|
80326
|
from django.test import TestCase
from addressbase.models import UprnToCouncil, Address
from councils.tests.factories import CouncilFactory
from data_importers.tests.stubs import stub_addressimport
# High-level functional tests for import scripts
class ImporterTest(TestCase):
opts = {"nochecks": True, "verbosity": 0}
def set_up(self, addressbase, uprns, addresses_name):
for address in addressbase:
Address.objects.update_or_create(**address)
for uprn in uprns:
UprnToCouncil.objects.update_or_create(pk=uprn, lad="X01000000")
CouncilFactory(pk="ABC", identifiers=["X01000000"])
cmd = stub_addressimport.Command()
cmd.addresses_name = addresses_name
cmd.handle(**self.opts)
def test_duplicate_uprns(self):
"""
In the csv there are two matching uprns with different polling station ids.
Despite one appearing in addressbase, neither should be imported.
"""
test_params = {
"uprns": ["1", "2", "6"],
"addressbase": [
{
"address": "Another Haringey Park, London",
"uprn": "1",
"postcode": "N8 8NM",
},
{"address": "Haringey Park, London", "uprn": "2", "postcode": "N8 9JG"},
{
"address": "80 Pine Vale Cres, Bournemouth",
"uprn": "6",
"postcode": "BH10 6BJ",
},
],
"addresses_name": "duplicate_uprns.csv",
}
self.set_up(**test_params)
imported_uprns = (
UprnToCouncil.objects.filter(lad="X01000000")
.exclude(polling_station_id="")
.order_by("uprn")
.values_list("uprn", "polling_station_id")
)
self.assertEqual(1, len(imported_uprns))
expected = {("6", "2")}
self.assertEqual(set(imported_uprns), expected)
def test_uprn_not_in_addressbase(self):
"""uprn does not appear in addressbase data, or in UprnToCouncil table"""
test_params = {
"uprns": ["6"],
"addressbase": [
{"address": "3 Factory Rd, Poole", "uprn": "4", "postcode": "BH16 5HT"},
{
"address": "80 Pine Vale Cres, Bournemouth",
"uprn": "6",
"postcode": "BH10 6BJ",
},
],
"addresses_name": "uprn_missing.csv",
}
self.set_up(**test_params)
imported_uprns = (
UprnToCouncil.objects.filter(lad="X01000000")
.exclude(polling_station_id="")
.order_by("uprn")
.values_list("uprn", "polling_station_id")
)
self.assertEqual(1, len(imported_uprns))
expected = {("6", "2")}
self.assertEqual(set(imported_uprns), expected)
def test_uprn_assigned_to_wrong_council(self):
"""Uprn exists but we've located it in a different council in UprnToCouncil table"""
test_params = {
"uprns": ["6"],
"addressbase": [
{"address": "3 Factory Rd, Poole", "uprn": "4", "postcode": "BH16 5HT"},
{
"address": "80 Pine Vale Cres, Bournemouth",
"uprn": "6",
"postcode": "BH10 6BJ",
},
],
"addresses_name": "uprn_missing.csv",
}
self.set_up(**test_params)
UprnToCouncil.objects.update_or_create(pk=4, lad="X01000002")
imported_uprns = (
UprnToCouncil.objects.filter(lad="X01000000")
.exclude(polling_station_id="")
.order_by("uprn")
.values_list("uprn", "polling_station_id")
)
self.assertEqual(1, len(imported_uprns))
expected = {("6", "2")}
self.assertEqual(set(imported_uprns), expected)
def test_postcode_mismatch(self):
"""Uprn exists but postcodes don't match"""
test_params = {
"uprns": ["4", "7"],
"addressbase": [
{"address": "3 Factory Rd, Poole", "uprn": "4", "postcode": "BH16 5HT"},
{
"address": "4 Factory Rd, Poole",
"uprn": "7",
"postcode": "BH16 5HT", # postcode is 'BH17 5HT' in csv
},
],
"addresses_name": "uprn_missing.csv",
}
self.set_up(**test_params)
imported_uprns = (
UprnToCouncil.objects.filter(lad="X01000000")
.exclude(polling_station_id="")
.order_by("uprn")
.values_list("uprn", "polling_station_id")
)
self.assertEqual(1, len(imported_uprns))
expected = {("4", "1")}
self.assertEqual(set(imported_uprns), expected)
def test_address_import(self):
test_params = {
"uprns": ["1", "3", "4", "5", "6", "7"],
"addressbase": [
{"address": "Haringey Park, London", "uprn": "1", "postcode": "N8 9JG"},
# uprn '2' in addresses.csv but wasn't in addressbase so not in uprntocouncil either
{
"address": "36 Abbots Park, London",
"uprn": "3",
"postcode": "SW2 3QD",
},
{"address": "3 Factory Rd, Poole", "uprn": "4", "postcode": "BH16 5HT"},
{
"address": "5-6 Mickleton Dr, Southport",
"uprn": "5",
"postcode": "PR8 2QX",
},
{
"address": "80 Pine Vale Cres, Bournemouth",
"uprn": "6",
"postcode": "BH10 6BJ",
},
{
"address": "4 Factory Rd, Poole",
"uprn": "7",
"postcode": "BH16 5HT", # postcode is 'BH17 5HT' in csv
},
],
"addresses_name": "addresses.csv",
}
self.set_up(**test_params)
imported_uprns = (
UprnToCouncil.objects.filter(lad="X01000000")
.exclude(polling_station_id="")
.order_by("uprn")
.values_list("uprn", "polling_station_id")
)
self.assertEqual(3, len(imported_uprns))
expected = {("3", "3"), ("4", "1"), ("6", "2")}
self.assertEqual(set(imported_uprns), expected)
|
StarcoderdataPython
|
6521789
|
<filename>backend/model_api/urls.py
from django.urls import path
from model_api import views
urlpatterns = [
path('affected_by/', views.affected_by),
path('areas/', views.areas),
path('cumulative_infections/', views.cumulative_infections),
path('predict/', views.predict),
path('predict_all/', views.predict_all),
path('models/', views.models),
path('infection_models/', views.infection_models),
path('death_models/', views.death_models),
path('current_date/', views.getCurrentDate),
path('scores/', views.scores),
path('scores_all/', views.scores_all),
path('check_history/', views.check_history),
path('history_cumulative/', views.history_cumulative),
path('latest_score_date/', views.latest_score_date),
path('cumulative_death/', views.cumulative_death),
path('all_mrf_scores/', views.all_mrf_scores),
path('get_maximum/', views.get_maximum)
]
|
StarcoderdataPython
|
5178258
|
import json
import uuid
import aiohttp
import pytest
HOSTNAME, PATH = 'https://test.ar', 'index'
async def test_match_request(ar, session):
ar.get(HOSTNAME, PATH, handler={})
try:
await session.get(f'{HOSTNAME}/{PATH}')
except aiohttp.ClientConnectionError as e:
pytest.fail(f'Should not have returned {e}', pytrace=True)
async def test_no_match(ar, session):
with pytest.raises(aiohttp.ClientConnectionError):
await session.get(f'{HOSTNAME}/{PATH}')
async def test_response_single_use(ar, session):
ar.get(HOSTNAME, PATH, handler={})
await session.get(f'{HOSTNAME}/{PATH}')
with pytest.raises(aiohttp.ClientConnectionError):
        await session.get(f'{HOSTNAME}/{PATH}')
async def test_post_response(ar, session):
ar.post(HOSTNAME, PATH, handler={})
try:
await session.post(f'{HOSTNAME}/{PATH}')
except aiohttp.ClientConnectionError as e:
pytest.fail(f'Should not have returned {e}', pytrace=True)
async def test_get_response(ar, session):
ar.get(HOSTNAME, PATH, handler={})
try:
await session.get(f'{HOSTNAME}/{PATH}')
except aiohttp.ClientConnectionError as e:
pytest.fail(f'Should not have returned {e}', pytrace=True)
async def test_dict_handler(ar, session):
payload = {'test': 'test'}
ar.get(HOSTNAME, PATH, handler=payload)
resp = await session.get(f'{HOSTNAME}/{PATH}')
json = await resp.json()
assert json == payload
async def test_callable_handler(ar, session):
def handler(data, **kwargs):
return kwargs
kwargs = {'json': '{"key": "val"}'}
ar.post(HOSTNAME, PATH, handler=handler)
resp = await session.post(f'{HOSTNAME}/{PATH}', **kwargs)
json = await resp.json()
assert json == kwargs
async def test_str_handler(ar, session):
ar.get(HOSTNAME, PATH, handler='test')
response = await session.get(f'{HOSTNAME}/{PATH}')
with pytest.raises(json.JSONDecodeError):
await response.json()
async def test_body_not_serializable(ar, session):
with pytest.raises(ValueError):
ar.post(HOSTNAME, PATH, handler={})
await session.post(f'{HOSTNAME}/{PATH}', json={'id': uuid.uuid4()})
async def test_exception_handler(ar, session):
with pytest.raises(ZeroDivisionError):
ar.get(HOSTNAME, PATH, handler=ZeroDivisionError())
await session.get(f'{HOSTNAME}/{PATH}')
async def test_raise_for_status(ar, session):
ar.get(HOSTNAME, PATH, handler={'test': 'test'}, status=500)
with pytest.raises(aiohttp.ClientResponseError):
await session.get(f'{HOSTNAME}/{PATH}', raise_for_status=True)
ar.get(HOSTNAME, PATH, handler=ZeroDivisionError())
with pytest.raises(aiohttp.ClientResponseError):
await session.get(f'{HOSTNAME}/{PATH}', raise_for_status=True)
async def test_preserve_response_class(ar):
class CustomResponse(aiohttp.ClientResponse):
async def json(self, *args, **kwargs):
return {'hello': 'world'}
ar.get(HOSTNAME, PATH, handler={})
async with aiohttp.ClientSession(response_class=CustomResponse) as session:
response = await session.get(f'{HOSTNAME}/{PATH}')
assert await response.json() == {'hello': 'world'}
assert isinstance(response, CustomResponse)
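
# Fixture sketch (assumption): these tests rely on 'ar' and 'session' fixtures
# defined elsewhere (e.g. a conftest.py). The mock object behind 'ar' is
# project-specific; the session fixture is plausibly just:
#
#     @pytest.fixture
#     async def session():
#         async with aiohttp.ClientSession() as s:
#             yield s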
|
StarcoderdataPython
|
3428963
|
import argparse
import collections
import inspect
import re
import signal
import sys
from IPython import embed  # assumption: embed() in the SIGINT handler below comes from IPython
from datetime import datetime as dt
import numpy as np
def argparsify(f, test=None):
args, _, _, defaults = inspect.getargspec(f)
assert(len(args) == len(defaults))
parser = argparse.ArgumentParser()
i = 0
for arg in args:
argtype = type(defaults[i])
if argtype == bool: # convert to action
if defaults[i] == False:
action="store_true"
else:
action="store_false"
parser.add_argument("-%s" % arg, "--%s" % arg, action=action, default=defaults[i])
else:
parser.add_argument("-%s"%arg, "--%s"%arg, type=type(defaults[i]))
i += 1
if test is not None:
par = parser.parse_args([test])
else:
par = parser.parse_args()
kwargs = {}
for arg in args:
if getattr(par, arg) is not None:
kwargs[arg] = getattr(par, arg)
return kwargs
def argprun(f, sigint_shell=True, **kwargs): # command line overrides kwargs
def handler(sig, frame):
# find the frame right under the argprun
print "custom handler called"
original_frame = frame
current_frame = original_frame
previous_frame = None
stop = False
while not stop and current_frame.f_back is not None:
previous_frame = current_frame
current_frame = current_frame.f_back
if "_FRAME_LEVEL" in current_frame.f_locals \
and current_frame.f_locals["_FRAME_LEVEL"] == "ARGPRUN":
stop = True
if stop: # argprun frame found
__toexposelocals = previous_frame.f_locals # f-level frame locals
class L(object):
pass
l = L()
for k, v in __toexposelocals.items():
setattr(l, k, v)
stopprompt = False
while not stopprompt:
whattodo = raw_input("(s)hell, (k)ill\n>>")
if whattodo == "s":
embed()
elif whattodo == "k":
"Killing"
sys.exit()
else:
stopprompt = True
if sigint_shell:
_FRAME_LEVEL="ARGPRUN"
prevhandler = signal.signal(signal.SIGINT, handler)
try:
f_args = argparsify(f)
for k, v in kwargs.items():
if k not in f_args:
f_args[k] = v
f(**f_args)
except KeyboardInterrupt:
print("Interrupted by Keyboard")
|
StarcoderdataPython
|
278578
|
#!/usr/bin/python
#coding=utf-8
"""
Problem 0009: given an HTML file, find all the links inside it.
"""
from bs4 import BeautifulSoup
def find_the_link(filepath):
links = []
with open(filepath) as f:
text = f.read()
        bs = BeautifulSoup(text)
for i in bs.find_all('a'):
links.append(i['href'])
return links
if __name__ == '__main__':
print find_the_link('Show-Me-the-Code_show-me-the-code_1.html')
|
StarcoderdataPython
|
294967
|
"""
Django settings for matatu project.
Generated by 'django-admin startproject' using Django 3.2.9.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
import django_heroku
import dj_database_url
from decouple import config, Csv
import cloudinary
import cloudinary.uploader
import cloudinary.api
cloudinary.config(
    cloud_name=config('CD_NAME'),
    api_key=config('CD_API'),
    api_secret=config('CD_SECRET'),
    secure=config('CD_SECURE')
)
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
MODE=config("MODE", default="dev")
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=True, cast=bool)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'mat',
    # 'tinymce',
'bootstrap4',
'bootstrap3',
'crispy_forms',
# 'django_static_fontawesome',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django.contrib.sites',
'django_daraja',
]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'matatu.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'matatu.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
if config('MODE') == 'dev':
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': '',
}
}
else:
DATABASES = {
'default': dj_database_url.config(default=config('DATABASE_URL'))
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
ALLOWED_HOSTS =['.localhost','.herokuapp.com','127.0.0.1']
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
SOCIALACCOUNT_PROVIDERS = {
'google': {
'SCOPE': [
'profile',
'email',
],
'AUTH_PARAMS': {
'access_type': 'online',
}
}
}
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
django_heroku.settings(locals())
SITE_ID = 4
LOGIN_REDIRECT_URL ='/'
LOGOUT_REDIRECT_URL ='/'
EMAIL_USE_TLS = config('EMAIL_USE_TLS')
EMAIL_HOST = config('EMAIL_HOST')
EMAIL_PORT = config('EMAIL_PORT')
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
]
# The Mpesa environment to use
# Possible values: sandbox, production
MPESA_ENVIRONMENT = 'sandbox'
# Credentials for the daraja app
MPESA_CONSUMER_KEY = config('MPESA_CONSUMER_KEY')
MPESA_CONSUMER_SECRET = config('MPESA_CONSUMER_SECRET')
#Shortcode to use for transactions. For sandbox use the Shortcode 1 provided on test credentials page
MPESA_SHORTCODE = config('MPESA_SHORTCODE')
# Shortcode to use for Lipa na MPESA Online (MPESA Express) transactions
# This is only used on sandbox, do not set this variable in production
# For sandbox use the Lipa na MPESA Online Shorcode provided on test credentials page
MPESA_EXPRESS_SHORTCODE = config('MPESA_SHORTCODE')
# Type of shortcode
# Possible values:
# - paybill (For Paybill)
# - till_number (For Buy Goods Till Number)
MPESA_SHORTCODE_TYPE = 'paybill'
# Lipa na MPESA Online passkey
# Sandbox passkey is available on test credentials page
# Production passkey is sent via email once you go live
MPESA_PASSKEY = config('MPESA_PASSKEY')
# Username for initiator (to be used in B2C, B2B, AccountBalance and TransactionStatusQuery Transactions)
MPESA_INITIATOR_USERNAME = 'initiator_username'
# Plaintext password for initiator (to be used in B2C, B2B, AccountBalance and TransactionStatusQuery Transactions)
MPESA_INITIATOR_SECURITY_CREDENTIAL = 'initiator_security_credential'
|
StarcoderdataPython
|
6502997
|
#!/usr/local/bin/python3
from driver import Driver
from log import Log
from selenium.common.exceptions import NoSuchElementException
class Element(Driver):
def find_by_id(self, id=None):
if not id:
return False
try:
return self.driver.find_element_by_id(id)
except NoSuchElementException:
            Log().write_log('Driver', 'Element Select Failed!!!')
def find_by_css_select(self, css=None):
if not css:
return False
try:
return self.driver.find_element_by_css_selector(css)
except NoSuchElementException:
            Log().write_log('Driver', 'Element Select Failed!!!')
def find_by_class_name(self, name=None):
if not name:
return False
try:
return self.driver.find_element_by_class_name(name)
except NoSuchElementException:
            Log().write_log('Driver', 'Element Select Failed!!!')
def find_by_link_text(self, text):
if not text:
return False
try:
return self.driver.find_element_by_link_text(text)
except NoSuchElementException:
            Log().write_log('Driver', 'Element Select Failed!!!')
|
StarcoderdataPython
|
5130331
|
<filename>tg_gui_core/__init__.py
from . import implementation_support
from .implementation_support import TYPE_CHECKING
from .shared import Identifiable, Pixels, UID
from .widget import Widget
from .attrs import WidgetAttr, widget
from .container import ContainerWidget
|
StarcoderdataPython
|
11283566
|
from .binomial_regression import BinomRegression
from .linear_regression import BayesianLinearRegression
|
StarcoderdataPython
|
8360
|
<filename>tests/pyre/components/component_class_registration_model.py<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# <NAME>
# orthologue
# (c) 1998-2018 all rights reserved
#
"""
Verify that component registration interacts correctly with the pyre configurator model
"""
# access
# print(" -- importing pyre")
import pyre
# print(" -- done")
def declare():
# declare a protocol
class protocol(pyre.protocol):
"""a protocol"""
# properties
p1 = pyre.properties.str()
p2 = pyre.properties.str()
# behavior
@pyre.provides
def do(self):
"""behave"""
# declare a component
class component(pyre.component, family="test", implements=protocol):
"""a component"""
# traits
p1 = pyre.properties.str(default="p1")
p2 = pyre.properties.str(default="p2")
@pyre.export
def do(self):
"""behave"""
return "component"
return component
def test():
# and the model
model = pyre.executive.nameserver
# model.dump(pattern='test')
# print(" -- making some configuration changes")
# add an assignment
model['test.p1'] = 'step 1'
# an alias
model.alias(alias='p1', target='test.p1')
# and a reference to the alias
model['ref'] = '{p1}'
# check that they point to the same slot
assert model.retrieve(name='p1') == model.retrieve(name='test.p1')
# save the nodes
ref = model.retrieve(name='ref')
step_0 = model.retrieve(name='test.p1')
# now declare the component and its protocol
# print(" -- declaring components")
component = declare()
# print(" -- done")
# model.dump(pattern='')
assert component.p1 == 'step 1'
assert component.p2 == 'p2'
# check that the model is as we expect
# model.dump()
assert model['test.p1'] == component.p1
assert model['test.p2'] == component.p2
# how about the alias and the reference?
assert model['ref'] == component.p1
assert model['p1'] == component.p1
# make a late registration to what is now the component trait
model['test.p2'] = 'step 2'
# model.dump(pattern='test')
# and check
assert component.p1 == 'step 1'
assert component.p2 == 'step 2'
return
# main
if __name__ == "__main__":
test()
# end of file
|
StarcoderdataPython
|
8159114
|
<filename>apps/auth/views.py
import logging
import datetime
from misc.mixins import myTemplateView
class indexView(myTemplateView):
template='index.tpl'
class expressauthView(myTemplateView):
template='auth/expressauth.tpl'
class regView(myTemplateView):
template='auth/reg.tpl'
class repairView(myTemplateView):
template='auth/repair.tpl'
|
StarcoderdataPython
|
1739936
|
# Rotate the list right by one step: move the last element to the front.
List = list(map(int, input().split()))
List.insert(0, List.pop())
print(*List)
|
StarcoderdataPython
|
300589
|
#//
#// -------------------------------------------------------------
#// Copyright 2004-2011 Synopsys, Inc.
#// Copyright 2010 Mentor Graphics Corporation
#// Copyright 2010-2011 Cadence Design Systems, Inc.
#// Copyright 2019-2020 <NAME> (tpoikela)
#// All Rights Reserved Worldwide
#//
#// Licensed under the Apache License, Version 2.0 (the
#// "License"); you may not use this file except in
#// compliance with the License. You may obtain a copy of
#// the License at
#//
#// http://www.apache.org/licenses/LICENSE-2.0
#//
#// Unless required by applicable law or agreed to in
#// writing, software distributed under the License is
#// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
#// CONDITIONS OF ANY KIND, either express or implied. See
#// the License for the specific language governing
#// permissions and limitations under the License.
#// -------------------------------------------------------------
#//
import cocotb
from cocotb.triggers import Timer, RisingEdge
from uvm.macros import *
from uvm.base import sv, UVMConfigDb, UVMComponent
from uvm.reg import UVMRegPredictor
from regmodel import dut_regmodel
from apb.apb_agent import apb_agent
from apb.apb_rw import reg2apb_adapter
EXPLICIT_MON = 1
#//
#// This example shows how to integrate a register model
#// directly onto a bus sequencer.
#//
#// By default, the mirror in the register model is updated implicitly.
#// For explicit monitoring, define the `EXPLICIT_MON macro
#//
class tb_env(UVMComponent):
def __init__(self, name, parent=None):
super().__init__(name,parent)
self.regmodel = None # dut_regmodel
        self.apb = None  # apb_agent
self.seq = None # uvm_reg_sequence
self.vif = None
if EXPLICIT_MON:
self.apb2reg_predictor = None # uvm_reg_predictor#(apb_rw)
def build_phase(self, phase):
if self.regmodel is None:
self.regmodel = dut_regmodel.type_id.create("regmodel",None,self.get_full_name())
self.regmodel.build()
self.regmodel.lock_model()
self.apb = apb_agent.type_id.create("apb", self)
if EXPLICIT_MON:
self.apb2reg_predictor = UVMRegPredictor("apb2reg_predictor", self)
hdl_root = "dut"
sv.value_plusargs("ROOT_HDL_PATH=%s",hdl_root) # cast to 'void' removed
self.regmodel.set_hdl_path_root(hdl_root)
vif = []
if UVMConfigDb.get(self, "apb", "vif", vif):
self.vif = vif[0]
else:
uvm_fatal("NO_VIF", "Could not find vif from config DB")
def connect_phase(self, phase):
if self.apb is not None:
self.reg2apb = reg2apb_adapter('adapter')
self.regmodel.default_map.set_sequencer(self.apb.sqr, self.reg2apb)
if EXPLICIT_MON:
self.apb2reg_predictor.map = self.regmodel.default_map
self.apb2reg_predictor.adapter = self.reg2apb
self.regmodel.default_map.set_auto_predict(0)
self.apb.mon.ap.connect(self.apb2reg_predictor.bus_in)
else:
self.regmodel.default_map.set_auto_predict(1)
self.regmodel.print_obj()
async def run_phase(self, phase):
phase.raise_objection(self)
if self.seq is None:
uvm_fatal("NO_SEQUENCE","Env's sequence is not defined. Nothing to do. Exiting.")
return
# begin : do_reset
uvm_info("RESET","Performing reset of 5 cycles", UVM_LOW)
self.vif.rst <= 1
for _ in range(5):
await RisingEdge(self.vif.clk)
self.vif.rst <= 0
await Timer(100, "NS")
uvm_info("START_SEQ", "Starting sequence '" + self.seq.get_name() + "'", UVM_LOW)
self.seq.model = self.regmodel
await self.seq.start(None)
phase.drop_objection(self)
uvm_component_utils(tb_env)
|
StarcoderdataPython
|
1960162
|
<gh_stars>1-10
from chutil.visualize import show
|
StarcoderdataPython
|
3229582
|
# -*- coding: utf-8 -*-
import os
from distutils.core import setup
here = os.path.dirname(__file__)
def get_long_desc():
return open(os.path.join(here, 'README.rst')).read()
# Function borrowed from carljm.
def get_version():
fh = open(os.path.join(here, "faq", "__init__.py"))
try:
for line in fh.readlines():
if line.startswith("__version__ ="):
return line.split("=")[1].strip().strip("'")
finally:
fh.close()
setup(
name='django-faq',
version=get_version(),
description='Frequently Asked Question (FAQ) management for Django apps.',
url='https://github.com/benspaulding/django-faq/',
author='<NAME>',
author_email='<EMAIL>',
license='BSD',
download_url='http://github.com/benspaulding/django-faq/tarball/v%s' % get_version(),
long_description = get_long_desc(),
packages = [
'faq',
'faq.tests',
'faq.urls',
'faq.views',
],
package_data = {
'faq': [
'fixtures/*',
'locale/*/LC_MESSAGES/*',
'templates/faq/*',
'templates/search/indexes/faq/*',
],
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Site Management',
],
)
|
StarcoderdataPython
|
3297897
|
from .base import UrbanClient, UrbanDefinition, UrbanDictionaryError
|
StarcoderdataPython
|
6469605
|
<filename>cloakifyFactory.py
#!/usr/bin/python
#
# Filename: cloakifyFactory.py
#
# Version: 1.0.1
#
# Author: <NAME> (TryCatchHCF)
#
# Summary: Cloakify Factory is part of the Cloakify Exfiltration toolset that transforms
# any filetype into lists of words / phrases / Unicode to ease exfiltration of data across
# monitored networks, defeat data-whitelisting restrictions, hide the data in plain
# sight, and facilitate social engineering attacks against human analysts and their
# workflows. Bonus Feature: Defeats signature-based malware detection tools (cloak your
# other tools). Leverages other scripts of the Cloakify Exfiltration Toolset, including
# cloakify.py, decloakify.py, and the noise generator scripts.
#
# Description: Base64-encodes the given payload and translates the output using a list
# of words/phrases/Unicode provided in the cipher. This is NOT a secure encryption tool,
# the output is vulnerable to frequency analysis attacks. Use the Noise Generator scripts
# to add entropy to your cloaked file. You should encrypt the file before cloaking if
# secrecy is needed.
#
# Prepackaged ciphers include: lists of desserts in English, Arabic, Thai, Russian,
# Hindi, Chinese, Persian, and Muppet (Swedish Chef); PokemonGo creatures; Top 100 IP
# Addresses; Top Websites; GeoCoords of World Capitols; MD5 Password Hashes; An Emoji
# cipher; Star Trek characters; Geocaching Locations; Amphibians (Scientific Names);
# evadeAV cipher (simple cipher that minimizes size of the resulting obfuscated data).
#
# To create your own cipher:
#
# - Generate a list of at least 66 unique words (Unicode-16 accepted)
# - Remove all duplicate entries and blank lines
# - Randomize the list
# - Place in the 'ciphers/' subdirectory
# - Relaunch cloakifyFactory and it will automatically detect the new cipher
#
# Example:
#
# $ ./cloakifyFactory.py
#
import os, sys, getopt, random, base64, cloakify, decloakify
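
# --- Illustrative sketch (not part of the original tool) ----------------------
# As described above, the core cloak transform is "Base64-encode the payload,
# then substitute each Base64 character with a line from the cipher file".
# A minimal, hypothetical version of that mapping (the real work is done by
# the cloakify/decloakify modules imported above):
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="

def _cloak_sketch( payload, cipherWords ):
	# payload: raw bytes/str; cipherWords: a list of >= 65 unique strings
	table = dict( zip( B64_CHARSET, cipherWords ))
	return [ table[ ch ] for ch in base64.b64encode( payload ) ]
# ------------------------------------------------------------------------------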
# Load list of ciphers
gCipherFiles = next(os.walk("./ciphers/"))[2]
# Load list of noise generators
gNoiseScripts = []
for root, dirs, files in os.walk( "./noiseTools" ):
for file in files:
if file.endswith('.py'):
gNoiseScripts.append( file )
def CloakifyFile():
print ""
print "==== Cloakify a File ===="
print ""
sourceFile = raw_input("Enter filename to cloak (e.g. ImADolphin.exe or /foo/bar.zip): ")
print ""
cloakedFile = raw_input("Save cloaked data to filename (default: 'tempList.txt'): ")
if cloakedFile == "":
cloakedFile = "tempList.txt"
cipherNum = SelectCipher()
noiseNum = -1
choice = raw_input("Add noise to cloaked file? (y/n): ")
if choice == "y":
noiseNum = SelectNoise()
print ""
print "Creating cloaked file using cipher:", gCipherFiles[ cipherNum ]
try:
cloakify.Cloakify( sourceFile, "ciphers/" + gCipherFiles[ cipherNum ], cloakedFile )
except:
print ""
print "!!! Well that didn't go well. Verify that your cipher is in the 'ciphers/' subdirectory."
print ""
if noiseNum >=0:
print "Adding noise to cloaked file using noise generator:", gNoiseScripts[ noiseNum ]
try:
os.system( "noiseTools/%s %s" % ( gNoiseScripts[ noiseNum ], cloakedFile ))
except:
print ""
print "!!! Well that didn't go well. Verify that '", cloakedFile, "'"
print "!!! is in the current working directory or try again giving full filepath."
print ""
print ""
print "Cloaked file saved to:", cloakedFile
print ""
choice = raw_input( "Preview cloaked file? (y/n): " )
if choice == "y":
print ""
with open( cloakedFile ) as file:
cloakedPreview = file.readlines()
i = 0;
while ( i<20 ):
print cloakedPreview[ i ],
i = i+1
print ""
choice = raw_input( "Press return to continue... " )
def DecloakifyFile():
decloakTempFile = "decloakTempFile.txt"
print ""
print "==== Decloakify a Cloaked File ===="
print ""
sourceFile = raw_input( "Enter filename to decloakify (e.g. /foo/bar/MyBoringList.txt): " )
print ""
decloakedFile = raw_input( "Save decloaked data to filename (default: 'decloaked.file'): " )
print ""
if decloakedFile == "":
decloakedFile = "decloaked.file"
# Reviewing the cloaked file within cloakifyFactory will save a little time for those who
# forgot the format of the cloaked file and don't want to hop into a new window just to look
choice = raw_input( "Preview cloaked file? (y/n default=n): " )
if choice == "y":
print ""
try:
with open( sourceFile ) as file:
cloakedPreview = file.readlines()
i = 0;
while ( i<20 ):
print cloakedPreview[ i ],
i = i+1
print ""
except:
print ""
print "!!! Well that didn't go well. Verify that '", sourceFile, "'"
print "!!! is in the current working directory or the filepath you gave."
print ""
choice = raw_input("Was noise added to the cloaked file? (y/n default=n): ")
if choice == "y":
noiseNum = SelectNoise()
stripColumns = 2
# No upper bound checking, relies on SelectNoise() returning valid value, fix in next release
if noiseNum >= 0:
try:
# Remove Noise, overwrite the source file with the stripped contents
print "Removing noise from noise generator:", gNoiseScripts[ noiseNum ]
os.system( "./removeNoise.py %s %s %s" % ( stripColumns, sourceFile, decloakTempFile ))
# Copy decloak temp filename to sourceFile so that Decloakify() gets the right filename
sourceFile = decloakTempFile
except:
print "!!! Error while removing noise from file. Was calling 'removeNoise.py'.\n"
cipherNum = SelectCipher()
print "Decloaking file using cipher: ", gCipherFiles[ cipherNum ]
# Call Decloakify()
try:
decloakify.Decloakify( sourceFile, "ciphers/" + gCipherFiles[ cipherNum ], decloakedFile )
print ""
print "Decloaked file", sourceFile, ", saved to", decloakedFile
except:
print ""
print "!!! Oh noes! Error decloaking file (did you select the same cipher it was cloaked with?)"
print ""
try:
os.system( "rm -f %s" % ( decloakTempFile ))
except:
print ""
print "!!! Oh noes! Error while deleting temporary file:", decloakTempFile
print ""
choice = raw_input("Press return to continue... ")
def SelectCipher():
print ""
print "Ciphers:"
print ""
cipherCount = 1
for cipherName in gCipherFiles:
print cipherCount, "-", cipherName
cipherCount = cipherCount + 1
print ""
selection = -1
while ( selection < 0 or selection > (cipherCount - 2)):
try:
cipherNum = raw_input( "Enter cipher #: " )
selection = int ( cipherNum ) - 1
if ( cipherNum == "" or selection < 0 or selection > (cipherCount - 1)):
print "Invalid cipher number, try again..."
selection = -1
except ValueError:
print "Invalid cipher number, try again..."
print ""
return selection
def BrowseCiphers():
print ""
print "======== Preview Ciphers ========"
cipherNum = SelectCipher()
print "===== Cipher:", gCipherFiles[ cipherNum ], " ====="
print ""
try:
with open( "ciphers/"+gCipherFiles[ cipherNum ] ) as cipherList:
arrayCipher = cipherList.read()
print( arrayCipher )
except:
print "!!! Error opening cipher file.\n"
choice = raw_input( "Press return to continue... " )
def SelectNoise():
print ""
print "Noise Generators:"
print ""
noiseCount = 1
for noiseName in gNoiseScripts:
print noiseCount, "-", noiseName
noiseCount = noiseCount + 1
print ""
selection = -1
noiseTotal = noiseCount - 2
while ( selection < 0 or selection > noiseTotal ):
try:
noiseNum = raw_input( "Enter noise generator #: " )
selection = int ( noiseNum ) - 1
if ( selection == "" or selection < 0 or selection > noiseTotal ):
print "Invalid generator number, try again..."
selection = -1
except ValueError:
print "Invalid generator number, try again..."
return selection
def BrowseNoise():
print ""
print "======== Preview Noise Generators ========"
noiseNum = SelectNoise()
print ""
# No upper bounds checking, relies on SelectNoise() to return a valid value, fix in next update
if noiseNum >= 0:
try:
print "Sample output of prepended strings, using noise generator:", gNoiseScripts[ noiseNum ], "\n"
os.system( "noiseTools/%s" % ( gNoiseScripts[ noiseNum ] ))
except:
print "!!! Error while generating noise preview.\n"
print ""
choice = raw_input( "Press return to continue... " )
def Help():
print ""
print "===================== Using Cloakify Factory ====================="
print ""
print "For background and full tutorial, see the presentation slides at"
print "https://github.com/TryCatchHCF/Cloakify"
print ""
print "WHAT IT DOES:"
print ""
print "Cloakify Factory transforms any filetype (e.g. .zip, .exe, .xls, etc.) into"
print "a list of harmless-looking strings. This lets you hide the file in plain sight,"
print "and transfer the file without triggering alerts. The fancy term for this is"
print "'text-based steganography', hiding data by making it look like other data."
print ""
print "For example, you can transform a .zip file into a list made of Pokemon creatures"
print "or Top 100 Websites. You then transfer the cloaked file however you choose,"
print "and then decloak the exfiltrated file back into its original form. The ciphers"
print "are designed to appear like harmless / ignorable lists, though some (like MD5"
print "password hashes) are specifically meant as distracting bait."
print ""
print "BASIC USE:"
print ""
print "Cloakify Factory will guide you through each step. Follow the prompts and"
print "it will show you the way."
print ""
print "Cloakify a Payload:"
print "- Select 'Cloakify a File' (any filetype will work - zip, binaries, etc.)"
print "- Enter filename that you want to Cloakify (can be filename or filepath)"
print "- Enter filename that you want to save the cloaked file as"
print "- Select the cipher you want to use"
print "- Select a Noise Generator if desired"
print "- Preview cloaked file if you want to check the results"
print "- Transfer cloaked file via whatever method you prefer"
print ""
print "Decloakify a Payload:"
print "- Receive cloaked file via whatever method you prefer"
print "- Select 'Decloakify a File'"
print "- Enter filename of cloaked file (can be filename or filepath)"
print "- Enter filename to save decloaked file to"
print "- Preview cloaked file to review which Noise Generator and Cipher you used"
print "- If Noise Generator was used, select matching Generator to remove noise"
print "- Select the cipher used to cloak the file"
print "- Your decloaked file is ready to go!"
print ""
print "You can browse the ciphers and outputs of the Noise Generators to get"
print "an idea of how to cloak files for your own needs."
print ""
print "Anyone using the same cipher can decloak your cloaked file, but you can"
print "randomize (scramble) the preinstalled ciphers. See 'randomizeCipherExample.txt'"
print "in the Cloakify directory for an example."
print ""
print "NOTE: Cloakify is not a secure encryption scheme. It's vulnerable to"
print "frequency analysis attacks. Use the 'Add Noise' option to add entropy when"
print "cloaking a payload to help degrade frequency analysis attacks. Be sure to"
print "encrypt the file prior to cloaking if secrecy is needed."
def About():
print ""
print "===================== About Cloakify Factory ====================="
print ""
print " \"Hide & Exfiltrate Any Filetype in Plain Sight\""
print ""
print " Written by TryCatchHCF"
print " https://github.com/TryCatchHCF/Cloakify"
print ""
print "Data Exfiltration In Plain Sight; Evade DLP/MLS Devices; Social Engineering"
print "of Analysts; Defeat Data Whitelisting Controls; Evade AV Detection. Text-based"
print "steganography usings lists. Convert any file type (e.g. executables, Office,"
print "Zip, images) into a list of everyday strings. Very simple tools, powerful"
print "concept, limited only by your imagination."
print ""
print "Cloakify Factory uses Python scripts to cloak / uncloak any file type using"
print "list-based ciphers (text-based steganography). Allows you to transfer data"
print "across a secure network's perimeter without triggering alerts, defeating data"
print "whitelisting controls, and derailing analyst's review via social engineering"
print "attacks against their workflows. As a bonus, cloaked files defeat signature-"
print "based malware detection tools."
print ""
print "NOTE: Cloakify is not a secure encryption scheme. It's vulnerable to"
print "frequency analysis attacks. Use the 'Add Noise' option to add entropy when"
print "cloaking a payload to help degrade frequency analysis attacks. Be sure to"
print "encrypt the file prior to cloaking if secrecy is needed."
print ""
print "DETAILS:"
print ""
print "Cloakify first Base64-encodes the payload, then applies a cipher to generate"
print "a list of strings that encodes the Base64 payload. Once exfiltrated, use"
print "Decloakify with the same cipher to decode the payload. The ciphers are"
print "designed to appear like harmless / ingorable lists, though some (like MD5"
print "password hashes) are specifically meant as distracting bait."
print ""
print "Prepackaged ciphers include lists of:"
print ""
print "- Amphibians (scientific names)"
print "- Belgian Beers"
print "- Desserts in English, Arabic, Thai, Russian, Hindi, Chinese, Persian, and"
print " Muppet (Swedish Chef)"
print "- Emoji"
print "- evadeAV (smallest cipher space, x3 payload size)"
print "- GeoCoords World Capitals (Lat/Lon)"
print "- GeoCaching Coordinates (w/ Site Names)"
print "- IPv4 Addresses of Popular Websites"
print "- MD5 Password Hashes"
print "- PokemonGo Monsters"
print "- Top 100 Websites"
print "- Ski Resorts"
print "- Status Codes (generic)"
print "- Star Trek characters"
print "- World Beaches"
print "- World Cup Teams"
print ""
print "Prepackaged scripts for adding noise / entropy to your cloaked payloads:"
print ""
print "- prependEmoji.py: Adds a randomized emoji to each line"
print "- prependID.py: Adds a randomized ID tag to each line"
print "- prependLatLonCoords.py: Adds random LatLong coordinates to each line"
print "- prependTimestamps.py: Adds timestamps (log file style) to each line"
print ""
print "CREATE YOUR OWN CIPHERS:"
print ""
print "Cloakify Factory is at its best when you're using your own customized"
print "ciphers. The default ciphers may work for most needs, but in a unique"
print "exfiltration scenario you may need to build your own."
print ""
print "Creating a Cipher:"
print ""
print "- Create a list of at least 66 unique words/phrases/symbols (Unicode accepted)"
print "- Randomize the list order"
print "- Remove all duplicate entries and all blank lines"
print "- Place cipher file in the 'ciphers/' subdirectory"
print "- Re-run Cloakify Factory to automatically load the new cipher"
print "- Test cloaking / decloaking with new cipher before using operationally"
print ""
def MainMenu():
print " ____ _ _ _ __ ______ _ "
print " / __ \ | | | |_|/ _| | ___| | | "
print "| / \/ | ___ __ _| | ___| |_ _ _ | |_ __ _ ___| |_ ___ _ __ _ _ "
print "| | | |/ _ \ / _` | |/ / | _| | | | | _/ _` |/ __| __/ _ \| '__| | | |"
print "| \__/\ | |_| | |_| | <| | | | |_| | | || |_| | |__| || |_| | | | |_| |"
print " \____/_|\___/ \__,_|_|\_\_|_| \__, | \_| \__,_|\___|\__\___/|_| \__, |"
print " __/ | __/ |"
print " |___/ |___/ "
print ""
print " \"Hide & Exfiltrate Any Filetype in Plain Sight\""
print ""
print " Written by TryCatchHCF"
print " https://github.com/TryCatchHCF"
print " (\~---."
print " / (\-`-/)"
print " ( ' ' ) data.xls image.jpg \\ List of emoji, IP addresses,"
print " \ ( \_Y_/\\ ImADolphin.exe backup.zip --> sports teams, desserts,"
print " \"\"\ \___// LoadMe.war file.doc / beers, anything you imagine"
print " `w \""
selectionErrorMsg = "1-7 are your options. Try again."
notDone = 1
while ( notDone ):
print ""
print "==== Cloakify Factory Main Menu ===="
print ""
print "1) Cloakify a File"
print "2) Decloakify a File"
print "3) Browse Ciphers"
print "4) Browse Noise Generators"
print "5) Help / Basic Usage"
print "6) About Cloakify Factory"
print "7) Exit"
print ""
invalidSelection = 1
while ( invalidSelection ):
try:
choice = int( raw_input( "Selection: " ))
if ( choice > 0 and choice < 8 ):
invalidSelection = 0
else:
print selectionErrorMsg
except ValueError:
print selectionErrorMsg
if choice == 1:
CloakifyFile()
elif choice == 2:
DecloakifyFile()
elif choice == 3:
BrowseCiphers()
elif choice == 4:
BrowseNoise()
elif choice == 5:
Help()
elif choice == 6:
About()
elif choice == 7:
notDone = 0
else:
print selectionErrorMsg
byeArray = ("Bye!", "Ciao!", "Adios!", "Aloha!", "Hei hei!", "Bless bless!", "Hej da!", "Tschuss!", "Adieu!", "Cheers!")
print ""
print random.choice( byeArray )
print ""
# ============================== Main Loop ================================
#
MainMenu()
|
StarcoderdataPython
|
1769435
|
<gh_stars>1-10
"""
Parameterized clock decorator.
"""
import time
DEFAULT_FMT = '[{elapsed:0.8f}s] {name}({args}) ->{result}'
def clock(fmt=DEFAULT_FMT):  # parameterized decorator factory
    def decorate(func):  # the actual decorator
        def clocked(*_args):  # wraps the decorated function
            t0 = time.time()
            _result = func(*_args)  # the decorated function returns the real result
            elapsed = time.time() - t0
            name = func.__name__
            args = ', '.join(repr(arg) for arg in _args)  # _args holds clocked's arguments; args is the display string
            result = repr(_result)  # string representation, for display
            # **locals() lets fmt reference any of clocked's local variables
            print(fmt.format(**locals()))
            return _result
        return clocked
    return decorate
def main():
@clock()
def snooze(seconds):
time.sleep(seconds)
@clock('{name}: {elapsed}s')
def snooze1(seconds):
time.sleep(seconds)
@clock('{name}({args} dt={elapsed:0.3f}s)')
def snooze2(seconds):
time.sleep(seconds)
for _ in range(3):
snooze(.123)
snooze1(.456)
snooze2(.789)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
11267301
|
<reponame>niacdoial/armory
from arm.logicnode.arm_nodes import *
class TimeNode(ArmLogicTreeNode):
"""Returns the application execution time and the delta time."""
bl_idname = 'LNTimeNode'
bl_label = 'Get Application Time'
arm_version = 1
def init(self, context):
super(TimeNode, self).init(context)
self.add_output('NodeSocketFloat', 'Time')
self.add_output('NodeSocketFloat', 'Delta')
|
StarcoderdataPython
|
11229289
|
import unittest
import json
from pyvdk.tools import Keyboard, TextButton
class KeyboardTests(unittest.TestCase):
def test_limit(self):
# Arrange
keyboard = Keyboard(inline=True)
b = [
TextButton(
color='w',
label=str(i),
payload=str(i)
)
for i in range(6)
]
# Act
keyboard.extend(b)
keyboard[1].extend(b)
# Assert
with self.assertRaises(Exception):
keyboard[2].extend(b)
def test_structure(self):
# Arrange
keyboard = Keyboard(one_time=True)
b = [
TextButton(
color='w',
label=str(i + 1),
payload=str(i + 1)
)
for i in range(3)
]
keyboard.extend(b[:2])
keyboard[2].append(b[2])
# Act
data = json.loads(keyboard())
# Assert
self.assertTrue(data['one_time'])
self.assertFalse(data['inline'])
self.assertEqual(len(data['buttons'][0]), 2)
self.assertEqual(len(data['buttons'][1]), 1)
self.assertEqual(data['buttons'][0][1]['action']['label'], "2")
self.assertEqual(data['buttons'][1][0]['action']['payload'], "{\"command\": \"3\"}")
|
StarcoderdataPython
|
9613089
|
<filename>more_collections/sorted/_sorted_iterable.py
from __future__ import annotations
import sys
from typing import Generic, TypeVar
if sys.version_info < (3, 9):
from typing import Iterable, Iterator
else:
from collections.abc import Iterable, Iterator
from ._abc_iterable import SortedIterable, SortedIterator
__all__ = ["SortedUserIterable", "SortedUserIterator"]
T = TypeVar("T")
class SortedUserIterable(SortedIterable[T], Generic[T]):
__iterable: Iterable[T]
def __init__(self: SortedUserIterable[T], iterable: Iterable[T], /) -> None:
if isinstance(iterable, Iterable):
self.__iterable = iterable
else:
raise TypeError(f"{type(self).__name__} expected an iterable, got {iterable!r}")
def __iter__(self: SortedUserIterable[T], /) -> SortedUserIterator[T]:
return SortedUserIterator(self.__iterable)
class SortedUserIterator(SortedIterator[T], Generic[T]):
__iterator: Iterator[T]
def __init__(self: SortedUserIterator[T], iterable: Iterable[T], /) -> None:
if isinstance(iterable, Iterable):
self.__iterator = iter(iterable)
else:
raise TypeError(f"{type(self).__name__} expected an iterable, got {iterable!r}")
def __next__(self: SortedUserIterator[T], /) -> T:
return next(self.__iterator)
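
if __name__ == "__main__":
    # Usage sketch (illustrative): the wrapper validates that its argument is
    # iterable but does not sort it; callers are expected to supply
    # already-sorted data.
    for x in SortedUserIterable([1, 2, 3]):
        print(x)  # 1, 2, 3, yielded through SortedUserIterator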
|
StarcoderdataPython
|
8147711
|
import math
class Circulo():
    def __init__(self):
        super().__init__()
        # Seed the private attribute via object.__setattr__ so the guard in
        # __setattr__ below does not reject its own initialization.
        object.__setattr__(self, '_Circulo__raio', None)

    def get_perimetro(self):
        return 2 * math.pi * self.raio

    def get_area(self):
        return math.pi * self.raio ** 2

    @property
    def raio(self):
        return self.__raio

    @raio.setter
    def raio(self, x):
        self.__raio = x

    def __setattr__(self, key, value):
        if not hasattr(self, key):
            raise TypeError("Cannot create new attributes on this class")
        object.__setattr__(self, key, value)

c = Circulo()
c.raio = 2  # ok
c.lado = 2  # raises TypeError: cannot create new attributes
# https://pt.stackoverflow.com/q/220908/101
|
StarcoderdataPython
|
12818857
|
<reponame>mhungerford/pebble-glracer
#!/usr/bin/env python
# encoding: utf-8
# <NAME>, 2013
"""Writes the c and cpp compile commands into build/compile_commands.json
see http://clang.llvm.org/docs/JSONCompilationDatabase.html"""
import json
import os
from waflib import Logs, TaskGen, Task
from waflib.Tools import c, cxx
@TaskGen.feature('*')
@TaskGen.after_method('process_use')
def collect_compilation_db_tasks(self):
"Add a compilation database entry for compiled tasks"
try:
clang_db = self.bld.clang_compilation_database_tasks
except AttributeError:
clang_db = self.bld.clang_compilation_database_tasks = []
self.bld.add_post_fun(write_compilation_database)
for task in getattr(self, 'compiled_tasks', []):
if isinstance(task, (c.c, cxx.cxx)):
clang_db.append(task)
def write_compilation_database(ctx):
"Write the clang compilation database as json"
database_file = ctx.bldnode.make_node('compile_commands.json')
file_path = str(database_file.path_from(ctx.path))
if not os.path.exists(file_path):
with open(file_path, 'w') as f:
f.write('[]')
Logs.info("Store compile comands in %s" % file_path)
clang_db = dict((x["file"], x) for x in json.load(database_file))
for task in getattr(ctx, 'clang_compilation_database_tasks', []):
try:
cmd = task.last_cmd
except AttributeError:
continue
filename = task.inputs[0].abspath()
entry = {
"directory" : getattr(task, 'cwd', ctx.variant_dir),
"command" : " ".join(cmd),
"file" : filename,
}
clang_db[filename] = entry
    database_file.write(json.dumps(list(clang_db.values()), indent=2))
def options(opt):
"opitions for clang_compilation_database"
pass
def configure(cfg):
"configure for clang_compilation_database"
pass
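
# Usage sketch (typical waf wiring; the tool directory name is an assumption).
# Loading this module from a project's wscript makes every c/cxx compile task
# get recorded into build/compile_commands.json:
#
#     def configure(cfg):
#         cfg.load('clang_compilation_database', tooldir='waftools')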
|
StarcoderdataPython
|
9787361
|
"""
Easier factory functions for creating Pymunk objects.
"""
from functools import wraps
from typing import Sequence, Union, Callable, TypeVar, TYPE_CHECKING, Any
from . import Color, DrawOptions
from .helpers import get_pyxel
from ..core import CircleBody, SegmentBody, PolyBody, Body, Space
from ..typing import VecLike
if TYPE_CHECKING:
from .camera import Camera
T = TypeVar("T")
ColorT = Union[Color, int]
DEFAULT_PYXEL = get_pyxel()
DEFAULT_SPACE = None
DEFAULT_COLOR = None
MOMENT_MULTIPLIER = 5.0
DEFAULT_ELASTICITY = 0.0
DEFAULT_FRICTION = 0.0
def body_maker_function(col_arg, func): # type: ignore
"""
Decorate function that normalize input arguments and outputs for a pyxel
context.
"""
@wraps(func)
def maker(*args, **kwargs):
args = list(args)
kwargs.setdefault("space", DEFAULT_SPACE)
kwargs.setdefault("elasticity", DEFAULT_ELASTICITY)
kwargs.setdefault("friction", DEFAULT_FRICTION)
target_body = kwargs.pop("body", None)
if len(args) > col_arg:
color = args.pop(col_arg)
elif "color" in kwargs:
color = kwargs.pop("color")
else:
color = DEFAULT_COLOR
body = func(*args, **kwargs)
if "moment" not in kwargs:
body.moment *= MOMENT_MULTIPLIER
if color is not None:
body.shapes.apply(color=color)
body.color = color
if target_body is not None:
for shape in body.shapes:
shape_ = shape.copy(body=target_body)
if target_body.space:
target_body.space.add(shape_)
body = target_body
return body
return maker
# We use this trick to convince static analysis of the right types while
# sidekick do not provide good types for Curried functions.
def body_maker(n: int) -> Callable[[T], T]:
def decorator(fn: T) -> T:
return body_maker_function(n, fn) # type: ignore
return decorator
#
# Basic geometric shapes
#
@body_maker(3)
def circ(x: float, y: float, r: float, col: int = None, **kwargs) -> CircleBody:
"""
Creates a body with a Circle shape attached to it.
Args:
x: Center point x coordinate
y: Center point y coordinate
r: Circle radius
col: Object's color
"""
if col is not None:
kwargs["color"] = col
return CircleBody(r, position=(x, y), **kwargs)
@body_maker(4)
def line(
x1: float,
y1: float,
x2: float,
y2: float,
col: int = None,
radius: float = 1.0,
**kwargs
) -> SegmentBody:
"""
Creates a body with a Segment shape attached to it.
Args:
x1: x coordinate of starting point
y1: y coordinate of starting point
x2: x coordinate of ending point
y2: y coordinate of ending point
col: Object's color
radius (float): Collision radius for line element.
"""
if col is not None:
kwargs["color"] = col
return SegmentBody((x1, y1), (x2, y2), radius=radius, **kwargs)
@body_maker(6)
def tri(
x1: float,
y1: float,
x2: float,
y2: float,
x3: float,
y3: float,
col: int = None,
radius: float = 0.0,
**kwargs
) -> PolyBody:
"""
Creates a Pymunk body with a triangular Poly shape attached to it.
Args:
x1: x coordinate of first point
y1: y coordinate of first point
x2: x coordinate of second point
y2: y coordinate of second point
x3: x coordinate of last point
y3: y coordinate of last point
col: Object's color
radius: Collision radius for line element.
"""
if col is not None:
kwargs["color"] = col
return PolyBody([(x1, y1), (x2, y2), (x3, y3)], radius=radius, **kwargs)
@body_maker(4)
def rect(
x: float,
y: float,
w: float,
h: float,
col: int = None,
radius: float = 0.0,
**kwargs
) -> PolyBody:
"""
Creates a Pymunk body with a triangular Poly shape attached to it.
Args:
x: x coordinate of starting point
y: y coordinate of starting point
w: width
h: height
col: Object's color
radius: Collision radius for line element.
"""
x_ = x + w / 2
y_ = y + h / 2
if col is not None:
kwargs["color"] = col
return PolyBody.new_box((w, h), position=(x_, y_), radius=radius, **kwargs)
@body_maker(1)
def poly(
vs: Sequence[VecLike], col: int = None, radius: float = 0.0, **kwargs
) -> PolyBody:
"""
Creates a Pymunk body with a polygonal shape attached to it.
Args:
vs: sequence of vertices.
col: Object's color
radius: collision radius tolerance.
"""
if col is not None:
kwargs["color"] = col
return PolyBody(vs, radius=radius, **kwargs)
@body_maker(4)
def margin(
x: float = 0,
y: float = 0,
width: float = None,
height: float = None,
col: int = None,
**kwargs
) -> Body:
"""
Creates a margin around the screen.
"""
pyxel = getattr(DEFAULT_SPACE, "pyxel", DEFAULT_PYXEL)
if width is None:
width = pyxel.width - 1
if height is None:
height = pyxel.height - 1
a, b, c, d = (x, y), (x + width, y), (x + width, y + height), (x, y + height)
# noinspection PyProtectedMember
if col is not None:
kwargs["color"] = col
opts = {k: kwargs.pop(k) for k in Body._init_kwargs if k in kwargs}
body = Body(body_type=Body.STATIC, **opts)
body.create_segment(a, b, **kwargs)
body.create_segment(b, c, **kwargs)
body.create_segment(c, d, **kwargs)
body.create_segment(d, a, **kwargs)
return body
class PhysSpace(Space):
_init_kwargs = {
*Space._init_kwargs,
"background_color",
"sub_steps",
"camera",
"pyxel",
}
background_color: int = 0
sub_steps: int = 1
camera: "Camera"
pyxel: Any = None
def update(self, dt: float = None):
"""
Default update step for space.
"""
if dt is None:
fps = getattr(self.pyxel, "DEFAULT_FPS", 30)
dt = 1 / fps
self.step(dt, self.sub_steps, skip_events=("after-step",))
def draw(self, options=None, clear: Union[bool, Color] = False):
"""
Draw space on screen.
"""
if clear is True:
self.pyxel.cls(self.background_color)
elif not isinstance(clear, bool):
self.pyxel.cls(clear)
self.debug_draw(options or self.draw_options)
self._execute_step_handlers("after-step")
def run(self):
"""
Run pyxel engine alongside with physics.
"""
self.pyxel.run(self.update, lambda: self.draw(clear=True))
# noinspection PyTypeHints
def space(
bg: ColorT = Color.BLACK,
col: ColorT = Color.WHITE,
camera=None,
draw_options=None,
wireframe: bool = False,
friction: float = 0.0,
elasticity: float = 0.0,
sub_steps: int = 1,
pyxel: Any = None,
**kwargs
) -> PhysSpace:
"""
Create a space object.
Args:
bg:
Background color. If set to None, prevents clearing screen.
col:
Default foreground color.
camera:
A module of functions with all drawing functions of the Pyxel API.
This can be used to implement cameras or to implement transformations
before committing pixels to the screen.
draw_options:
An instance of :class:`easymunk.pyxel.DebugDraw()`. Takes precedence
over the camera and wireframe options.
wireframe:
Draw shapes in wireframe mode.
elasticity:
Default elasticity of shapes in space.
friction:
Default friction of shapes in space.
sub_steps:
The number of physics sub-steps to perform at each iteration.
Keyword Args:
It accepts arguments of :class:`easymunk.Space`.
"""
global DEFAULT_SPACE, DEFAULT_FRICTION, DEFAULT_ELASTICITY, DEFAULT_COLOR
if pyxel is None:
pyxel = get_pyxel()
if camera is None:
camera = pyxel
if draw_options is None:
draw_options = DrawOptions(camera, wireframe=wireframe, keep_shape_colors=True)
else:
draw_options = draw_options
kwargs["background_color"] = bg
kwargs["sub_steps"] = sub_steps
kwargs["camera"] = camera
DEFAULT_FRICTION = friction
DEFAULT_ELASTICITY = elasticity
DEFAULT_COLOR = col
DEFAULT_SPACE = PhysSpace(draw_options=draw_options, pyxel=pyxel, **kwargs)
return DEFAULT_SPACE
def moment_multiplier(value: float = None) -> float:
"""
Default multiplier used to calculate the moment of standard shapes.
Call with argument to set value, and return value if called with no
arguments.
"""
global MOMENT_MULTIPLIER
if value is None:
return MOMENT_MULTIPLIER
else:
MOMENT_MULTIPLIER = float(value)
return MOMENT_MULTIPLIER
def reset_space():
"""
Reset the default space.
"""
global DEFAULT_SPACE
DEFAULT_SPACE = None
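
if __name__ == "__main__":
    # Usage sketch (illustrative; assumes the pyxel package is installed and
    # that Color members coerce to pyxel color ints as elsewhere in this file).
    import pyxel
    pyxel.init(160, 120)
    sp = space(friction=0.5)          # becomes the module-level default space
    margin()                          # static walls around the screen edges
    circ(80, 30, 8, col=Color.WHITE)  # a ball near the top of the screen
    sp.run()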
|
StarcoderdataPython
|
3377062
|
<reponame>pcarivbts/vbts-webadmin<filename>vbts_webadmin/tests/locust/locustfile_api_promo.py
"""
Copyright (c) 2015-present, Philippine-California Advanced Research Institutes-
The Village Base Station Project (PCARI-VBTS). All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
"""
from locust import HttpLocust
from locust import TaskSet
from locust import task
base_url = 'http://127.0.0.1:7000'
test_promokey = 'SDISC'
test_imsi = 'IMSI001010000000008'
test_imsi2 = 'IMSI001010000000009'
class PromoSubscriptionBehavior(TaskSet):
@task(1)
def promo_status_normal(self):
url = base_url + '/api/promo/status'
name = 'promo_status_normal'
data = {'imsi': test_imsi, 'keyword': test_promokey}
self.client.post(url=url, data=data, name=name)
@task(1)
def promo_subscribe(self):
"""
NOTE: give test_imsi2 a large amount of credit
if we are going to do repetitive tests
"""
data = {'imsi': test_imsi2, 'keyword': test_promokey}
url = base_url + '/api/promo/subscribe'
self.client.post(url=url, data=data)
url = base_url + '/api/promo/unsubscribe'
self.client.post(url=url, data=data)
class PromoQuotaUsageBehavior(TaskSet):
@task(1)
def promo_get_min_bal(self):
url = base_url + '/api/promo/getminbal'
data = {
'trans': 'U_local_sms',
'tariff': 100000
}
self.client.post(url=url, data=data)
@task(1)
def promo_get_service_tariff(self):
url = base_url + '/api/promo/getservicetariff'
data = {
'imsi': test_imsi,
'trans': 'U_local_sms',
'dest': ''
}
self.client.post(url=url, data=data)
@task(1)
def promo_get_service_type(self):
url = base_url + '/api/promo/getservicetype'
data = {
'imsi': test_imsi,
'trans': 'local_sms',
'dest': '631234567'
}
self.client.post(url=url, data=data)
@task(1)
def promo_get_seconds_available(self):
url = base_url + '/api/promo/getsecavail'
data = {
'imsi': test_imsi,
'trans': 'U_local_sms',
'balance': 1000000,
'dest': '6312345678'
}
self.client.post(url=url, data=data)
@task
def promo_quota_deduct(self):
url = base_url + '/api/promo/deduct'
data = {
'imsi': test_imsi,
'trans': 'U_local_sms',
'amount': 100000,
'dest': '6312345678'
}
# applicable only for bulk type
with self.client.post(url=url, data=data,
catch_response=True) as response:
if response.status_code == 400:
response.success()
class UserBehavior(TaskSet):
tasks = {
PromoSubscriptionBehavior: 1,
PromoQuotaUsageBehavior: 1
}
class WebsiteUser(HttpLocust):
host = 'http://1172.16.31.10:7000'
task_set = UserBehavior
min_wait = 1000
max_wait = 5000
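# Usage sketch (added; not part of the original file): with a legacy pre-1.0
# Locust release (which still has HttpLocust) this can be driven headless with
# something like
#   locust -f locustfile_api_promo.py --no-web -c 10 -r 2
# Flag names changed in Locust 1.0+, so check `locust --help` for your version.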
|
StarcoderdataPython
|
1761779
|
<gh_stars>0
# Generated by Django 2.1.5 on 2019-06-09 09:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('gpsschool', '0013_auto_20190609_0557'),
]
operations = [
migrations.AddField(
model_name='task',
name='subject',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='gpsschool.Subject'),
),
]
|
StarcoderdataPython
|
6458655
|
"""Util functions."""
import logging
from functools import wraps
from pathlib import Path
from morphio.mut import Morphology
from tqdm import tqdm
tqdm.pandas()
EXTS = {".asc", ".h5", ".swc"} # allowed extensions
def is_morphology(filename):
"""Returns True if the extension is supported."""
try:
Morphology(filename)
ext = Path(filename).suffix.lower()
return ext in EXTS, ext
except Exception: # pylint: disable=broad-except
return False, None
def silent_logger(log_name):
"""A decorator to silent a logger during the function execution."""
def _silent_logger(function):
@wraps(function)
def decorated_func(*args, **kwargs):
func_logger = logging.getLogger(log_name)
func_logger.disabled = True
try:
return function(*args, **kwargs)
finally:
func_logger.disabled = False
return decorated_func
return _silent_logger
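# Usage sketch (added; not part of the original module): silence the "morphio"
# logger while loading a file. The logger name here is an assumption for
# illustration.
@silent_logger("morphio")
def load_quietly(filename):
    """Load a morphology without emitting records from the silenced logger."""
    return Morphology(filename)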
|
StarcoderdataPython
|
3267265
|
"""
Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import os
import sys
# Parse arguments
if len(sys.argv) != 3:
print('Incorrect number of args')
exit()
engine_path = sys.argv[1]
if not os.path.exists(engine_path):
print(f'Given path {engine_path} does not exist')
exit()
project_path = sys.argv[2]
if not os.path.exists(project_path):
print(f'Given path {project_path} does not exist')
exit()
sys.path.insert(0, os.path.join(engine_path, 'Gems/Atom/RPI/Tools/'))
from atom_rpi_tools.pass_data import PassTemplate
import atom_rpi_tools.utils as utils
# Folder of this py file
dir_name = os.path.dirname(os.path.realpath(__file__))
# Patch render pipeline to insert a custom LyShine parent pass
# Use the Gem::Atom_Feature_Common gem's path, since the default render pipeline comes from this gem
gem_assets_path = os.path.join(engine_path,'Gems/Atom/feature/Common/Assets/')
pipeline_relative_path = 'Passes/MainPipeline.pass'
srcRenderPipeline = os.path.join(gem_assets_path, pipeline_relative_path)
destRenderPipeline = os.path.join(project_path, pipeline_relative_path)
# If the project doesn't have a customized main pipeline
# copy the default render pipeline from Atom_Common_Feature gem to same path in project folder
utils.find_or_copy_file(destRenderPipeline, srcRenderPipeline)
# Load project render pipeline
renderPipeline = PassTemplate(destRenderPipeline)
# Skip if LyShinePass already exist
newPassName = 'LyShinePass'
if renderPipeline.find_pass(newPassName)>-1:
print('Skip merging. LyShinePass already exists')
exit()
# Insert LyShinePass between DebugOverlayPass and UIPass
refPass = 'DebugOverlayPass'
# The data file for new pass request is in the same folder of the py file
newPassRequestFilePath = os.path.join(dir_name, 'LyShinePass.data')
newPassRequestData = utils.load_json_file(newPassRequestFilePath)
insertIndex = renderPipeline.find_pass(refPass) + 1
if insertIndex > 0:
renderPipeline.insert_pass_request(insertIndex, newPassRequestData)
else:
print('Failed to find ', refPass)
exit()
# Update attachment references for the passes following LyShinePass
renderPipeline.replace_references_after(newPassName, 'DebugOverlayPass', 'InputOutput', 'LyShinePass', 'ColorInputOutput')
# Save the updated render pipeline
renderPipeline.save()
|
StarcoderdataPython
|
8144729
|
<reponame>kpahawa/quick_config
import os
from quick_config.provider_wrapper import load_config_from_path
from quick_config.validator import validate_config_dir
config = None
_env_key = 'CONFIG_DIR'
__all__ = [
'config',
'load_config',
'setup_config_with_path',
'clear_config',
]
def clear_config():
global config
config = None
def load_config():
wd = os.getcwd()
conf_dir = os.environ.get(_env_key)
if not conf_dir:
conf_dir = "config"
path = os.path.join(wd, conf_dir)
validate_config_dir(path)
return load_config_from_path(path)
def setup_config_with_path(path: str):
"""
sets up the config provider with an explicit path to the location of the desired directory of configs
:param path: the location of the config directory of a project to parse and load
"""
global config
config = load_config_from_path(path)
if __name__ != '__main__':
if config is None:
config = load_config()
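# Usage sketch (added; not part of the original module): after import, the
# parsed configuration is exposed as `quick_config.config`, e.g.
#     import quick_config
#     print(quick_config.config)
# Set the CONFIG_DIR environment variable to point at a custom directory
# before importing to override the default "config" folder.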
|
StarcoderdataPython
|
6642065
|
<filename>CUILArg.py
class CUIL():
def __init__(self, Dni, Sexo):
self.dni = Dni.replace('.', '')
self.sexo = Sexo
self.get = ''
self.get_dni()
self.get_sexo()
self.get_cuil()
def get_dni(self):
if len(self.dni) == 7:
self.dni = '0' + self.dni
return True
elif len(self.dni) != 8:
return False
def get_sexo(self):
if self.sexo.startswith('f'):
self.sexo = 'f'
elif self.sexo.startswith('m'):
self.sexo = 'm'
else:
return False
return True
def get_cuil(self):
self.get = ''
if self.sexo == 'f':
self.get += '27'
else:
self.get += '20'
self.get += '-'
self.get += self.dni
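        # Check digit (added explanatory comment): weight the digits of
        # "TT-DDDDDDDD" (skipping the dash at index 2) by 5,4,3,2,7,6,5,4,3,2
        # and take 11 minus the weighted sum mod 11 — the Argentine mod-11 scheme.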
Suma_digitos = str(11 - (int(self.get[0])*5 + int(self.get[1])*4+ int(self.get[3])*3+ int(self.get[4])*2+ int(self.get[5])*7+ int(self.get[6])*6+ int(self.get[7])*5+ int(self.get[8])*4+ int(self.get[9])*3+ int(self.get[10])*2) % 11)
if Suma_digitos == '11':
Suma_digitos = '0'
elif Suma_digitos == '10':
Suma_digitos = '9'
self.get += '-'
self.get += Suma_digitos
def get(Dni, Sexo):
return CUIL(Dni = Dni, Sexo = Sexo).get
|
StarcoderdataPython
|
6527373
|
import os
from daskperiment.util.text import trim_indent
class TestText(object):
def test_trim_indent(self):
res = trim_indent("")
assert res == ""
res = trim_indent("ab")
assert res == "ab"
res = trim_indent(os.linesep)
assert res == os.linesep
def test_trim_multi(self):
res = trim_indent("""
aaa""")
assert res == """
aaa"""
res = trim_indent(""" aaa
aaa""")
assert res == """aaa
aaa"""
res = trim_indent(""" aaa
aaa""")
assert res == """aaa
aaa"""
res = trim_indent(""" aaa
aaa""")
assert res == """aaa
aaa"""
def test_trim_multi_endsep(self):
res = trim_indent("""
aaa
""")
assert res == """
aaa
"""
res = trim_indent(""" aaa
aaa
""")
assert res == """aaa
aaa
"""
res = trim_indent(""" aaa
aaa
""")
assert res == """aaa
aaa
"""
def test_trim_multi_blankline(self):
res = trim_indent(""" aaaa
a
aa
aaa
aaaa
aaaaa
""")
assert res == """aaaa
a
"""
|
StarcoderdataPython
|
6651092
|
from lxml import etree
from dataset.data import Data
from strategy.parser.default_parser import DefaultParser
import re
class PostParser(DefaultParser):
def __init__(self):
self.super = super(PostParser, self)
self.super.__init__()
def get_url_value(self):
v = re.search(r'\d*.html$', self._url).group()
return int(re.search(r'^\d*', v).group())
def clear_space(self, content):
# r, n = re.subn(r'>\\n\s*<', '><', content)
r = re.compile(r'\n\s*')
return r.sub('', content)
def _get_new_data(self, dom):
new_datas = []
try:
nodes = dom.xpath("//article//h1//a/text()")
if len(nodes) < 1:
return None
author = nodes[0].encode('utf8')
nodes = dom.xpath("//article//h1/text()")
            if len(nodes) < 2:
return None
title = nodes[1].encode('utf8')
content = ''
nodes = dom.xpath("//article//section")
for node in nodes:
content = content + etree.tostring(node, encoding='utf8')
data = Data('post')
data.set('title', title)
data.set('author', self.clear_space(author))
data.set('content', self.clear_space(content))
data.set('id', self.get_url_value())
new_datas.append(data)
except Exception as e:
print(str(e))
return new_datas
def _get_next_page_url(self, dom):
return None
|
StarcoderdataPython
|
5082141
|
<reponame>lh70/s-connect-python
import esp32
from lh_lib.sensors.sensor import AbstractSensor
class Hall(AbstractSensor):
"""
sets an integer of range +- unknown representing the current internal hall sensor reading
"""
def update(self):
self.value = esp32.hall_sensor()
|
StarcoderdataPython
|
9642762
|
from django.test import TestCase
from playlists.models import Playlist
# Create your tests here.
class PlaylistModelTests(TestCase):
def setUp(self):
Playlist.objects.create(code='1', title='playlist title 1')
def test_instance_get_string_repr(self):
""" Playlist object string representation returns its title
"""
playlist_1 = Playlist.objects.get(code='1')
        self.assertEqual(str(playlist_1), playlist_1.title)
def test_instance_get_youtube_valid_url(self):
""" All Playlist models have a property that returns the external
youtube url.
"""
playlist_1 = Playlist.objects.get(code='1')
        self.assertEqual(playlist_1.youtube_url, 'https://www.youtube.com/playlist?list=1')
|
StarcoderdataPython
|
1613404
|
from leapp.actors import Actor
from leapp.libraries.actor import opensshuseprivilegeseparationcheck
from leapp.models import Report, OpenSshConfig
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
class OpenSshUsePrivilegeSeparationCheck(Actor):
"""
UsePrivilegeSeparation configuration option was removed.
Check the value of UsePrivilegeSeparation in OpenSSH server config file
and warn about its deprecation if it is set to non-default value.
"""
name = 'open_ssh_use_privilege_separation'
consumes = (OpenSshConfig, )
produces = (Report, )
tags = (ChecksPhaseTag, IPUWorkflowTag)
def process(self):
opensshuseprivilegeseparationcheck.process(self.consume(OpenSshConfig))
|
StarcoderdataPython
|
3375923
|
import torch
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
import h5py
import os
from glob import glob
from patch_manager import StridedRollingPatches2D, StridedPatches2D, NoPatches2D
from utils import squeeze_repr
import torch.utils.data as torch_data
import numpy as np
from transforms import RndAugmentationTfs, add_sp_gauss_noise
import torchvision
import nifty.graph.rag as nrag
class SpgDset(torch_data.Dataset):
def __init__(self, root_dir, patch_manager="", patch_stride=None, patch_shape=None, reorder_sp=False,
spatial_augmentation=False,
intensity_augmentation=False,
noise_augmentation=False):
""" dataset for loading images (raw, gt, superpixel segs) and according rags"""
self.norm_tf = torchvision.transforms.Normalize(0, 1, inplace=False)
self.graph_dir = os.path.join(root_dir, 'graph_data')
self.pix_dir = os.path.join(root_dir, 'pix_data')
self.graph_file_names = sorted(glob(os.path.join(self.graph_dir, "*.h5")))
self.pix_file_names = sorted(glob(os.path.join(self.pix_dir, "*.h5")))
self.reorder_sp = reorder_sp
self.intensity_augmentation = intensity_augmentation
if intensity_augmentation:
self.augm_tf = RndAugmentationTfs(patch_shape, n_chnl_for_intensity=1)
self.spatial_augmentation = spatial_augmentation
if spatial_augmentation:
self.spatial_augmentation = spatial_augmentation
self.intensity_augmentation = intensity_augmentation
self.noise_augmentation = noise_augmentation
pix_file = h5py.File(self.pix_file_names[0], 'r')
self.image_shape = pix_file["gt"][:].shape
if patch_manager == "rotated":
self.pm = StridedRollingPatches2D(patch_stride, patch_shape, self.image_shape)
elif patch_manager == "no_cross":
self.pm = StridedPatches2D(patch_stride, patch_shape, self.image_shape)
else:
self.pm = NoPatches2D()
self.length = len(self.graph_file_names) * np.prod(self.pm.n_patch_per_dim)
print('found ', self.length, " data files")
def __len__(self):
return self.length
def viewItem(self, idx):
pix_file = h5py.File(self.pix_file_names[idx], 'r')
graph_file = h5py.File(self.graph_file_names[idx], 'r')
raw = pix_file["raw"][:]
gt = pix_file["gt"][:]
sp_seg = graph_file["node_labeling"][:]
fig, (a1, a2, a3) = plt.subplots(1, 3, sharex='col', sharey='row', gridspec_kw={'hspace': 0, 'wspace': 0})
a1.imshow(raw, cmap='gray')
a1.set_title('raw')
a2.imshow(cm.prism(gt/gt.max()))
a2.set_title('gt')
a3.imshow(cm.prism(sp_seg/sp_seg.max()))
a3.set_title('sp')
plt.tight_layout()
plt.show()
def __getitem__(self, idx):
img_idx = idx // np.prod(self.pm.n_patch_per_dim)
patch_idx = idx % np.prod(self.pm.n_patch_per_dim)
pix_file = h5py.File(self.pix_file_names[img_idx], 'r')
graph_file = h5py.File(self.graph_file_names[img_idx], 'r')
raw = pix_file["raw_2chnl"][:]
        if raw.ndim == 2:
            raw = torch.from_numpy(raw.astype(np.float64)).float().unsqueeze(0)
        else:
            raw = torch.from_numpy(raw.astype(np.float64)).float()
# raw -= raw.min()
# raw /= raw.max()
nc = raw.shape[0]
        gt = torch.from_numpy(pix_file["gt"][:].astype(np.int64)).unsqueeze(0).float()
        sp_seg = torch.from_numpy(graph_file["node_labeling"][:].astype(np.int64)).unsqueeze(0).float()
        affs = torch.from_numpy(graph_file["affinities"][:].astype(np.float64)).float()
offs = torch.from_numpy(graph_file["offsets"][:]).long()
augm_or_not = torch.randint(0, 3, (3,))
all = torch.cat([raw, gt, sp_seg, affs], 0)
patch = self.pm.get_patch(all, patch_idx)
if augm_or_not[0] == 0 and self.spatial_augmentation:
spat_tf, int_tf = self.augm_tf.sample(1, 1)
patch = spat_tf(patch)
if not self.reorder_sp:
return patch[:nc], patch[nc:nc+1], patch[nc+1:nc+2], patch[nc+2:], offs, torch.tensor([img_idx, patch_idx])
gt = patch[nc:nc+1]
sp_seg = patch[nc+1:nc+2]
augm_raw = patch[:nc]
if augm_or_not[1] == 0 and self.intensity_augmentation:
spat_tf, int_tf = self.augm_tf.sample(1, 1)
augm_raw = int_tf(augm_raw)
if augm_or_not[2] == 0 and self.noise_augmentation:
augm_raw = add_sp_gauss_noise(augm_raw, 0.2, 0.1, 0.3)
# relabel to consecutive ints starting at 0
un = torch.unique(sp_seg)
mask = sp_seg == un[:, None, None]
sp_seg = (mask * (torch.arange(len(un), device=sp_seg.device)[:, None, None] + 1)).sum(0) - 1
un = torch.unique(gt)
mask = gt == un[:, None, None]
gt = (mask * (torch.arange(len(un), device=gt.device)[:, None, None] + 1)).sum(0) - 1
return augm_raw, gt.float()[None], sp_seg.float()[None], patch[nc+2:], offs, torch.tensor([img_idx, patch_idx])
def get_graphs(self, indices, patches, device="cpu"):
edges, edge_feat, diff_to_gt, gt_edge_weights, affs = [], [], [], [], []
for idx, patch in zip(indices, patches):
img_idx, patch_idx = idx[0], idx[1]
nodes = torch.unique(patch).unsqueeze(-1).unsqueeze(-1)
graph_file = h5py.File(self.graph_file_names[img_idx], 'r')
# get subgraph defined by patch overlap
es = torch.from_numpy(graph_file["edges"][:]).to(device).sort(0)[0]
iters_1 = (es.unsqueeze(0) == nodes).float().sum(0).sum(0) >= 2
es = es[:, iters_1]
nodes, es, patch = squeeze_repr(nodes.squeeze(-1).squeeze(-1), es, patch.squeeze(0))
rag = nrag.gridRag(patch.squeeze(0).long().cpu().numpy(), int(patch.max()) + 1,
numberOfThreads=1)
            _edges = torch.from_numpy(rag.uvIds().astype(np.int64)).to(device).T.sort(0)[0]
iters_2 = ((es.unsqueeze(1) == _edges.unsqueeze(-1)).float().sum(0) == 2.0).sum(0) == 1
es = es[:, iters_2]
edges.append(es)
edge_feat.append(torch.from_numpy(graph_file["edge_feat"][:]).to(device)[iters_1][iters_2])
diff_to_gt.append(torch.tensor(graph_file["diff_to_gt"][()], device=device))
gt_edge_weights.append(torch.from_numpy(graph_file["gt_edge_weights"][:]).to(device)[iters_1][iters_2])
affs.append(self.pm.get_patch(torch.from_numpy(graph_file["affinities"][:]).to(device), patch_idx))
return edges, edge_feat, diff_to_gt, gt_edge_weights, affs
if __name__ == "__main__":
    # Quick smoke test; "data" is a placeholder root directory, adjust before running.
    dset = SpgDset("data")
    sample = dset[3]
|
StarcoderdataPython
|
208585
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sistema', '0023_auto_20170105_1514'),
]
operations = [
migrations.RenameField(
model_name='detallefactura',
old_name='total',
new_name='subtotal',
),
]
|
StarcoderdataPython
|
8008180
|
def add(x, y):
    return x + y
def mul(x, y):
    return x * y
def sub(x, y):
    return x - y
def div(x, y):
    return x / y
|
StarcoderdataPython
|
1631979
|
#!/usr/bin/python
import os
import sys
import binascii
import grpc
from lndlibs import rpc_pb2 as ln
from lndlibs import rpc_pb2_grpc as lnrpc
from pathlib2 import Path
print("This is the legacy - Python2 only - version.")
if sys.version_info > (3, 0):
print("Can't run on Python3")
sys.exit()
# display config script info
if len(sys.argv) <= 1 or sys.argv[1] == "-h" or sys.argv[1] == "help":
print("# ! always activate virtual env first: source /home/admin/python-env-lnd/bin/activate")
print("# ! and run with with: python /home/admin/config.scripts/lnd.initwallet.py")
print("# creating or recovering the LND wallet")
print("# lnd.initwallet.py new [walletpassword] [?seedpassword]")
print("# lnd.initwallet.py seed [walletpassword] [\"seeds-words-seperated-spaces\"] [?seedpassword]")
print("# lnd.initwallet.py scb [walletpassword] [\"seeds-words-seperated-spaces\"] [filepathSCB] [?seedpassword]")
print("err='missing parameters'")
sys.exit(1)
mode = sys.argv[1]
def new(stub, wallet_password="", seed_entropy=None):
if seed_entropy:
# provide 16-bytes of static data to get reproducible seeds for TESTING!)
print("WARNING: Use this for testing only!!")
request = ln.GenSeedRequest(seed_entropy=seed_entropy)
else:
request = ln.GenSeedRequest()
try:
response = stub.GenSeed(request)
seed_words = response.cipher_seed_mnemonic
seed_words_str = ', '.join(seed_words)
print("seedwords='" + seed_words_str + "'")
# add a 6x4 formatted version to the output
seed_words_6x4 = ""
for i in range(0, len(seed_words)):
if i % 6 == 0 and i != 0:
seed_words_6x4 = seed_words_6x4 + "\n"
single_word = str(i + 1) + ":" + seed_words[i]
while len(single_word) < 12:
single_word = single_word + " "
seed_words_6x4 = seed_words_6x4 + single_word
print("seedwords6x4='" + seed_words_6x4 + "'")
except grpc.RpcError as rpc_error_call:
code = rpc_error_call.code()
print >> sys.stderr, code
details = rpc_error_call.details()
print("err='RPCError GenSeedRequest'")
print("errMore='" + details + "'")
sys.exit(1)
except:
e = sys.exc_info()[0]
print >> sys.stderr, e
print("err='GenSeedRequest'")
sys.exit(1)
request = ln.InitWalletRequest(
        wallet_password=wallet_password,
cipher_seed_mnemonic=seed_words
)
try:
response = stub.InitWallet(request)
except grpc.RpcError as rpc_error_call:
code = rpc_error_call.code()
print >> sys.stderr, code
details = rpc_error_call.details()
print("err='RPCError InitWallet'")
print("errMore='" + details + "'")
sys.exit(1)
except:
e = sys.exc_info()[0]
print >> sys.stderr, e
print("err='InitWallet'")
sys.exit(1)
def seed(stub, wallet_password="", seed_words="", seed_password=""):
request = ln.InitWalletRequest(
        wallet_password=wallet_password,
cipher_seed_mnemonic=seed_words,
recovery_window=250,
aezeed_passphrase=seed_password
)
try:
response = stub.InitWallet(request)
except grpc.RpcError as rpc_error_call:
code = rpc_error_call.code()
print >> sys.stderr, code
details = rpc_error_call.details()
print("err='RPCError InitWallet'")
print("errMore='" + details + "'")
sys.exit(1)
except:
e = sys.exc_info()[0]
print >> sys.stderr, e
print("err='InitWallet'")
sys.exit(1)
def scb(stub, wallet_password="", seed_words="", seed_password="", file_path_scb=""):
with open(file_path_scb, 'rb') as f:
content = f.read()
scb_hex_str = binascii.hexlify(content)
print(scb_hex_str)
request = ln.InitWalletRequest(
        wallet_password=wallet_password,
cipher_seed_mnemonic=seed_words,
recovery_window=250,
        aezeed_passphrase=seed_password,
channel_backups=scb_hex_str
)
try:
response = stub.InitWallet(request)
except grpc.RpcError as rpc_error_call:
code = rpc_error_call.code()
print >> sys.stderr, code
details = rpc_error_call.details()
print("err='RPCError InitWallet'")
print("errMore='" + details + "'")
sys.exit(1)
except:
e = sys.exc_info()[0]
print >> sys.stderr, e
print("err='InitWallet'")
sys.exit(1)
print("err='TODO: implement creating from seed/scb'")
sys.exit(1)
def parse_args():
wallet_password = ""
seed_words = ""
seed_password = ""
filepath_scb = ""
if mode == "new":
print("# *** CREATING NEW LND WALLET ***")
if len(sys.argv) > 2:
wallet_password = sys.argv[2]
if len(wallet_password) < 8:
print("err='wallet password is too short'")
sys.exit(1)
else:
print("err='wallet password is too short'")
sys.exit(1)
if len(sys.argv) > 3:
seed_password = sys.argv[3]
elif mode == "seed" or mode == "scb":
if len(sys.argv) > 2:
wallet_password = sys.argv[2]
if len(wallet_password) < 8:
print("err='wallet password is too short'")
sys.exit(1)
else:
print("err='not correct amount of parameter - missing wallet password'")
sys.exit(1)
if len(sys.argv) > 3:
seed_word_str = sys.argv[3]
seed_words = seed_word_str.split(" ")
if len(seed_words) < 24:
print("err='not 24 seed words separated by just spaces (surrounded with \")'")
sys.exit(1)
else:
print("err='not correct amount of parameter - missing seed string'")
sys.exit(1)
if mode == "seed":
if len(sys.argv) > 4:
seed_password = sys.argv[4]
elif mode == "scb":
if len(sys.argv) > 4:
filepath_scb = sys.argv[4]
scb_file = Path(filepath_scb)
if scb_file.is_file():
print("# OK SCB file exists")
else:
print("err='the given filepathSCB - file does not exists or no permission'")
sys.exit(1)
else:
print("err='not correct amount of parameter - missing seed filepathSCB'")
sys.exit(1)
if len(sys.argv) > 5:
                seed_password = sys.argv[5]
else:
print("err='unknown mode parameter - run without any parameters to see options'")
sys.exit(1)
return wallet_password, seed_words, seed_password, filepath_scb
def main():
wallet_password, seed_words, seed_password, file_path_scb = parse_args()
os.environ['GRPC_SSL_CIPHER_SUITES'] = 'HIGH+ECDSA'
cert = open('/mnt/hdd/lnd/tls.cert', 'rb').read()
ssl_creds = grpc.ssl_channel_credentials(cert)
channel = grpc.secure_channel('localhost:10009', ssl_creds)
stub = lnrpc.WalletUnlockerStub(channel)
if mode == "new":
new(stub, wallet_password)
elif mode == "seed":
seed(stub, wallet_password, seed_words, seed_password)
elif mode == "scb":
scb(stub, wallet_password, seed_words, seed_password, file_path_scb)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3433461
|
<gh_stars>0
from ayeauth import db
from ayeauth.models import BaseModel, _get_uuid
class AuthorizationCode(BaseModel):
__tablename__ = "authorization_codes"
code = db.Column(db.String(36), nullable=False, default=_get_uuid)
expiry = db.Column(db.DateTime(), nullable=False)
state = db.Column(db.String(36), nullable=False)
user_id = db.Column(db.String(36), db.ForeignKey("users.id"))
application_id = db.Column(db.String(36), db.ForeignKey("applications.id"))
def __init__(self, expiry, state, user_id, application_id):
self.expiry = expiry
self.state = state
self.user_id = user_id
self.application_id = application_id
|
StarcoderdataPython
|
6695280
|
<reponame>beikerta/sasmodels<gh_stars>0
r"""
.. warning:: This model and this model description are under review following
concerns raised by SasView users. If you need to use this model,
please email <EMAIL> for the latest situation. *The
SasView Developers. September 2018.*
Definition
----------
Calculates the scattering from a **body-centered cubic lattice** with
paracrystalline distortion. Thermal vibrations are considered to be negligible,
and the size of the paracrystal is infinitely large. Paracrystalline distortion
is assumed to be isotropic and characterized by a Gaussian distribution.
The scattering intensity $I(q)$ is calculated as
.. math::
I(q) = \frac{\text{scale}}{V_p} V_\text{lattice} P(q) Z(q)
where *scale* is the volume fraction of spheres, $V_p$ is the volume of the
primary particle, $V_\text{lattice}$ is a volume correction for the crystal
structure, $P(q)$ is the form factor of the sphere (normalized), and $Z(q)$
is the paracrystalline structure factor for a body-centered cubic structure.
Equation (1) of the 1990 reference\ [#Matsuoka1990]_ is used to calculate
$Z(q)$, using equations (29)-(31) from the 1987 paper\ [#Matsuoka1987]_ for
$Z1$, $Z2$, and $Z3$.
The lattice correction (the occupied volume of the lattice) for a
body-centered cubic structure of particles of radius $R$ and nearest neighbor
separation $D$ is
.. math::
V_\text{lattice} = \frac{16\pi}{3} \frac{R^3}{\left(D\sqrt{2}\right)^3}
The distortion factor (one standard deviation) of the paracrystal is included
in the calculation of $Z(q)$
.. math::
\Delta a = g D
where $g$ is a fractional distortion based on the nearest neighbor distance.
.. figure:: img/bcc_geometry.jpg
Body-centered cubic lattice.
For a crystal, diffraction peaks appear at reduced q-values given by
.. math::
\frac{qD}{2\pi} = \sqrt{h^2 + k^2 + l^2}
where for a body-centered cubic lattice, only reflections where
$(h + k + l) = \text{even}$ are allowed and reflections where
$(h + k + l) = \text{odd}$ are forbidden. Thus the peak positions
correspond to (just the first 5)
.. math::
\begin{array}{lccccc}
q/q_o & 1 & \sqrt{2} & \sqrt{3} & \sqrt{4} & \sqrt{5} \\
\text{Indices} & (110) & (200) & (211) & (220) & (310) \\
\end{array}
.. note::
The calculation of $Z(q)$ is a double numerical integral that must be
carried out with a high density of points to properly capture the sharp
peaks of the paracrystalline scattering. So be warned that the calculation
is slow. Fitting of any experimental data must be resolution smeared for
any meaningful fit. This makes a triple integral which may be very slow.
This example dataset is produced using 200 data points,
*qmin* = 0.001 |Ang^-1|, *qmax* = 0.1 |Ang^-1| and the above default values.
The 2D (Anisotropic model) is based on the reference below where $I(q)$ is
approximated for 1d scattering. Thus the scattering pattern for 2D may not be
accurate, particularly at low $q$. For general details of the calculation and
angular dispersions for oriented particles see :ref:`orientation`. Note that
we are not responsible for any incorrectness of the 2D model computation.
.. figure:: img/parallelepiped_angle_definition.png
Orientation of the crystal with respect to the scattering plane, when
$\theta = \phi = 0$ the $c$ axis is along the beam direction (the $z$ axis).
References
----------
.. [#Matsuoka1987] Hideki Matsuoka et. al. *Physical Review B*, 36 (1987)
1754-1765 (Original Paper)
.. [#Matsuoka1990] Hideki Matsuoka et. al. *Physical Review B*, 41 (1990)
3854-3856 (Corrections to FCC and BCC lattice structure calculation)
Authorship and Verification
---------------------------
* **Author:** <NAME> **Date:** pre 2010
* **Last Modified by:** <NAME> **Date:** September 29, 2016
* **Last Reviewed by:** <NAME> **Date:** March 21, 2016
"""
import numpy as np
from numpy import inf, pi
name = "bcc_paracrystal"
title = "Body-centred cubic lattic with paracrystalline distortion"
description = """
Calculates the scattering from a **body-centered cubic lattice** with
paracrystalline distortion. Thermal vibrations are considered to be
negligible, and the size of the paracrystal is infinitely large.
Paracrystalline distortion is assumed to be isotropic and characterized
by a Gaussian distribution.
"""
category = "shape:paracrystal"
#note - calculation requires double precision
single = False
# pylint: disable=bad-whitespace, line-too-long
# ["name", "units", default, [lower, upper], "type","description" ],
parameters = [["dnn", "Ang", 220, [-inf, inf], "", "Nearest neighbour distance"],
["d_factor", "", 0.06, [-inf, inf], "", "Paracrystal distortion factor"],
["radius", "Ang", 40, [0, inf], "volume", "Particle radius"],
["sld", "1e-6/Ang^2", 4, [-inf, inf], "sld", "Particle scattering length density"],
["sld_solvent", "1e-6/Ang^2", 1, [-inf, inf], "sld", "Solvent scattering length density"],
["theta", "degrees", 60, [-360, 360], "orientation", "c axis to beam angle"],
["phi", "degrees", 60, [-360, 360], "orientation", "rotation about beam"],
["psi", "degrees", 60, [-360, 360], "orientation", "rotation about c axis"]
]
# pylint: enable=bad-whitespace, line-too-long
source = ["lib/sas_3j1x_x.c", "lib/gauss150.c", "lib/sphere_form.c", "bcc_paracrystal.c"]
def random():
"""Return a random parameter set for the model."""
# Define lattice spacing as a multiple of the particle radius
    # using the formula a = 4 r/sqrt(3). Systems which are ordered
# are probably mostly filled, so use a distribution which goes from
# zero to one, but leaving 90% of them within 80% of the
# maximum bcc packing. Lattice distortion values are empirically
# useful between 0.01 and 0.7. Use an exponential distribution
    # in this range because it's easy.
radius = 10**np.random.uniform(1.3, 4)
d_factor = 10**np.random.uniform(-2, -0.7) # sigma_d in 0.01-0.7
dnn_fraction = np.random.beta(a=10, b=1)
dnn = radius*4/np.sqrt(3)/dnn_fraction
pars = dict(
#sld=1, sld_solvent=0, scale=1, background=1e-32,
dnn=dnn,
d_factor=d_factor,
radius=radius,
)
return pars
# april 6 2017, rkh add unit tests, NOT compared with any other calc method, assume correct!
# add 2d test later
# TODO: fix the 2d tests
q = 4.*pi/220.
tests = [
[{}, [0.001, q, 0.215268], [1.46601394721, 2.85851284174, 0.00866710287078]],
#[{'theta': 20.0, 'phi': 30, 'psi': 40.0}, (-0.017, 0.035), 2082.20264399],
#[{'theta': 20.0, 'phi': 30, 'psi': 40.0}, (-0.081, 0.011), 0.436323144781],
]
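# Worked example (added sketch, not part of the original model file): the first
# few allowed BCC reflections listed in the docstring, q/q0 = sqrt(h^2+k^2+l^2)
# with h + k + l even, evaluated for the default dnn = 220 Ang:
#     q0 = 2*pi/220
#     allowed = [(1, 1, 0), (2, 0, 0), (2, 1, 1), (2, 2, 0), (3, 1, 0)]
#     peaks = [q0*np.sqrt(h*h + k*k + l*l) for (h, k, l) in allowed]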
|
StarcoderdataPython
|
11327942
|
# Owner(s): ["oncall: jit"]
import os
import sys
import warnings
import torch
from typing import List, Dict, Optional
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestScriptModuleInstanceAttributeTypeAnnotation(JitTestCase):
# NB: There are no tests for `Tuple` or `NamedTuple` here. In fact,
# reassigning a non-empty Tuple to an attribute previously typed
# as containing an empty Tuple SHOULD fail. See note in `_check.py`
def test_annotated_falsy_base_type(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.x: int = 0
def forward(self, x: int):
self.x = x
return 1
with warnings.catch_warnings(record=True) as w:
self.checkModule(M(), (1,))
assert(len(w) == 0)
def test_annotated_nonempty_container(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.x: List[int] = [1, 2, 3]
def forward(self, x: List[int]):
self.x = x
return 1
with warnings.catch_warnings(record=True) as w:
self.checkModule(M(), ([1, 2, 3],))
assert(len(w) == 0)
def test_annotated_empty_tensor(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.x: torch.Tensor = torch.empty(0)
def forward(self, x: torch.Tensor):
self.x = x
return self.x
with warnings.catch_warnings(record=True) as w:
self.checkModule(M(), (torch.rand(2, 3),))
assert(len(w) == 0)
def test_annotated_with_jit_attribute(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.x = torch.jit.Attribute([], List[int])
def forward(self, x: List[int]):
self.x = x
return self.x
with warnings.catch_warnings(record=True) as w:
self.checkModule(M(), ([1, 2, 3],))
assert(len(w) == 0)
def test_annotated_class_level_annotation_only(self):
class M(torch.nn.Module):
x: List[int]
def __init__(self):
super().__init__()
self.x = []
def forward(self, y: List[int]):
self.x = y
return self.x
with warnings.catch_warnings(record=True) as w:
self.checkModule(M(), ([1, 2, 3],))
assert(len(w) == 0)
def test_annotated_class_level_annotation_and_init_annotation(self):
class M(torch.nn.Module):
x: List[int]
def __init__(self):
super().__init__()
self.x: List[int] = []
def forward(self, y: List[int]):
self.x = y
return self.x
with warnings.catch_warnings(record=True) as w:
self.checkModule(M(), ([1, 2, 3],))
assert(len(w) == 0)
def test_annotated_class_level_jit_annotation(self):
class M(torch.nn.Module):
x: List[int]
def __init__(self):
super().__init__()
self.x: List[int] = torch.jit.annotate(List[int], [])
def forward(self, y: List[int]):
self.x = y
return self.x
with warnings.catch_warnings(record=True) as w:
self.checkModule(M(), ([1, 2, 3],))
assert(len(w) == 0)
def test_annotated_empty_list(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.x: List[int] = []
def forward(self, x: List[int]):
self.x = x
return 1
with self.assertRaisesRegexWithHighlight(RuntimeError,
"Tried to set nonexistent attribute",
"self.x = x"):
with self.assertWarnsRegex(UserWarning, "doesn't support "
"instance-level annotations on "
"empty non-base types"):
torch.jit.script(M())
def test_annotated_empty_dict(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.x: Dict[str, int] = {}
def forward(self, x: Dict[str, int]):
self.x = x
return 1
with self.assertRaisesRegexWithHighlight(RuntimeError,
"Tried to set nonexistent attribute",
"self.x = x"):
with self.assertWarnsRegex(UserWarning, "doesn't support "
"instance-level annotations on "
"empty non-base types"):
torch.jit.script(M())
def test_annotated_empty_optional(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.x: Optional[str] = None
def forward(self, x: Optional[str]):
self.x = x
return 1
with self.assertRaisesRegexWithHighlight(RuntimeError,
"Wrong type for attribute assignment",
"self.x = x"):
with self.assertWarnsRegex(UserWarning, "doesn't support "
"instance-level annotations on "
"empty non-base types"):
torch.jit.script(M())
def test_annotated_with_jit_empty_list(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.x = torch.jit.annotate(List[int], [])
def forward(self, x: List[int]):
self.x = x
return 1
with self.assertRaisesRegexWithHighlight(RuntimeError,
"Tried to set nonexistent attribute",
"self.x = x"):
with self.assertWarnsRegex(UserWarning, "doesn't support "
"instance-level annotations on "
"empty non-base types"):
torch.jit.script(M())
def test_annotated_with_jit_empty_dict(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.x = torch.jit.annotate(Dict[str, int], {})
def forward(self, x: Dict[str, int]):
self.x = x
return 1
with self.assertRaisesRegexWithHighlight(RuntimeError,
"Tried to set nonexistent attribute",
"self.x = x"):
with self.assertWarnsRegex(UserWarning, "doesn't support "
"instance-level annotations on "
"empty non-base types"):
torch.jit.script(M())
def test_annotated_with_jit_empty_optional(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.x = torch.jit.annotate(Optional[str], None)
def forward(self, x: Optional[str]):
self.x = x
return 1
with self.assertRaisesRegexWithHighlight(RuntimeError,
"Wrong type for attribute assignment",
"self.x = x"):
with self.assertWarnsRegex(UserWarning, "doesn't support "
"instance-level annotations on "
"empty non-base types"):
torch.jit.script(M())
def test_annotated_with_torch_jit_import(self):
from torch import jit
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.x = jit.annotate(Optional[str], None)
def forward(self, x: Optional[str]):
self.x = x
return 1
with self.assertRaisesRegexWithHighlight(RuntimeError,
"Wrong type for attribute assignment",
"self.x = x"):
with self.assertWarnsRegex(UserWarning, "doesn't support "
"instance-level annotations on "
"empty non-base types"):
torch.jit.script(M())
|
StarcoderdataPython
|
8129698
|
<reponame>igushev/fase_lib<gh_stars>1-10
import re
DEMO_PHONE_REGEXP = r'\+100000000[0-9][0-9]'
DEMO_ACTIVATION_CODE = 321654
def PhoneNumberIsDemo(phone_number):
return re.fullmatch(DEMO_PHONE_REGEXP, phone_number) is not None
|
StarcoderdataPython
|
1693531
|
<reponame>andrewsmedina/django-admin2
from __future__ import unicode_literals
from blog.views import BlogListView, BlogDetailView
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from djadmin2.site import djadmin2_site
admin.autodiscover()
djadmin2_site.autodiscover()
urlpatterns = [
url(r'^admin2/', include(djadmin2_site.urls)),
url(r'^admin/', include(admin.site.urls)),
url(r'^blog/', BlogListView.as_view(template_name="blog/blog_list.html"), name='blog_list'),
url(r'^blog/detail(?P<pk>\d+)/$', BlogDetailView.as_view(template_name="blog/blog_detail.html"), name='blog_detail'),
url(r'^$', BlogListView.as_view(template_name="blog/home.html"), name='home'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
StarcoderdataPython
|
5085668
|
__all__ = [ "RunTest" ]
|
StarcoderdataPython
|
9681277
|
from pypika.dialects import Query
from weaverbird.backends.pypika_translator.dialects import SQLDialect
from weaverbird.backends.pypika_translator.translators.base import SQLTranslator
class AthenaTranslator(SQLTranslator):
DIALECT = SQLDialect.ATHENA
QUERY_CLS = Query
SUPPORT_ROW_NUMBER = True
SUPPORT_SPLIT_PART = True
SQLTranslator.register(AthenaTranslator)
|
StarcoderdataPython
|
5099691
|
<reponame>SaxenaKartik/Convin-Backend
from django.contrib import admin
from tracker_api import models
# Register your models here.
admin.site.register(models.Task)
admin.site.register(models.TaskTracker)
|
StarcoderdataPython
|
9718521
|
# number = [5, 6, 4, 2, 1, 9]
# max = number[0]
# for x in number:
# if number > x:
# x = number
# print(max)
number = [1, 1, 2, 2, 5, 7, 8, 5, 5]
# number = list(dict.fromkeys(number))
# print(number)
numbers = []
for numb in number:
    if numb not in numbers:
numbers.append(numb)
print(numbers)
|
StarcoderdataPython
|
3213275
|
<filename>backend/readImages.py
import numpy as np
# import pandas as pd
# f = open('dataset/data/imagedata.txt')
# contents = f.read()
# f.close()
checkArray = np.loadtxt('dataset/data/imagedata.npy')
checkArray = checkArray.reshape(1800, 48400)
print(checkArray.shape)
print(checkArray.size)
|
StarcoderdataPython
|
1818412
|
"""
* *******************************************************
* Copyright (c) VMware, Inc. 2016-2018. All Rights Reserved.
* SPDX-License-Identifier: MIT
* *******************************************************
*
* DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
* EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
* WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
* NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""
__author__ = 'VMware, Inc.'
from samples.vsphere.common.vim.file import (detect_file, delete_file,
parse_datastore_path)
from samples.vsphere.common.vim.inventory import get_datastore_mo
from samples.vsphere.common.vim import datastore_file
def setup_floppy_image(context):
"""Copy floppy image used to run vcenter samples"""
floppy_src_url = context.testbed.config['FLOPPY_SRC_URL']
datacenter_name = context.testbed.config['FLOPPY_DATACENTER_NAME']
datastore_path = context.testbed.config['FLOPPY_DATASTORE_PATH']
(datastore_name, path) = parse_datastore_path(datastore_path)
datastore_mo = get_datastore_mo(context.client,
context.service_instance._stub,
datacenter_name,
datastore_name)
if not datastore_mo:
raise Exception("Could not find datastore '{}'".format(datastore_name))
# See if the Floppy image exists. Copy it into the system if it does not
# exist
dsfile = datastore_file.File(datastore_mo)
if not dsfile.exists(datastore_path):
print("Putting Floppy file from '{}' at '{}'".
format(floppy_src_url, datastore_path))
dsfile.put(path=path, src_url=floppy_src_url)
def cleanup_floppy_image(context):
"""Delete floppy image after running samples"""
datacenter_name = context.testbed.config['FLOPPY_DATACENTER_NAME']
datastore_path = context.testbed.config['FLOPPY_DATASTORE_PATH']
delete_file(context.client,
context.service_instance,
'Floppy Image',
datacenter_name,
datastore_path)
def detect_floppy_image(context):
"""Find the floppy image used to run vcenter samples"""
datacenter_name = context.testbed.config['FLOPPY_DATACENTER_NAME']
datastore_path = context.testbed.config['FLOPPY_DATASTORE_PATH']
return detect_file(context, 'Floppy Image', datacenter_name, datastore_path)
def setup(context):
setup_floppy_image(context)
def cleanup(context):
cleanup_floppy_image(context)
def validate(context):
return detect_floppy_image(context)
|
StarcoderdataPython
|
8188956
|
<gh_stars>1-10
import discord
from discord_components import Button, ButtonStyle
import modules.buttons.globals as global_values
class ComponentMessage:
def __init__(self, actionrows, **kwargs):
self.temporary = kwargs["temporary"] if "temporary" in kwargs else True
self.components = []
i = 0
for row in actionrows:
self.components.append([])
for component in row:
self.components[len(self.components) - 1].append(FullButton(self, i, **component))
i += 1
self.message = None
self.embed = None
        self.content = None
    # Sends the message
async def send(self, _channel, **kwargs):
self.embed = kwargs["embed"] if "embed" in kwargs else None
self.content = kwargs["content"] if "content" in kwargs else None
if not (self.content or self.embed):
raise Exception("Missing content or embed")
self.message = await _channel.send(self.content, embed=self.embed, components=self.components)
    # Updates the message
async def update(self, actionrows=None):
if not self.message:
return
if actionrows:
for row in self.components:
for component in row:
component.delete()
self.components = []
i = 0
for row in actionrows:
self.components.append([])
for component in row:
self.components[len(self.components) - 1].append(FullButton(self, i, **component))
i += 1
await self.message.edit(self.content, embed=self.embed, components=self.components)
    # Deletes the object (and the message if force=True)
async def delete(self, force=False, keepButtons=False):
if self.temporary or force:
await self.message.delete()
elif not keepButtons:
await self.message.edit(components=[])
for row in self.components:
for component in row:
component.delete()
del component
class FullButton(Button):
def __init__(self, _componentMessage, index, **kwargs):
self.effect = kwargs.pop("effect")
self.cond = kwargs.pop("cond")
super().__init__(**kwargs)
self.componentMessage = _componentMessage
self.index = index
global_values.components[self.id] = self
async def disable(self):
self.disabled = True
await self.componentMessage.update()
async def enable(self):
self.disabled = False
await self.componentMessage.update()
def delete(self):
del global_values.components[self.id]
# Module created by Le Codex#9836
|
StarcoderdataPython
|
3515855
|
<filename>src/models/ops/conv_blocks.py
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.nn import relu6
from models.ops import conv_ops as ops
# Common convolution block consisting of:
# > conv2d
# > batch normalization
# > activation
# > dropout
class basic_conv_block(layers.Layer):
# Args:
    # filters: integer with the number of convolution filters
    # kernel: pair of numbers with the kernel size
    # strides: array of strides
    # dropout: fraction at which dropout is applied
    # activation: the activation type of the layer
def __init__(self,
filters,
kernel,
stride=1,
dropout=0.25,
activation="ReLU",
name="conv_block", **kwargs):
super(basic_conv_block, self).__init__(name=name, **kwargs)
self.filters = filters
self.kernel = kernel
self.strides = [1, stride, stride, 1]
self.dropout = dropout
self.activation = activation
self.conv = ops.normal_conv(filters, kernel, strides=self.strides, name=name+"_conv2d")
self.bn = layers.BatchNormalization(name=name+"_bn")
if self.activation == "ReLU":
self.activation = layers.Activation("relu", name=name+"_relu")
if self.activation == "ReLU6":
self.activation = ops.ReLU6(name=name+"_relu6")
self.dropout = layers.Dropout(dropout, name=name+"_dropout")
#
    # serializes the layer configuration
def get_config(self):
config = super(basic_conv_block, self).get_config()
config.update({
"filters": self.filters,
"kernel": self.kernel,
"stride": self.strides[1],
"dropout": self.dropout,
"activation": self.activation
})
return config
def call(self, inputs, training=None):
        # Convolution operation
x = self.conv(inputs)
x = self.bn(x)
x = self.activation(x)
if training == True:
x = self.dropout(x)
return x
# Pointwise convolution block:
# > pointwise
# > batch normalization
# > activation
# > dropout
class pwise_conv_block(layers.Layer):
# Args:
    # filters: integer with the number of convolution filters
    # strides: array of strides
    # dropout: fraction at which dropout is applied
    # activation: the activation type of the layer
def __init__(self,
filters,
stride=1,
dropout=0.25,
activation="ReLU",
name="pwise_conv_block", **kwargs):
super(pwise_conv_block, self).__init__(name=name, **kwargs)
self.filters = filters
self.strides = [1, stride, stride, 1]
self.dropout = dropout
self.activation = activation
self.conv = ops.pointwise_conv(filters, strides=self.strides, name=name+"_pwise_conv")
self.bn = layers.BatchNormalization(name=name+"_bn")
if self.activation == "ReLU":
self.activation = layers.Activation("relu", name=name+"_relu")
if self.activation == "ReLU6":
self.activation = ops.ReLU6(name=name+"_relu6")
self.dropout = layers.Dropout(dropout, name=name+"_dropout")
#
    # serializes the layer configuration
def get_config(self):
config = super(pwise_conv_block, self).get_config()
config.update({
"filters": self.filters,
"stride": self.strides[1],
"dropout": self.dropout,
"activation": self.activation
})
return config
def call(self, inputs, training=None):
        # Pointwise convolution operation
x = self.conv(inputs)
x = self.bn(x)
x = self.activation(x)
if training == True:
x = self.dropout(x)
return x
#
# MobileNetV1 layer block that performs the following:
# > 3x3 Depthwise conv, stride=(1|2)
# > Batch Normalization
# > ReLU Activation
# > 1x1xfilters Conv (Pointwise conv)
# > Batch Normalization
# > ReLU Activations
#
class separable_conv_block(layers.Layer):
#
    # Creates the block according to the arguments
    # Args:
    # filters: number of filters applied by the Pointwise Conv
    # stride: stride of the Depthwise Conv layer, 1 or 2
    # name: name of the block
#
def __init__(self,
filters,
stride,
dropout=0.25,
name="separable_conv_block", **kwargs):
super(separable_conv_block, self).__init__(name=name, **kwargs)
self.filters = filters
self.stride = stride
self.dropout = dropout
        # Make sure filters is an integer
if type(filters) is float:
filters = int(filters)
        # depthwise operation
self.dwise = ops.depthwise_conv((3,3), strides=[1, stride, stride, 1])
self.dwbn = layers.BatchNormalization()
self.dwrelu = layers.Activation("relu")
self.dwdo = layers.Dropout(dropout)
        # pointwise operation
self.pwise = ops.pointwise_conv(filters)
self.pwbn = layers.BatchNormalization()
self.pwrelu = layers.Activation("relu")
self.pwdo = layers.Dropout(dropout)
#
    # serializes the layer configuration
def get_config(self):
config = super(separable_conv_block, self).get_config()
config.update({
"filters": self.filters,
"stride": self.stride,
"dropout": self.dropout
})
return config
def call(self, inputs, training=None):
        # Depthwise operation
x = self.dwise(inputs)
x = self.dwbn(x)
x = self.dwrelu(x)
if training == True:
x = self.dwdo(x)
        # Then the pointwise convolution
x = self.pwise(x)
x = self.pwbn(x)
x = self.pwrelu(x)
if training == True:
x = self.pwdo(x)
return x
#
# Basic block for MobileNetV2; it performs the following:
# > (1x1xinput_channels*t) conv
# > Batch Normalization
# > ReLU6
# > 3x3 Depthwise conv, stride=(1|2)
# > Batch Normalization
# > ReLU6
# > (1x1xoutput_channels) conv
# > If stride == 1 then residual = output + input
#
class BottleneckResidualBlock(layers.Layer):
#
    # Creates the block according to the arguments
    # Args:
    # input_channels: number of channels entering the block
    # filters: number of filters of the final volume
    # stride: stride of the Depthwise Conv layer, 1 or 2
    # t: expansion factor, 6 by default
    # dropout: amount of dropout to apply
    # name: name of the block
#
def __init__(self,
input_channels,
filters,
stride=1,
t=6,
dropout=0.25,
store_output=False,
name="BottleneckResidualBlock", **kwargs):
super(BottleneckResidualBlock, self).__init__(name=name, **kwargs)
        # Make sure input_channels is an integer
if type(input_channels) is float:
input_channels = int(input_channels)
        # Make sure filters is an integer
if type(filters) is float:
filters = int(filters)
self.input_channels = input_channels
self.output_channels = filters
self.stride = stride
self.t = t
self.dropout = dropout
self.store_output = store_output
self.expansion_output = None
self.block_output = None
self.pw_exp = ops.pointwise_conv(input_channels * t, name=name + "_expansion_conv")
self.bn_exp = layers.BatchNormalization(name=name+"_expansion_bn")
self.do_exp = layers.Dropout(self.dropout, name=name+"_expansion_do")
self.dwise = ops.depthwise_conv((3,3), strides=[1, stride, stride, 1], name=name+"_depthwise_conv")
self.bn_dwise = layers.BatchNormalization(name=name+"_depthwise_bn")
self.do_dwise = layers.Dropout(self.dropout, name=name+"_depthwise_do")
self.pw_bottleneck = ops.pointwise_conv(self.output_channels, name=name+"_bottleneck_conv")
self.bn_bottleneck = layers.BatchNormalization(name=name+"_bottleneck_bn")
self.do_bottleneck = layers.Dropout(self.dropout, name=name+"_bottleneck_do")
        # If the input and output channel counts do not match,
        # a 1x1 conv would make them match (left disabled below):
# if self.input_channels != self.output_channels:
# self.pw_residual = ops.pointwise_conv(self.output_channels)
#
    # serializes the layer configuration
def get_config(self):
config = super(BottleneckResidualBlock, self).get_config()
config.update({
"input_channels": self.input_channels,
"filters": self.output_channels,
"stride": self.stride,
"t": self.t,
"dropout": self.dropout,
"store_output": self.store_output
})
return config
def call(self, inputs, training=None):
residual = inputs
        # Expand the input channels
x = self.pw_exp(inputs)
x = self.bn_exp(x)
x = relu6(x)
if training == True:
x = self.do_exp(x)
res_expansion = x
        # Perform the depthwise convolution
x = self.dwise(x)
x = self.bn_dwise(x)
x = relu6(x)
if training == True:
x = self.do_dwise(x)
res_depthwise = x
        # Bottleneck to reduce the output channels
x = self.pw_bottleneck(x)
x = self.bn_bottleneck(x)
        # check whether the residual has to be added
if self.stride == 1:
if self.input_channels == self.output_channels:
x = x + residual
#residual = self.pw_residual(residual)
#x = x + residual
if training == True:
x = self.do_bottleneck(x)
res_bottleneck = x
return res_bottleneck
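# Usage sketch (added; not part of the original file): stacking the blocks into
# a tiny tf.keras backbone. The filter counts, strides and names below are
# illustrative only.
def build_toy_backbone(input_shape=(224, 224, 3)):
    inputs = tf.keras.Input(shape=input_shape)
    x = basic_conv_block(32, (3, 3), stride=2, name="stem")(inputs)
    x = BottleneckResidualBlock(32, 16, stride=1, t=1, name="bneck1")(x)
    x = BottleneckResidualBlock(16, 24, stride=2, t=6, name="bneck2")(x)
    return tf.keras.Model(inputs, x)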
|
StarcoderdataPython
|
1873864
|
import numpy as np
from scipy import integrate
from matplotlib.pylab import *
import matplotlib.pyplot as plt
'''
Stiff combustion equation
'''
def combustion(t,y):
n = len(y)
dydt = np.zeros((n,1))
#dydt[0] = -15*y
dydt[0] = y**2 - y**3
return dydt
# The ``driver`` that will integrate the ODE(s):
if __name__ == '__main__':
backend = 'dopri5'
#backend = 'dop853'
r = integrate.ode(combustion).set_integrator(backend,rtol=1e-4, nsteps=1500,
first_step=1e-6, max_step=1e-1, verbosity=True)
t_start=0.0
t_final=2000.0
delta_t=1
num_steps = np.floor((t_final - t_start)/delta_t) + 1
y0=0.001
r.set_initial_value([y0], t_start)
t = np.zeros((int(num_steps), 1))
y = np.zeros((int(num_steps), 1))
t[0] = t_start
y[0] = y0
sol = []
k = 1
while r.successful() and k < num_steps:
r.integrate(r.t + delta_t)
# Store the results to plot later
t[k] = r.t
y[k] = r.y[0]
k += 1
sol.append([r.t, r.y])
sol = np.array(sol)
# All done! Plot the trajectories:
plt.plot(t, y, 'bo')
ylim([-0.1,0.5])
grid('on')
xlabel('Time [minutes]')
ylabel('Concentration [mol/L]')
plt.show()
|
StarcoderdataPython
|
6640460
|
from alembic import op
"""Drop UserObservation unique constraint in favor of history
Revision ID: <KEY>
Revises: <PASSWORD>
Create Date: 2018-04-06 04:47:18.518343
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<PASSWORD>'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(
u'_user_observation', 'user_observations', type_='unique')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_unique_constraint(
u'_user_observation', 'user_observations',
['user_id', 'observation_id'])
# ### end Alembic commands ###
|
StarcoderdataPython
|
11275326
|
# Generated by Django 3.2.4 on 2021-06-16 18:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('quiz', '0013_auto_20210616_2305'),
]
operations = [
migrations.RenameModel(
old_name='Questions',
new_name='QuizQuestions',
),
]
|
StarcoderdataPython
|
9697876
|
from Clases.Helpers import Helpers
from Clases.QLDBDriver import QLDBDriver
from Clases.Password import Password
class Service():
def __init__(self):
self.password = Password()
self.db = QLDBDriver()
def validate_user(self, user, password):
user_data = self.get_user(user)
if user_data is None:
return False
user_password = user_data["password"]
valid = self.password.verify_password(user_password, password)
if not valid:
return False
return True
def create_user(self, user, password):
password_hash = self.password.hash_password(password)
data_to_save = {
"username" : user,
"password" : password_<PASSWORD>
}
self.db.create_insert("users", data_to_save)
def get_user(self, username):
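        # NOTE (added): interpolating user input into SQL like this is open to
        # injection if `username` is attacker-controlled; parameterized
        # statements would be safer where the driver supports them.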
query = "SELECT * FROM users WHERE username = '{}'".format(username)
data = self.db.create_query(query)
if len(data) > 0:
return data[0]
return None
def save_ws_log(self, request, response):
data_to_save = {
"request": Helpers.remove_special(request),
"response": Helpers.remove_special(response)
}
self.db.create_insert("ws_log", data_to_save)
|
StarcoderdataPython
|
5010266
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import setuptools
from pbr.packaging import parse_requirements
entry_points = {
}
setuptools.setup(
name='placementclient',
version='0.3.0',
description=('Client for the Placement API'),
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/NeCTAR-RC/python-placementclient',
packages=[
'placementclient',
],
include_package_data=True,
install_requires=parse_requirements(),
license="Apache",
zip_safe=False,
classifiers=(
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Operating System :: OS Independent',
),
entry_points=entry_points,
)
|
StarcoderdataPython
|
9792563
|
import mkdocs_gen_files
import os
import glob
# iterate over pages and append glossary
for file in glob.glob("/docs/docs/**/*.md", recursive = True):
if file.endswith('.md'):
text = open(file).read()
with mkdocs_gen_files.open(file.replace('/docs/docs/', ''), "w") as f:
print(text + '\n--8<-- "./glossary.md"', file=f)
|
StarcoderdataPython
|
6484546
|
import os
import docx
from docx.shared import Inches
class doc:
def __init__(self, filesize, filename,generator,WORD_SEPARATOR):
self.filesize = filesize
self.filename = filename
self.generator = generator
self.word_separator = WORD_SEPARATOR
def execute(self):
for i in range(0,len(self.filename)):
statinfo = 0
file_size = int(self.filesize[i])
file_name = self.filename[i]
doc = docx.Document()
doc.save(file_name)
while statinfo < file_size:
#f = open(file_name,"a")
doc = docx.Document(file_name)
temptxt = ""
temptxt = self.generator.generate(self.word_separator)
#print(temptxt)
doc.add_heading('Heading, level 1', level=1)
                for _ in range(1000):  # avoid shadowing the outer file-index variable
                    #f.write(self.generator.generate(self.word_separator))
                    temptxt = temptxt + " " + self.generator.generate(self.word_separator)
try:
doc.add_paragraph(temptxt)
except ValueError as e:
print("ATTENTION:"+temptxt)
doc.add_picture('1.jpg')
doc.add_paragraph('first item in unordered list', style='ListBullet')
doc.add_paragraph('first item in ordered list', style='ListNumber')
#f.close()
doc.save(file_name)
statinfo = os.stat(file_name).st_size
print(statinfo)
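# A minimal usage sketch; WordGenerator is a stand-in for whatever generator the
# caller normally supplies (anything with a generate(separator) -> str method),
# and a local 1.jpg must exist for add_picture to succeed.
if __name__ == "__main__":
    class WordGenerator:
        def generate(self, separator):
            return "lorem" + separator
    doc(filesize=[10000], filename=["sample.docx"],
        generator=WordGenerator(), WORD_SEPARATOR=" ").execute()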
|
StarcoderdataPython
|
4853168
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 14 14:45:53 2022
@author: fatemehmohebbi
"""
from Bio import Phylo
import networkx as nx
import numpy as np
import pandas as pd
from scipy import sparse
import scipy.special as sc
import matplotlib.pyplot as plt
import pydot
from networkx.drawing.nx_pydot import graphviz_layout
import math, statistics
from networkx.algorithms.tree import Edmonds
def find_parents(tree):
    parents = []
    for leaf in tree.find_clades(terminal=True, order="level"):
        path = tree.get_path(leaf)
        # parent is the clade just above the leaf; fall back to the root for depth-1 leaves
        parents.append(path[-2] if len(path) > 1 else tree.root)
    return parents
def to_adjacency_matrix(tree):
"""Create an adjacency matrix (NumPy array) from clades/branches in tree"""
allclades = list(tree.find_clades(order="level"))
lookup = {}
for i, elem in enumerate(allclades):
lookup[elem] = i
adjmat = np.zeros((len(allclades), len(allclades)))
for parent in tree.find_clades(terminal=False, order="level"):
for child in parent.clades:
adjmat[lookup[parent], lookup[child]] = 1
if not tree.rooted:
# Branches can go from "child" to "parent" in unrooted trees
adjmat = adjmat + adjmat.transpose()
return (allclades, np.matrix(adjmat))
def log_choose_k(n, k, flag):
    """log of the binomial coefficient C(n, k); flag is 'int' or 'real'."""
    if flag == 'int':
        # log n! - log (n-k)! - log k!; range(1, m + 1) covers the factors 1..m
        l = sum(np.log(np.arange(1, n + 1))) - sum(np.log(np.arange(1, n - k + 1))) - \
            sum(np.log(np.arange(1, k + 1)))
    if flag == 'real':
        l = sc.gammaln(n + 1) - sc.gammaln(k + 1) - sc.gammaln(n - k + 1)
    return l
def get_ids(patients):
"""the ids may be different for different trees, fix it"""
ids = []
for name in patients:
try:
ids.append(name.split('|')[1])
except:
ids.append(name)
return ids
def read_tree(tree_, meta_file, tree_type):
"""read the tree file and return adjacency and weight matrices,
and the list of patients"""
print('Reading data ...')
metadata = pd.read_csv(meta_file)
metadata = metadata.set_index('name')
tree = Phylo.read(tree_, tree_type)
graph = Phylo.to_networkx(tree)
#adjacency_mat.toarray() to read the mat
# weight_mat = nx.adjacency_matrix(graph, weight='weight')
# adjacency_mat = sparse.csr_matrix(np.sign(weight_mat.toarray()))
weight_mat = nx.to_numpy_matrix(graph, weight='weight')
adjacency_mat = sparse.csr_matrix(np.sign(weight_mat))
patients_ids= []
# dates = []
for node in graph.nodes:
if node.name is not None:
# dates.append(metadata['date'].loc[node.name])
patients_ids.append(str(metadata['host'].loc[node.name]))
else:
# dates.append(np.nan)
patients_ids.append('None')
return adjacency_mat, weight_mat, patients_ids
def tree_reduction(adjacency_mat, weight_mat, patients):
"""the children of each node (leaves as children) are removed if they
have the same label"""
# ids = get_ids(patients)
print('Tree reduction ...')
ids = patients[:]
G = nx.from_numpy_matrix(np.triu(weight_mat), create_using=nx.DiGraph)
for i in list(G.in_degree):
if i[1] == 0:
root = i[0]
dfs_order = list(nx.dfs_preorder_nodes(G, source=root))
dfs_order.reverse()
#are labels matching nodes properly?
nodes_removed = []
for i in dfs_order:
if G.out_degree(i) == 0:
continue
else:
child = list(G.successors(i))
if (ids[child[0]] == ids[child[1]]) & (ids[child[0]] != 'None'):
ids[i] = ids[child[0]]
nodes_removed.extend((child[0], child[1]))
ids[child[0]] = '-1'
ids[child[1]] = '-1'
G.remove_nodes_from(nodes_removed)
weight_mat = nx.to_numpy_matrix(G, weight='weight')
adjacency_mat = sparse.csr_matrix(np.sign(weight_mat))
return adjacency_mat, weight_mat, [x for x in ids if x != '-1']
def get_consensus_net(networks, networks_likelihood):
"""calculates consensus network for all networks given as input"""
inf_nets = np.where(networks_likelihood == -math.inf)
if len(inf_nets[0]) != 0:
networks_likelihood = np.delete(networks_likelihood, inf_nets[0])
networks = np.delete(networks, inf_nets[0], 0)
mean_value = statistics.mean(networks_likelihood)
avg_added_likel = networks_likelihood[:] - mean_value
weight_mats = [i * math.exp(j) for i, j in zip(networks, avg_added_likel)]
weight_sum = weight_mats[0]
for net_i in weight_mats[1:]:
weight_sum = np.add(weight_sum, net_i)
G = nx.from_numpy_matrix(weight_sum, create_using=nx.DiGraph)
edmonds = Edmonds(G)
T = edmonds.find_optimum(attr='weight', kind='max')
tree_adj_mat = nx.to_numpy_matrix(T) #nx.adjacency_matrix(T).toarray()
return np.where(tree_adj_mat != 0, 1, 0)
def plot_network(the_consensus, patients, output_dir):
"""plot and save the transmission network"""
G = nx.from_numpy_matrix(the_consensus, create_using=nx.DiGraph)
    pos = graphviz_layout(G, prog='dot')  # alternative layout: "twopi"
    # drop the 'None' placeholder used for unlabelled internal nodes
    ids = sorted(x for x in set(patients) if x != 'None')
labels = {}
count = 0
for i in ids:
labels[count] = str(i)
count = count + 1
nx.draw(G, pos)
nx.draw_networkx_labels(G, pos, labels, font_size=7.5)
plt.savefig(output_dir + 'transmission_network.svg')
print('Transmission network is saved as transmission_network.svg')
plt.show()
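# A minimal end-to-end sketch under assumed inputs: "tree.nwk" is a Newick tree
# whose leaf names appear in metadata.csv's "name" column alongside a "host"
# column; with a single tree, a single dummy likelihood of 0.0 is enough for
# get_consensus_net. File names are illustrative.
if __name__ == "__main__":
    adjacency, weights, hosts = read_tree("tree.nwk", "metadata.csv", "newick")
    adjacency, weights, hosts = tree_reduction(adjacency, weights, hosts)
    consensus = get_consensus_net([np.asarray(weights)], np.array([0.0]))
    plot_network(consensus, hosts, "./")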
|
StarcoderdataPython
|
3518345
|
import json
def lambda_handler(event, context):
#debugEvent(event)
#.1 Parse the querystring parameters
params = readParams(event)
#.2 Construct the body of the response
responseBody = {
"transactionId": params["transactionId"],
"transactionType": params["transactionType"],
"transactionAmount": params["transactionAmount"],
"message": "Hello from Lambda"
}
#.3 Return the HTTP response
return {
'statusCode': 200,
"headers": [
{"Content-Type": "application/json"}
],
'body': json.dumps(responseBody)
}
def readParams(event):
if not ("queryStringParameters" in event):
raise Exception("Querystring is missed")
params = event["queryStringParameters"]
return {
"transactionId": readParam(params, "transactionId"),
"transactionType": readParam(params, "type"),
"transactionAmount": readParam(params, "amount")
}
def readParam(params, paramName):
if paramName in params: return params[paramName]
    else: raise Exception(f"{paramName} is missing from the query string")
def debugEvent(event):
print("debugEvent")
for x in event:
print(x)
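# A minimal local-invocation sketch with a hand-built, API Gateway style event;
# the parameter values are illustrative.
if __name__ == "__main__":
    event = {"queryStringParameters": {
        "transactionId": "t-001", "type": "PURCHASE", "amount": "42.50"}}
    print(lambda_handler(event, None))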
|
StarcoderdataPython
|
9710475
|
import sys
from flask import Flask
app = Flask(__name__)
@app.route("/")
def main():
with open("pylabs.html") as f:
return f.read()
@app.route("/favicon.ico")
def favicon():
with open("favicon.ico", "rb") as fb:
return fb.read()
@app.route("/hello")
def hello():
return "Hello, world."
|
StarcoderdataPython
|
4852171
|
# This is where the classes and objects are defined
import random
class Game:
def __init__(self, difficulty, length, cave_map):
self.cave_map = cave_map
self.difficulty = difficulty
self.length = length
class Condition:
def __init__(self, name, damage, ac_reduction, duration):
self.name = name
self.damage = damage
self.ac_reduction = ac_reduction
self.duration = duration
poisoned = Condition(name='poisoned', damage=1, duration=0, ac_reduction=0)
hobbled = Condition(name='hobbled', damage=0, duration=1, ac_reduction=0) # not implemented
blind = Condition(name='blind', damage=0, duration=1, ac_reduction=-4) # not implemented
conditions = [poisoned, hobbled, blind]
class Weapon:
def __init__(self, name, attack_bonus, damage, value, apply_condition):
self.name = name
self.attack_bonus = attack_bonus
self.damage = damage
self.value = value
self.apply_condition = apply_condition
unarmed = Weapon(name='fists', attack_bonus=0, damage=1, value=0, apply_condition=0)
rusty_dagger = Weapon(name='rusty dagger', attack_bonus=0, damage=2, value=50, apply_condition=0)
steel_sword = Weapon(name='steel sword', attack_bonus=1, damage=4, value=200, apply_condition=0)
bow = Weapon(name='bow', attack_bonus=4, damage=3, value=300, apply_condition=0)
poisoned_fangs = Weapon(name='poisoned fangs', attack_bonus=2, damage=1, value=0, apply_condition=poisoned)
wand = Weapon(name='wand', attack_bonus=5, damage=5, value=400, apply_condition=0)
weapons = [unarmed, rusty_dagger, steel_sword, bow, poisoned_fangs, wand]
class Enemy:
def __init__(self, name, max_hp, hp, ac, attack, weapon, xp_worth):
self.name = name
self.max_hp = max_hp
self.hp = hp
self.ac = ac
self.attack = attack
self.weapon = weapon
self.xp_worth = xp_worth
goblin = Enemy(name="Goblin", max_hp=4, hp=4, ac=12, attack=4, weapon=rusty_dagger, xp_worth=1)
goblin_champion = Enemy(name="Goblin Champion", max_hp=6, hp=6, ac=14, attack=4, weapon=steel_sword, xp_worth=3)
kobold_archer = Enemy(name="Kobold Archer", max_hp=3, hp=3, ac=10, attack=6, weapon=bow, xp_worth=2)
spider = Enemy(name="Spider", max_hp=3, hp=3, ac=11, attack=5, weapon=poisoned_fangs, xp_worth=3)
enemy_types = [goblin, goblin_champion, kobold_archer, spider]
class PlayerCharacter:
def __init__(self, max_hp, hp, ac, attack, weapon, xp, level, location, condition, class_, killed):
self.max_hp = max_hp
self.hp = hp
self.ac = ac
self.attack = attack
self.weapon = weapon
self.xp = xp
self.level = level
self.location = location
self.condition = condition
self.class_ = class_
self.killed = killed
def sheet(self):
print(f'######################################')
print(f'# {self.class_} HP {self.hp}/{self.max_hp} AC {self.ac} ATT {self.attack} XP {self.xp}')
print(f'# {self.weapon.name.capitalize()} equipped DMG {self.weapon.damage} ATT {self.weapon.attack_bonus}')
print(f'# Enemies killed {self.killed}')
if self.condition != 0:
print(f'# {self.condition.name} ')
print(f'######################################')
print('')
ranger = PlayerCharacter(max_hp=8, hp=8, ac=12, attack=1, weapon=bow, xp=0, level=1,
location=None, condition=0, class_='Ranger', killed=0)
fighter = PlayerCharacter(max_hp=10, hp=10, ac=14, attack=2, weapon=steel_sword, xp=0, level=1,
location=None, condition=0, class_='Fighter', killed=0)
wizard = PlayerCharacter(max_hp=7, hp=7, ac=10, attack=5, weapon=wand, xp=0, level=0,
location=None, condition=0, class_='Wizard', killed=0)
player_classes = [ranger, fighter, wizard]
class Tile:
def __init__(self, ways_out, trap_type, text_description, enemy, link0, link1, link2, link3, visited, start_tile):
self.ways_out = ways_out
self.trap_type = trap_type
self.text_description = text_description
self.enemy = enemy
self.link0 = link0 # link to back/previous tile
self.link1 = link1 # link to next tile left if 1 or 3 ways out
self.link2 = link2 # link to next tile forward if 1 or 3 ways out
self.link3 = link3 # link to next tile right if 3 ways out
self.visited = visited
self.start_tile = start_tile
cave_word1 = ['a dimly lit', 'an ominously dark', 'an eerily quiet',
'an uncomfortably cold', 'a horribly humid']
cave_word2 = ['corridor', 'spot', 'room', 'cavern']
cave_word3 = ['with damp walls', 'with a slippery floor', 'traversed by a snaking creek']
def cave_description():
description = random.choice(cave_word1) + " " + random.choice(cave_word2) + " " + random.choice(cave_word3)
return description
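# A minimal sketch exercising these definitions: describe a random cave tile and
# resolve one illustrative attack; the d20-style roll is an assumption, not taken
# from the rest of the game code.
if __name__ == "__main__":
    print(cave_description())
    hero = fighter
    target = random.choice(enemy_types)
    roll = random.randint(1, 20) + hero.attack + hero.weapon.attack_bonus
    if roll >= target.ac:
        target.hp -= hero.weapon.damage
        print(f"{hero.class_} hits the {target.name} for {hero.weapon.damage} damage")
    else:
        print(f"{hero.class_} misses the {target.name}")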
|
StarcoderdataPython
|
5008172
|
from tts_pipeline.pipelines.waterfall.pipeline import WaterfallPipeline
from tts_pipeline.pipelines.waterfall.models.UnifiedKeywordExtractor import UnifiedKeywordExtractor
from tts_pipeline.pipelines.waterfall.models.gnews_models import GNewsWaterfallEmbedder
from tts_pipeline.pipelines.waterfall.models.examples import DummyWaterfallDimensionalityReducer
from tts_websocketserver.utils import assets_folder
import os
def get_pipeline():
return WaterfallPipeline(
keyword_extractor = UnifiedKeywordExtractor(
target_words = ["Bright","Dark","Full","Hollow","Smooth","Rough","Warm","Metallic","Clear","Muddy","Thin","thick","Pure","Noisy","Rich","Sparse","Soft","Hard"],
ner_model_path = os.path.join(assets_folder, "ner_model")
),
embedder = GNewsWaterfallEmbedder(), # this is very small, so it runs fast
dimensionality_reducer = DummyWaterfallDimensionalityReducer())
|
StarcoderdataPython
|
181004
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing as mp
import os
import pickle
import random
from typing import Optional
import numpy as np
from torch.utils.data import Dataset
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
__all__ = ["BertInformationRetrievalDataset"]
class BaseInformationRetrievalDataset(Dataset):
"""
Base information retrieval dataset on which other datasets are built.
Args:
tokenizer: tokenizer
max_query_length: maximum length of query in tokens
max_passage_length: maximum length of passage in tokens
"""
def __init__(
self, tokenizer: TokenizerSpec, max_query_length: Optional[int] = 31, max_passage_length: Optional[int] = 190,
):
self.tokenizer = tokenizer
self.max_query_length = max_query_length
self.max_passage_length = max_passage_length
def parse_npz(self, file, max_seq_length):
"""
Function which parses passages (documents) in npz format.
After pre-processing and tokenization, the dataset will be saved
as numpy matrix, i_th entry of which corresponds to i_th passage (document)
and has the following form:
[n, token_1, ..., token_n, 0, ..., 0]
where n is the passage length (in tokens) and 0s correspond to pad tokens.
Args:
file: str, path to file with passages (documents)
max_seq_length: maximum length of sequence in tokens
"""
cached_collection = file + ".npz"
if os.path.isfile(cached_collection):
dataset_npz = np.load(cached_collection)["data"]
else:
dataset_dict = self.tokenize_dataset(file, max_seq_length)
dataset_npz = np.zeros((len(dataset_dict), max_seq_length + 1))
for key in dataset_dict:
dataset_npz[key][0] = len(dataset_dict[key])
dataset_npz[key][1 : len(dataset_dict[key]) + 1] = dataset_dict[key]
np.savez(cached_collection, data=dataset_npz)
return dataset_npz
def parse_pkl(self, file, max_seq_length):
"""
Function which parses passages (documents, queries) in pkl format.
After pre-processing and tokenization, the dataset will be saved
as pkl dict, i_th entry of which corresponds to i_th passage (document, query)
and has the following form:
{passage_id: [token_1, ..., token_n]}
where n is the passage length (in tokens).
Args:
file: str, path to file with passages (documents)
max_seq_length: maximum length of sequence in tokens
"""
cached_collection = file + ".pkl"
if os.path.isfile(cached_collection):
dataset_dict = pickle.load(open(cached_collection, "rb"))
else:
dataset_dict = self.tokenize_dataset(file, max_seq_length)
pickle.dump(dataset_dict, open(cached_collection, "wb"))
return dataset_dict
    def tokenize_dataset(self, file, max_seq_length):
        """
        Function which pre-tokenizes the dataset.
        """
        with open(file, "r") as f:
            lines = f.readlines()
        with mp.Pool() as pool:
            dataset_dict = pool.map(self.preprocess_line, lines)
        dataset_dict = {id_: tokens[:max_seq_length] for (id_, tokens) in dataset_dict}
        return dataset_dict
def preprocess_line(self, line):
"""
Parse a single entry (line) of tsv file.
"""
if "\t" not in line:
raise ValueError(f"Provided dataset does not have a form of tsv file")
id_, text = line.split("\t")
token_ids = self.tokenizer.text_to_ids(text.strip())
return int(id_), token_ids
def construct_input(self, token_ids1, max_seq_length, token_ids2=None):
"""
Function which constructs a valid input to BERT from tokens.
If only one list of tokens (token_ids1) is passed, the input will be
[CLS] token_ids1 [SEP]
if two lists of tokens are passed, the input will be
[CLS] token_ids1 [SEP] token_ids2 [SEP]
"""
input_ids = [self.tokenizer.pad_id] * max_seq_length
bert_input = [self.tokenizer.cls_id] + token_ids1 + [self.tokenizer.sep_id]
sentence1_length = len(bert_input)
if token_ids2 is not None:
bert_input = bert_input + token_ids2 + [self.tokenizer.sep_id]
bert_input = bert_input[:max_seq_length]
num_nonpad_tokens = len(bert_input)
input_ids[:num_nonpad_tokens] = bert_input
        input_ids = np.array(input_ids, dtype=np.int64)  # np.long was removed in NumPy >= 1.24
input_mask = input_ids != self.tokenizer.pad_id
input_type_ids = np.ones_like(input_ids)
input_type_ids[:sentence1_length] = 0
return input_ids, input_mask, input_type_ids
def preprocess_bert(self, query_id, psg_ids):
"""
Transforms query id (Q) and a list of passages ids (P1, ..., Pk)
into a tensor of size [k, max_length] with the following rows:
[CLS] Q_text [SEP] Pi_text [SEP], i = 1, ..., k
"""
max_seq_length = self.max_query_length + self.max_passage_length + 3
input_ids, input_mask, input_type_ids = [], [], []
for psg_id in psg_ids:
inputs = self.construct_input(self.queries[query_id], max_seq_length, self._psgid2tokens(psg_id))
input_ids.append(inputs[0])
input_mask.append(inputs[1])
input_type_ids.append(inputs[2])
input_ids = np.stack(input_ids)
input_mask = np.stack(input_mask)
input_type_ids = np.stack(input_type_ids)
return input_ids, input_mask, input_type_ids
def preprocess_dpr(self, query_id, psg_ids):
"""
Transforms query id (Q) and a list of passages ids (P1, ..., Pk)
into two tensors of sizes [1, max_q_length] and [k, max_p_length]
with the following rows:
1) [CLS] Q_text [SEP]
2) [CLS] Pi_text [SEP], i = 1, ..., k
"""
q_input_ids, q_input_mask, q_type_ids = self.construct_input(self.queries[query_id], self.max_query_length + 2)
input_ids, input_mask, input_type_ids = [], [], []
for psg_id in psg_ids:
inputs = self.construct_input(self._psgid2tokens(psg_id), self.max_passage_length + 2)
input_ids.append(inputs[0])
input_mask.append(inputs[1])
input_type_ids.append(inputs[2])
input_ids = np.stack(input_ids)
input_mask = np.stack(input_mask)
input_type_ids = np.stack(input_type_ids)
return (
q_input_ids[None, ...],
q_input_mask[None, ...],
q_type_ids[None, ...],
input_ids,
input_mask,
input_type_ids,
)
def _psgid2tokens(self, psg_id):
"""
Internal function which maps passage id to its tokens.
"""
pass
def psgid2tokens_npz(self, psg_id):
"""
Mapping from passage id to its tokens in case of npz cache format.
"""
seq_len = self.passages[psg_id][0]
return self.passages[psg_id][1 : seq_len + 1].tolist()
def psgid2tokens_pkl(self, psg_id):
"""
Mapping from passage id to its tokens in case of pkl cache format.
"""
return self.passages[psg_id]
class BertInformationRetrievalDataset(BaseInformationRetrievalDataset):
def __init__(
self,
tokenizer: TokenizerSpec,
passages: str,
queries: str,
query_to_passages: str,
max_query_length: Optional[int] = 31,
max_passage_length: Optional[int] = 190,
num_negatives: Optional[int] = 10,
preprocess_fn: Optional[str] = "preprocess_bert",
psg_cache_format: Optional[str] = "npz",
):
"""
Dataset for training information retrieval models.
Args:
tokenizer: tokenizer
passages: path to tsv with [psg_id, psg_text] entries
queries: path to tsv with [query_id, query_text] entries
query_to_passages: path to tsv with
[query_id, pos_psg_id, neg_psg_id_1, ..., neg_psg_id_k] entries
max_query_length: maximum length of query in tokens
max_passage_length: maximum length of passage in tokens
num_negatives: number of negative passages per positive to use for training
preprocess_fn: either preprocess_bert or preprocess_dpr
preprocess_bert: joint input: [CLS] query [SEP] passage [SEP]
preprocess_dpr: separate inputs: [CLS] query [SEP], [CLS] passage [SEP]
psg_cache_format: either pkl or npz
"""
super().__init__(tokenizer, max_query_length, max_passage_length)
self.num_negatives = num_negatives
self.passages = getattr(self, f"parse_{psg_cache_format}")(passages, max_passage_length)
self._psgid2tokens = getattr(self, f"psgid2tokens_{psg_cache_format}")
self.queries = self.parse_pkl(queries, max_query_length)
self.idx2psgs = self.parse_query_to_passages(query_to_passages)
self._preprocess_fn = getattr(self, preprocess_fn)
def __getitem__(self, idx):
query_and_psgs = self.idx2psgs[idx]
query_id, psg_ids = query_and_psgs[0], query_and_psgs[1:]
inputs = self._preprocess_fn(query_id, psg_ids)
return [*inputs, query_id, np.array(psg_ids)]
def __len__(self):
return len(self.idx2psgs)
def parse_query_to_passages(self, file):
"""
Function which parses query to passages correspondence file.
"""
idx2psgs = {}
idx = 0
for line in open(file, "r").readlines():
if "\t" not in line:
raise ValueError(f"Provided dataset does not have a form of tsv file")
query_and_psgs = line.split("\t")
query_and_psgs_ids = [int(id_) for id_ in query_and_psgs]
query_and_rel_psg_ids, irrel_psgs_ids = query_and_psgs_ids[:2], query_and_psgs_ids[2:]
random.shuffle(irrel_psgs_ids)
num_samples = len(irrel_psgs_ids) // self.num_negatives
for j in range(num_samples):
left = self.num_negatives * j
right = self.num_negatives * (j + 1)
idx2psgs[idx] = query_and_rel_psg_ids + irrel_psgs_ids[left:right]
idx += 1
return idx2psgs
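# A minimal construction sketch; the tokenizer class and the three TSV paths are
# assumptions about the caller's environment (NeMo's AutoTokenizer import path
# may differ across versions), so the lines are shown commented out.
#
#   from nemo.collections.common.tokenizers.huggingface import AutoTokenizer
#   from torch.utils.data import DataLoader
#
#   dataset = BertInformationRetrievalDataset(
#       tokenizer=AutoTokenizer("bert-base-uncased"),
#       passages="collection.tsv",
#       queries="queries.tsv",
#       query_to_passages="query2passages.tsv",
#   )
#   loader = DataLoader(dataset, batch_size=4)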
|
StarcoderdataPython
|
6543960
|
# ./pyxb/bundles/wssplat/raw/wsdl11.py
# -*- coding: utf-8 -*-
# PyXB bindings for NM:d363f64a147eb09d66a961a815c9d842964c1c79
# Generated 2016-09-18 17:34:04.329631 by PyXB version 1.2.5 using Python 2.7.12.final.0
# Namespace http://schemas.xmlsoap.org/wsdl/
from __future__ import unicode_literals
import pyxb
import pyxb.binding
import pyxb.binding.saxer
import io
import pyxb.utils.utility
import pyxb.utils.domutils
import sys
import pyxb.utils.six as _six
# Unique identifier for bindings created at the same time
_GenerationUID = pyxb.utils.utility.UniqueIdentifier('urn:uuid:00f20444-7df0-11e6-9fa4-3497f68b2e96')
# Version of PyXB used to generate the bindings
_PyXBVersion = '1.2.5'
# Generated bindings are not compatible across PyXB versions
if pyxb.__version__ != _PyXBVersion:
raise pyxb.PyXBVersionError(_PyXBVersion)
# A holder for module-level binding classes so we can access them from
# inside class definitions where property names may conflict.
_module_typeBindings = pyxb.utils.utility.Object()
# Import bindings for namespaces imported into schema
import pyxb.binding.datatypes
# NOTE: All namespace declarations are reserved within the binding
Namespace = pyxb.namespace.NamespaceForURI('http://schemas.xmlsoap.org/wsdl/', create_if_missing=True)
Namespace.configureCategories(['typeBinding', 'elementBinding'])
def CreateFromDocument (xml_text, default_namespace=None, location_base=None):
"""Parse the given XML and use the document element to create a
Python instance.
@param xml_text An XML document. This should be data (Python 2
str or Python 3 bytes), or a text (Python 2 unicode or Python 3
str) in the L{pyxb._InputEncoding} encoding.
@keyword default_namespace The L{pyxb.Namespace} instance to use as the
default namespace where there is no default namespace in scope.
If unspecified or C{None}, the namespace of the module containing
this function will be used.
@keyword location_base: An object to be recorded as the base of all
L{pyxb.utils.utility.Location} instances associated with events and
objects handled by the parser. You might pass the URI from which
the document was obtained.
"""
if pyxb.XMLStyle_saxer != pyxb._XMLStyle:
dom = pyxb.utils.domutils.StringToDOM(xml_text)
return CreateFromDOM(dom.documentElement, default_namespace=default_namespace)
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
saxer = pyxb.binding.saxer.make_parser(fallback_namespace=default_namespace, location_base=location_base)
handler = saxer.getContentHandler()
xmld = xml_text
if isinstance(xmld, _six.text_type):
xmld = xmld.encode(pyxb._InputEncoding)
saxer.parse(io.BytesIO(xmld))
instance = handler.rootObject()
return instance
def CreateFromDOM (node, default_namespace=None):
"""Create a Python instance from the given DOM node.
The node tag must correspond to an element declaration in this module.
@deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}."""
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)
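# A minimal usage sketch (the file name is illustrative): parse a WSDL document
# into these bindings and read attributes off the root tDefinitions instance.
#
#   with open("service.wsdl", "rb") as f:
#       defs = CreateFromDocument(f.read())
#   print(defs.name, defs.targetNamespace)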
# Complex type {http://schemas.xmlsoap.org/wsdl/}tDocumentation with content type MIXED
class tDocumentation (pyxb.binding.basis.complexTypeDefinition):
"""Complex type {http://schemas.xmlsoap.org/wsdl/}tDocumentation with content type MIXED"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_MIXED
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'tDocumentation')
_XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 37, 2)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
_HasWildcardElement = True
_ElementMap.update({
})
_AttributeMap.update({
})
_module_typeBindings.tDocumentation = tDocumentation
Namespace.addCategoryObject('typeBinding', 'tDocumentation', tDocumentation)
# Complex type {http://schemas.xmlsoap.org/wsdl/}tDocumented with content type ELEMENT_ONLY
class tDocumented (pyxb.binding.basis.complexTypeDefinition):
"""
This type is extended by component types to allow them to be documented
"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'tDocumented')
_XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 43, 2)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element {http://schemas.xmlsoap.org/wsdl/}documentation uses Python identifier documentation
__documentation = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'documentation'), 'documentation', '__httpschemas_xmlsoap_orgwsdl_tDocumented_httpschemas_xmlsoap_orgwsdldocumentation', False, pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6), )
documentation = property(__documentation.value, __documentation.set, None, None)
_ElementMap.update({
__documentation.name() : __documentation
})
_AttributeMap.update({
})
_module_typeBindings.tDocumented = tDocumented
Namespace.addCategoryObject('typeBinding', 'tDocumented', tDocumented)
# Complex type {http://schemas.xmlsoap.org/wsdl/}tExtensibilityElement with content type EMPTY
class tExtensibilityElement (pyxb.binding.basis.complexTypeDefinition):
"""Complex type {http://schemas.xmlsoap.org/wsdl/}tExtensibilityElement with content type EMPTY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = True
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'tExtensibilityElement')
_XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 306, 2)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Attribute {http://schemas.xmlsoap.org/wsdl/}required uses Python identifier required
__required = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(Namespace, 'required'), 'required', '__httpschemas_xmlsoap_orgwsdl_tExtensibilityElement_httpschemas_xmlsoap_orgwsdlrequired', pyxb.binding.datatypes.boolean)
__required._DeclarationLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 305, 2)
__required._UseLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 307, 4)
required = property(__required.value, __required.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__required.name() : __required
})
_module_typeBindings.tExtensibilityElement = tExtensibilityElement
Namespace.addCategoryObject('typeBinding', 'tExtensibilityElement', tExtensibilityElement)
# Complex type {http://schemas.xmlsoap.org/wsdl/}tExtensibleAttributesDocumented with content type ELEMENT_ONLY
class tExtensibleAttributesDocumented (tDocumented):
"""Complex type {http://schemas.xmlsoap.org/wsdl/}tExtensibleAttributesDocumented with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = True
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'tExtensibleAttributesDocumented')
_XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 54, 2)
_ElementMap = tDocumented._ElementMap.copy()
_AttributeMap = tDocumented._AttributeMap.copy()
# Base type is tDocumented
# Element documentation ({http://schemas.xmlsoap.org/wsdl/}documentation) inherited from {http://schemas.xmlsoap.org/wsdl/}tDocumented
_AttributeWildcard = pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://schemas.xmlsoap.org/wsdl/'))
_ElementMap.update({
})
_AttributeMap.update({
})
_module_typeBindings.tExtensibleAttributesDocumented = tExtensibleAttributesDocumented
Namespace.addCategoryObject('typeBinding', 'tExtensibleAttributesDocumented', tExtensibleAttributesDocumented)
# Complex type {http://schemas.xmlsoap.org/wsdl/}tExtensibleDocumented with content type ELEMENT_ONLY
class tExtensibleDocumented (tDocumented):
"""Complex type {http://schemas.xmlsoap.org/wsdl/}tExtensibleDocumented with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = True
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'tExtensibleDocumented')
_XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 67, 2)
_ElementMap = tDocumented._ElementMap.copy()
_AttributeMap = tDocumented._AttributeMap.copy()
# Base type is tDocumented
# Element documentation ({http://schemas.xmlsoap.org/wsdl/}documentation) inherited from {http://schemas.xmlsoap.org/wsdl/}tDocumented
_HasWildcardElement = True
_ElementMap.update({
})
_AttributeMap.update({
})
_module_typeBindings.tExtensibleDocumented = tExtensibleDocumented
Namespace.addCategoryObject('typeBinding', 'tExtensibleDocumented', tExtensibleDocumented)
# Complex type {http://schemas.xmlsoap.org/wsdl/}tDefinitions with content type ELEMENT_ONLY
class tDefinitions (tExtensibleDocumented):
"""Complex type {http://schemas.xmlsoap.org/wsdl/}tDefinitions with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'tDefinitions')
_XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 131, 2)
_ElementMap = tExtensibleDocumented._ElementMap.copy()
_AttributeMap = tExtensibleDocumented._AttributeMap.copy()
# Base type is tExtensibleDocumented
# Element documentation ({http://schemas.xmlsoap.org/wsdl/}documentation) inherited from {http://schemas.xmlsoap.org/wsdl/}tDocumented
# Element {http://schemas.xmlsoap.org/wsdl/}import uses Python identifier import_
__import = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'import'), 'import_', '__httpschemas_xmlsoap_orgwsdl_tDefinitions_httpschemas_xmlsoap_orgwsdlimport', True, pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 112, 6), )
import_ = property(__import.value, __import.set, None, None)
# Element {http://schemas.xmlsoap.org/wsdl/}types uses Python identifier types
__types = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'types'), 'types', '__httpschemas_xmlsoap_orgwsdl_tDefinitions_httpschemas_xmlsoap_orgwsdltypes', True, pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 113, 6), )
types = property(__types.value, __types.set, None, None)
# Element {http://schemas.xmlsoap.org/wsdl/}message uses Python identifier message
__message = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'message'), 'message', '__httpschemas_xmlsoap_orgwsdl_tDefinitions_httpschemas_xmlsoap_orgwsdlmessage', True, pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 114, 6), )
message = property(__message.value, __message.set, None, None)
# Element {http://schemas.xmlsoap.org/wsdl/}portType uses Python identifier portType
__portType = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'portType'), 'portType', '__httpschemas_xmlsoap_orgwsdl_tDefinitions_httpschemas_xmlsoap_orgwsdlportType', True, pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 120, 6), )
portType = property(__portType.value, __portType.set, None, None)
# Element {http://schemas.xmlsoap.org/wsdl/}binding uses Python identifier binding
__binding = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'binding'), 'binding', '__httpschemas_xmlsoap_orgwsdl_tDefinitions_httpschemas_xmlsoap_orgwsdlbinding', True, pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 121, 6), )
binding = property(__binding.value, __binding.set, None, None)
# Element {http://schemas.xmlsoap.org/wsdl/}service uses Python identifier service
__service = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'service'), 'service', '__httpschemas_xmlsoap_orgwsdl_tDefinitions_httpschemas_xmlsoap_orgwsdlservice', True, pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 122, 6), )
service = property(__service.value, __service.set, None, None)
# Attribute targetNamespace uses Python identifier targetNamespace
__targetNamespace = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'targetNamespace'), 'targetNamespace', '__httpschemas_xmlsoap_orgwsdl_tDefinitions_targetNamespace', pyxb.binding.datatypes.anyURI)
__targetNamespace._DeclarationLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 137, 8)
__targetNamespace._UseLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 137, 8)
targetNamespace = property(__targetNamespace.value, __targetNamespace.set, None, None)
# Attribute name uses Python identifier name
__name = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'name'), 'name', '__httpschemas_xmlsoap_orgwsdl_tDefinitions_name', pyxb.binding.datatypes.NCName)
__name._DeclarationLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 138, 8)
__name._UseLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 138, 8)
name = property(__name.value, __name.set, None, None)
_HasWildcardElement = True
_ElementMap.update({
__import.name() : __import,
__types.name() : __types,
__message.name() : __message,
__portType.name() : __portType,
__binding.name() : __binding,
__service.name() : __service
})
_AttributeMap.update({
__targetNamespace.name() : __targetNamespace,
__name.name() : __name
})
_module_typeBindings.tDefinitions = tDefinitions
Namespace.addCategoryObject('typeBinding', 'tDefinitions', tDefinitions)
# Complex type {http://schemas.xmlsoap.org/wsdl/}tImport with content type ELEMENT_ONLY
class tImport (tExtensibleAttributesDocumented):
"""Complex type {http://schemas.xmlsoap.org/wsdl/}tImport with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'tImport')
_XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 143, 2)
_ElementMap = tExtensibleAttributesDocumented._ElementMap.copy()
_AttributeMap = tExtensibleAttributesDocumented._AttributeMap.copy()
# Base type is tExtensibleAttributesDocumented
# Element documentation ({http://schemas.xmlsoap.org/wsdl/}documentation) inherited from {http://schemas.xmlsoap.org/wsdl/}tDocumented
# Attribute namespace uses Python identifier namespace
__namespace = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'namespace'), 'namespace', '__httpschemas_xmlsoap_orgwsdl_tImport_namespace', pyxb.binding.datatypes.anyURI, required=True)
__namespace._DeclarationLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 146, 8)
__namespace._UseLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 146, 8)
namespace = property(__namespace.value, __namespace.set, None, None)
# Attribute location uses Python identifier location
__location = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'location'), 'location', '__httpschemas_xmlsoap_orgwsdl_tImport_location', pyxb.binding.datatypes.anyURI, required=True)
__location._DeclarationLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 147, 8)
__location._UseLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 147, 8)
location = property(__location.value, __location.set, None, None)
_AttributeWildcard = pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://schemas.xmlsoap.org/wsdl/'))
_ElementMap.update({
})
_AttributeMap.update({
__namespace.name() : __namespace,
__location.name() : __location
})
_module_typeBindings.tImport = tImport
Namespace.addCategoryObject('typeBinding', 'tImport', tImport)
# Complex type {http://schemas.xmlsoap.org/wsdl/}tTypes with content type ELEMENT_ONLY
class tTypes (tExtensibleDocumented):
"""Complex type {http://schemas.xmlsoap.org/wsdl/}tTypes with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'tTypes')
_XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 152, 2)
_ElementMap = tExtensibleDocumented._ElementMap.copy()
_AttributeMap = tExtensibleDocumented._AttributeMap.copy()
# Base type is tExtensibleDocumented
# Element documentation ({http://schemas.xmlsoap.org/wsdl/}documentation) inherited from {http://schemas.xmlsoap.org/wsdl/}tDocumented
_HasWildcardElement = True
_ElementMap.update({
})
_AttributeMap.update({
})
_module_typeBindings.tTypes = tTypes
Namespace.addCategoryObject('typeBinding', 'tTypes', tTypes)
# Complex type {http://schemas.xmlsoap.org/wsdl/}tMessage with content type ELEMENT_ONLY
class tMessage (tExtensibleDocumented):
"""Complex type {http://schemas.xmlsoap.org/wsdl/}tMessage with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'tMessage')
_XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 158, 2)
_ElementMap = tExtensibleDocumented._ElementMap.copy()
_AttributeMap = tExtensibleDocumented._AttributeMap.copy()
# Base type is tExtensibleDocumented
# Element documentation ({http://schemas.xmlsoap.org/wsdl/}documentation) inherited from {http://schemas.xmlsoap.org/wsdl/}tDocumented
# Element {http://schemas.xmlsoap.org/wsdl/}part uses Python identifier part
__part = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'part'), 'part', '__httpschemas_xmlsoap_orgwsdl_tMessage_httpschemas_xmlsoap_orgwsdlpart', True, pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 162, 10), )
part = property(__part.value, __part.set, None, None)
# Attribute name uses Python identifier name
__name = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'name'), 'name', '__httpschemas_xmlsoap_orgwsdl_tMessage_name', pyxb.binding.datatypes.NCName, required=True)
__name._DeclarationLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 164, 8)
__name._UseLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 164, 8)
name = property(__name.value, __name.set, None, None)
_HasWildcardElement = True
_ElementMap.update({
__part.name() : __part
})
_AttributeMap.update({
__name.name() : __name
})
_module_typeBindings.tMessage = tMessage
Namespace.addCategoryObject('typeBinding', 'tMessage', tMessage)
# Complex type {http://schemas.xmlsoap.org/wsdl/}tPart with content type ELEMENT_ONLY
class tPart (tExtensibleAttributesDocumented):
"""Complex type {http://schemas.xmlsoap.org/wsdl/}tPart with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'tPart')
_XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 169, 2)
_ElementMap = tExtensibleAttributesDocumented._ElementMap.copy()
_AttributeMap = tExtensibleAttributesDocumented._AttributeMap.copy()
# Base type is tExtensibleAttributesDocumented
# Element documentation ({http://schemas.xmlsoap.org/wsdl/}documentation) inherited from {http://schemas.xmlsoap.org/wsdl/}tDocumented
# Attribute name uses Python identifier name
__name = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'name'), 'name', '__httpschemas_xmlsoap_orgwsdl_tPart_name', pyxb.binding.datatypes.NCName, required=True)
__name._DeclarationLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 172, 8)
__name._UseLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 172, 8)
name = property(__name.value, __name.set, None, None)
# Attribute element uses Python identifier element
__element = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'element'), 'element', '__httpschemas_xmlsoap_orgwsdl_tPart_element', pyxb.binding.datatypes.QName)
__element._DeclarationLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 173, 8)
__element._UseLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 173, 8)
element = property(__element.value, __element.set, None, None)
# Attribute type uses Python identifier type
__type = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'type'), 'type', '__httpschemas_xmlsoap_orgwsdl_tPart_type', pyxb.binding.datatypes.QName)
__type._DeclarationLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 174, 8)
__type._UseLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 174, 8)
type = property(__type.value, __type.set, None, None)
_AttributeWildcard = pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://schemas.xmlsoap.org/wsdl/'))
_ElementMap.update({
})
_AttributeMap.update({
__name.name() : __name,
__element.name() : __element,
__type.name() : __type
})
_module_typeBindings.tPart = tPart
Namespace.addCategoryObject('typeBinding', 'tPart', tPart)
# Complex type {http://schemas.xmlsoap.org/wsdl/}tPortType with content type ELEMENT_ONLY
class tPortType (tExtensibleAttributesDocumented):
"""Complex type {http://schemas.xmlsoap.org/wsdl/}tPortType with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'tPortType')
_XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 179, 2)
_ElementMap = tExtensibleAttributesDocumented._ElementMap.copy()
_AttributeMap = tExtensibleAttributesDocumented._AttributeMap.copy()
# Base type is tExtensibleAttributesDocumented
# Element documentation ({http://schemas.xmlsoap.org/wsdl/}documentation) inherited from {http://schemas.xmlsoap.org/wsdl/}tDocumented
# Element {http://schemas.xmlsoap.org/wsdl/}operation uses Python identifier operation
__operation = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'operation'), 'operation', '__httpschemas_xmlsoap_orgwsdl_tPortType_httpschemas_xmlsoap_orgwsdloperation', True, pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 183, 10), )
operation = property(__operation.value, __operation.set, None, None)
# Attribute name uses Python identifier name
__name = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'name'), 'name', '__httpschemas_xmlsoap_orgwsdl_tPortType_name', pyxb.binding.datatypes.NCName, required=True)
__name._DeclarationLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 185, 8)
__name._UseLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 185, 8)
name = property(__name.value, __name.set, None, None)
_AttributeWildcard = pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://schemas.xmlsoap.org/wsdl/'))
_ElementMap.update({
__operation.name() : __operation
})
_AttributeMap.update({
__name.name() : __name
})
_module_typeBindings.tPortType = tPortType
Namespace.addCategoryObject('typeBinding', 'tPortType', tPortType)
# Complex type {http://schemas.xmlsoap.org/wsdl/}tOperation with content type ELEMENT_ONLY
class tOperation (tExtensibleDocumented):
"""Complex type {http://schemas.xmlsoap.org/wsdl/}tOperation with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'tOperation')
_XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 190, 2)
_ElementMap = tExtensibleDocumented._ElementMap.copy()
_AttributeMap = tExtensibleDocumented._AttributeMap.copy()
# Base type is tExtensibleDocumented
# Element documentation ({http://schemas.xmlsoap.org/wsdl/}documentation) inherited from {http://schemas.xmlsoap.org/wsdl/}tDocumented
# Element {http://schemas.xmlsoap.org/wsdl/}input uses Python identifier input
__input = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'input'), 'input', '__httpschemas_xmlsoap_orgwsdl_tOperation_httpschemas_xmlsoap_orgwsdlinput', False, pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 207, 6), )
input = property(__input.value, __input.set, None, None)
# Element {http://schemas.xmlsoap.org/wsdl/}output uses Python identifier output
__output = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'output'), 'output', '__httpschemas_xmlsoap_orgwsdl_tOperation_httpschemas_xmlsoap_orgwsdloutput', False, pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 209, 5), )
output = property(__output.value, __output.set, None, None)
# Element {http://schemas.xmlsoap.org/wsdl/}fault uses Python identifier fault
__fault = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'fault'), 'fault', '__httpschemas_xmlsoap_orgwsdl_tOperation_httpschemas_xmlsoap_orgwsdlfault', True, pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 210, 2), )
fault = property(__fault.value, __fault.set, None, None)
# Attribute name uses Python identifier name
__name = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'name'), 'name', '__httpschemas_xmlsoap_orgwsdl_tOperation_name', pyxb.binding.datatypes.NCName, required=True)
__name._DeclarationLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 199, 8)
__name._UseLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 199, 8)
name = property(__name.value, __name.set, None, None)
# Attribute parameterOrder uses Python identifier parameterOrder
__parameterOrder = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'parameterOrder'), 'parameterOrder', '__httpschemas_xmlsoap_orgwsdl_tOperation_parameterOrder', pyxb.binding.datatypes.NMTOKENS)
__parameterOrder._DeclarationLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 200, 8)
__parameterOrder._UseLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 200, 8)
parameterOrder = property(__parameterOrder.value, __parameterOrder.set, None, None)
_HasWildcardElement = True
_ElementMap.update({
__input.name() : __input,
__output.name() : __output,
__fault.name() : __fault
})
_AttributeMap.update({
__name.name() : __name,
__parameterOrder.name() : __parameterOrder
})
_module_typeBindings.tOperation = tOperation
Namespace.addCategoryObject('typeBinding', 'tOperation', tOperation)
# Complex type {http://schemas.xmlsoap.org/wsdl/}tParam with content type ELEMENT_ONLY
class tParam (tExtensibleAttributesDocumented):
"""Complex type {http://schemas.xmlsoap.org/wsdl/}tParam with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'tParam')
_XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 225, 2)
_ElementMap = tExtensibleAttributesDocumented._ElementMap.copy()
_AttributeMap = tExtensibleAttributesDocumented._AttributeMap.copy()
# Base type is tExtensibleAttributesDocumented
# Element documentation ({http://schemas.xmlsoap.org/wsdl/}documentation) inherited from {http://schemas.xmlsoap.org/wsdl/}tDocumented
# Attribute name uses Python identifier name
__name = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'name'), 'name', '__httpschemas_xmlsoap_orgwsdl_tParam_name', pyxb.binding.datatypes.NCName)
__name._DeclarationLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 228, 8)
__name._UseLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 228, 8)
name = property(__name.value, __name.set, None, None)
# Attribute message uses Python identifier message
__message = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'message'), 'message', '__httpschemas_xmlsoap_orgwsdl_tParam_message', pyxb.binding.datatypes.QName, required=True)
__message._DeclarationLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 229, 8)
__message._UseLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 229, 8)
message = property(__message.value, __message.set, None, None)
_AttributeWildcard = pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://schemas.xmlsoap.org/wsdl/'))
_ElementMap.update({
})
_AttributeMap.update({
__name.name() : __name,
__message.name() : __message
})
_module_typeBindings.tParam = tParam
Namespace.addCategoryObject('typeBinding', 'tParam', tParam)
# Complex type {http://schemas.xmlsoap.org/wsdl/}tFault with content type ELEMENT_ONLY
class tFault (tExtensibleAttributesDocumented):
"""Complex type {http://schemas.xmlsoap.org/wsdl/}tFault with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'tFault')
_XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 234, 2)
_ElementMap = tExtensibleAttributesDocumented._ElementMap.copy()
_AttributeMap = tExtensibleAttributesDocumented._AttributeMap.copy()
# Base type is tExtensibleAttributesDocumented
# Element documentation ({http://schemas.xmlsoap.org/wsdl/}documentation) inherited from {http://schemas.xmlsoap.org/wsdl/}tDocumented
# Attribute name uses Python identifier name
__name = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'name'), 'name', '__httpschemas_xmlsoap_orgwsdl_tFault_name', pyxb.binding.datatypes.NCName, required=True)
__name._DeclarationLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 237, 8)
__name._UseLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 237, 8)
name = property(__name.value, __name.set, None, None)
# Attribute message uses Python identifier message
__message = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'message'), 'message', '__httpschemas_xmlsoap_orgwsdl_tFault_message', pyxb.binding.datatypes.QName, required=True)
__message._DeclarationLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 238, 8)
__message._UseLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 238, 8)
message = property(__message.value, __message.set, None, None)
_AttributeWildcard = pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://schemas.xmlsoap.org/wsdl/'))
_ElementMap.update({
})
_AttributeMap.update({
__name.name() : __name,
__message.name() : __message
})
_module_typeBindings.tFault = tFault
Namespace.addCategoryObject('typeBinding', 'tFault', tFault)
# Complex type {http://schemas.xmlsoap.org/wsdl/}tBinding with content type ELEMENT_ONLY
class tBinding (tExtensibleDocumented):
"""Complex type {http://schemas.xmlsoap.org/wsdl/}tBinding with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'tBinding')
_XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 243, 2)
_ElementMap = tExtensibleDocumented._ElementMap.copy()
_AttributeMap = tExtensibleDocumented._AttributeMap.copy()
# Base type is tExtensibleDocumented
# Element documentation ({http://schemas.xmlsoap.org/wsdl/}documentation) inherited from {http://schemas.xmlsoap.org/wsdl/}tDocumented
# Element {http://schemas.xmlsoap.org/wsdl/}operation uses Python identifier operation
__operation = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'operation'), 'operation', '__httpschemas_xmlsoap_orgwsdl_tBinding_httpschemas_xmlsoap_orgwsdloperation', True, pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 247, 10), )
operation = property(__operation.value, __operation.set, None, None)
# Attribute name uses Python identifier name
__name = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'name'), 'name', '__httpschemas_xmlsoap_orgwsdl_tBinding_name', pyxb.binding.datatypes.NCName, required=True)
__name._DeclarationLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 249, 8)
__name._UseLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 249, 8)
name = property(__name.value, __name.set, None, None)
# Attribute type uses Python identifier type
__type = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'type'), 'type', '__httpschemas_xmlsoap_orgwsdl_tBinding_type', pyxb.binding.datatypes.QName, required=True)
__type._DeclarationLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 250, 8)
__type._UseLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 250, 8)
type = property(__type.value, __type.set, None, None)
_HasWildcardElement = True
_ElementMap.update({
__operation.name() : __operation
})
_AttributeMap.update({
__name.name() : __name,
__type.name() : __type
})
_module_typeBindings.tBinding = tBinding
Namespace.addCategoryObject('typeBinding', 'tBinding', tBinding)
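
# tBinding instances are normally obtained by parsing a wsdl:definitions
# document rather than built by hand, but a hand-built sketch would look
# roughly like this ('MyBinding', 'tns:MyPortType' and 'MyOp' are
# hypothetical values, not part of this module):
#
#   binding = tBinding(name='MyBinding', type='tns:MyPortType')
#   binding.operation.append(tBindingOperation(name='MyOp'))
#
# Both attributes are declared required=True above, so an instance does
# not validate until name and type are set.
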
# Complex type {http://schemas.xmlsoap.org/wsdl/}tBindingOperationMessage with content type ELEMENT_ONLY
class tBindingOperationMessage (tExtensibleDocumented):
"""Complex type {http://schemas.xmlsoap.org/wsdl/}tBindingOperationMessage with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'tBindingOperationMessage')
_XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 255, 2)
_ElementMap = tExtensibleDocumented._ElementMap.copy()
_AttributeMap = tExtensibleDocumented._AttributeMap.copy()
# Base type is tExtensibleDocumented
# Element documentation ({http://schemas.xmlsoap.org/wsdl/}documentation) inherited from {http://schemas.xmlsoap.org/wsdl/}tDocumented
# Attribute name uses Python identifier name
__name = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'name'), 'name', '__httpschemas_xmlsoap_orgwsdl_tBindingOperationMessage_name', pyxb.binding.datatypes.NCName)
__name._DeclarationLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 258, 8)
__name._UseLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 258, 8)
name = property(__name.value, __name.set, None, None)
_HasWildcardElement = True
_ElementMap.update({
})
_AttributeMap.update({
__name.name() : __name
})
_module_typeBindings.tBindingOperationMessage = tBindingOperationMessage
Namespace.addCategoryObject('typeBinding', 'tBindingOperationMessage', tBindingOperationMessage)
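
# tBindingOperationMessage declares no WSDL-namespace child elements of
# its own; _HasWildcardElement = True means the concrete per-protocol
# content (for example a soap:body extension element) is carried by the
# inherited element wildcard rather than by a named property.  The
# tBindingOperationFault type below follows the same pattern, except that
# its name attribute is required rather than optional.
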
# Complex type {http://schemas.xmlsoap.org/wsdl/}tBindingOperationFault with content type ELEMENT_ONLY
class tBindingOperationFault (tExtensibleDocumented):
"""Complex type {http://schemas.xmlsoap.org/wsdl/}tBindingOperationFault with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'tBindingOperationFault')
_XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 263, 2)
_ElementMap = tExtensibleDocumented._ElementMap.copy()
_AttributeMap = tExtensibleDocumented._AttributeMap.copy()
# Base type is tExtensibleDocumented
# Element documentation ({http://schemas.xmlsoap.org/wsdl/}documentation) inherited from {http://schemas.xmlsoap.org/wsdl/}tDocumented
# Attribute name uses Python identifier name
__name = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'name'), 'name', '__httpschemas_xmlsoap_orgwsdl_tBindingOperationFault_name', pyxb.binding.datatypes.NCName, required=True)
__name._DeclarationLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 266, 8)
__name._UseLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 266, 8)
name = property(__name.value, __name.set, None, None)
_HasWildcardElement = True
_ElementMap.update({
})
_AttributeMap.update({
__name.name() : __name
})
_module_typeBindings.tBindingOperationFault = tBindingOperationFault
Namespace.addCategoryObject('typeBinding', 'tBindingOperationFault', tBindingOperationFault)

# Complex type {http://schemas.xmlsoap.org/wsdl/}tBindingOperation with content type ELEMENT_ONLY
class tBindingOperation (tExtensibleDocumented):
"""Complex type {http://schemas.xmlsoap.org/wsdl/}tBindingOperation with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'tBindingOperation')
_XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 271, 2)
_ElementMap = tExtensibleDocumented._ElementMap.copy()
_AttributeMap = tExtensibleDocumented._AttributeMap.copy()
# Base type is tExtensibleDocumented
# Element documentation ({http://schemas.xmlsoap.org/wsdl/}documentation) inherited from {http://schemas.xmlsoap.org/wsdl/}tDocumented
# Element {http://schemas.xmlsoap.org/wsdl/}input uses Python identifier input
__input = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'input'), 'input', '__httpschemas_xmlsoap_orgwsdl_tBindingOperation_httpschemas_xmlsoap_orgwsdlinput', False, pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 275, 10), )
input = property(__input.value, __input.set, None, None)
# Element {http://schemas.xmlsoap.org/wsdl/}output uses Python identifier output
__output = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'output'), 'output', '__httpschemas_xmlsoap_orgwsdl_tBindingOperation_httpschemas_xmlsoap_orgwsdloutput', False, pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 276, 10), )
output = property(__output.value, __output.set, None, None)
# Element {http://schemas.xmlsoap.org/wsdl/}fault uses Python identifier fault
__fault = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'fault'), 'fault', '__httpschemas_xmlsoap_orgwsdl_tBindingOperation_httpschemas_xmlsoap_orgwsdlfault', True, pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 277, 10), )
fault = property(__fault.value, __fault.set, None, None)
# Attribute name uses Python identifier name
__name = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'name'), 'name', '__httpschemas_xmlsoap_orgwsdl_tBindingOperation_name', pyxb.binding.datatypes.NCName, required=True)
__name._DeclarationLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 279, 8)
__name._UseLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 279, 8)
name = property(__name.value, __name.set, None, None)
_HasWildcardElement = True
_ElementMap.update({
__input.name() : __input,
__output.name() : __output,
__fault.name() : __fault
})
_AttributeMap.update({
__name.name() : __name
})
_module_typeBindings.tBindingOperation = tBindingOperation
Namespace.addCategoryObject('typeBinding', 'tBindingOperation', tBindingOperation)
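
# tBindingOperation mirrors the abstract tOperation defined earlier: at
# most one input and one output binding plus any number of fault bindings
# (the boolean in each ElementDeclaration above is the plurality flag),
# all keyed by the required name attribute that ties the binding back to
# the portType operation it implements.
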
# Complex type {http://schemas.xmlsoap.org/wsdl/}tService with content type ELEMENT_ONLY
class tService (tExtensibleDocumented):
"""Complex type {http://schemas.xmlsoap.org/wsdl/}tService with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'tService')
_XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 284, 2)
_ElementMap = tExtensibleDocumented._ElementMap.copy()
_AttributeMap = tExtensibleDocumented._AttributeMap.copy()
# Base type is tExtensibleDocumented
# Element documentation ({http://schemas.xmlsoap.org/wsdl/}documentation) inherited from {http://schemas.xmlsoap.org/wsdl/}tDocumented
# Element {http://schemas.xmlsoap.org/wsdl/}port uses Python identifier port
__port = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'port'), 'port', '__httpschemas_xmlsoap_orgwsdl_tService_httpschemas_xmlsoap_orgwsdlport', True, pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 288, 10), )
port = property(__port.value, __port.set, None, None)
# Attribute name uses Python identifier name
__name = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'name'), 'name', '__httpschemas_xmlsoap_orgwsdl_tService_name', pyxb.binding.datatypes.NCName, required=True)
__name._DeclarationLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 290, 8)
__name._UseLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 290, 8)
name = property(__name.value, __name.set, None, None)
_HasWildcardElement = True
_ElementMap.update({
__port.name() : __port
})
_AttributeMap.update({
__name.name() : __name
})
_module_typeBindings.tService = tService
Namespace.addCategoryObject('typeBinding', 'tService', tService)

# Complex type {http://schemas.xmlsoap.org/wsdl/}tPort with content type ELEMENT_ONLY
class tPort (tExtensibleDocumented):
"""Complex type {http://schemas.xmlsoap.org/wsdl/}tPort with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'tPort')
_XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 295, 2)
_ElementMap = tExtensibleDocumented._ElementMap.copy()
_AttributeMap = tExtensibleDocumented._AttributeMap.copy()
# Base type is tExtensibleDocumented
# Element documentation ({http://schemas.xmlsoap.org/wsdl/}documentation) inherited from {http://schemas.xmlsoap.org/wsdl/}tDocumented
# Attribute name uses Python identifier name
__name = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'name'), 'name', '__httpschemas_xmlsoap_orgwsdl_tPort_name', pyxb.binding.datatypes.NCName, required=True)
__name._DeclarationLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 298, 8)
__name._UseLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 298, 8)
name = property(__name.value, __name.set, None, None)
# Attribute binding uses Python identifier binding
__binding = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'binding'), 'binding', '__httpschemas_xmlsoap_orgwsdl_tPort_binding', pyxb.binding.datatypes.QName, required=True)
__binding._DeclarationLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 299, 8)
__binding._UseLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 299, 8)
binding = property(__binding.value, __binding.set, None, None)
_HasWildcardElement = True
_ElementMap.update({
})
_AttributeMap.update({
__name.name() : __name,
__binding.name() : __binding
})
_module_typeBindings.tPort = tPort
Namespace.addCategoryObject('typeBinding', 'tPort', tPort)
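
# tService and tPort close out the concrete WSDL 1.1 constructs: a
# service is a named collection of ports, and each port pairs a required
# name with a required QName reference to one of the tBinding definitions
# above.  The network address itself arrives through a protocol extension
# element (e.g. soap:address) via the element wildcard.
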
definitions = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'definitions'), tDefinitions, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 82, 2))
Namespace.addCategoryObject('elementBinding', definitions.name().localName(), definitions)
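
# The definitions element above is the usual entry point for these
# bindings.  A minimal usage sketch, assuming this module is importable
# as pyxb.bundles.wssplat.wsdl11 and that the standard PyXB
# CreateFromDocument helper is generated earlier in this file
# ('service.wsdl' is a hypothetical input):
#
#   import pyxb.bundles.wssplat.wsdl11 as wsdl11
#   with open('service.wsdl', 'rb') as f:
#       defs = wsdl11.CreateFromDocument(f.read())
#   for svc in defs.service:            # tService instances
#       for port in svc.port:           # tPort instances
#           print(svc.name, port.name, port.binding)
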
def _BuildAutomaton ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton
del _BuildAutomaton
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 39, 6))
counters.add(cc_0)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.WildcardUse(pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=pyxb.binding.content.Wildcard.NC_any), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 39, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
tDocumentation._Automaton = _BuildAutomaton()
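
# The _BuildAutomaton* helpers (the one above and those that follow)
# compile each complex type's XSD content model into a finite automaton
# with counters (pyxb.utils.fac), which PyXB evaluates at runtime to
# validate child-element order and occurrence.  tDocumentation's
# automaton is the simplest case: a single wildcard state that may
# repeat without bound (CounterCondition min=0, max=None), i.e.
# documentation accepts arbitrary content.
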
tDocumented._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'documentation'), tDocumentation, scope=tDocumented, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6)))

def _BuildAutomaton_ ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_
del _BuildAutomaton_
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
counters.add(cc_0)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(tDocumented._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'documentation')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
tDocumented._Automaton = _BuildAutomaton_()

def _BuildAutomaton_2 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_2
del _BuildAutomaton_2
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
counters.add(cc_0)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(tExtensibleAttributesDocumented._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'documentation')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
tExtensibleAttributesDocumented._Automaton = _BuildAutomaton_2()

def _BuildAutomaton_3 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_3
del _BuildAutomaton_3
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 76, 10))
counters.add(cc_1)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(tExtensibleDocumented._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'documentation')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.WildcardUse(pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://schemas.xmlsoap.org/wsdl/')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 76, 10))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
st_1._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
tExtensibleDocumented._Automaton = _BuildAutomaton_3()
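
# tExtensibleDocumented's automaton establishes the pattern most types
# below reuse: an optional leading documentation element (cc_0: min=0,
# max=1) followed by any number of extension elements from outside the
# WSDL namespace (cc_1: min=0, max=None, via the negated-namespace
# wildcard).
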
tDefinitions._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'import'), tImport, scope=tDefinitions, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 112, 6)))
tDefinitions._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'types'), tTypes, scope=tDefinitions, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 113, 6)))
tDefinitions._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'message'), tMessage, scope=tDefinitions, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 114, 6)))
tDefinitions._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'portType'), tPortType, scope=tDefinitions, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 120, 6)))
tDefinitions._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'binding'), tBinding, scope=tDefinitions, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 121, 6)))
tDefinitions._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'service'), tService, scope=tDefinitions, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 122, 6)))

def _BuildAutomaton_4 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_4
del _BuildAutomaton_4
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 76, 10))
counters.add(cc_1)
cc_2 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 135, 10))
counters.add(cc_2)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(tDefinitions._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'documentation')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.WildcardUse(pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://schemas.xmlsoap.org/wsdl/')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 76, 10))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_2, False))
symbol = pyxb.binding.content.ElementUse(tDefinitions._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'import')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 112, 6))
st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_2, False))
symbol = pyxb.binding.content.ElementUse(tDefinitions._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'types')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 113, 6))
st_3 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_3)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_2, False))
symbol = pyxb.binding.content.ElementUse(tDefinitions._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'message')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 114, 6))
st_4 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_4)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_2, False))
symbol = pyxb.binding.content.ElementUse(tDefinitions._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'portType')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 120, 6))
st_5 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_5)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_2, False))
symbol = pyxb.binding.content.ElementUse(tDefinitions._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'binding')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 121, 6))
st_6 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_6)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_2, False))
symbol = pyxb.binding.content.ElementUse(tDefinitions._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'service')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 122, 6))
st_7 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_7)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_0, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_1, False) ]))
st_1._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_2, True) ]))
st_2._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_2, True) ]))
st_3._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_2, True) ]))
st_4._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_2, True) ]))
st_5._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_2, True) ]))
st_6._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_2, True) ]))
st_7._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
tDefinitions._Automaton = _BuildAutomaton_4()
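
# tDefinitions' automaton: optional documentation, then extension
# elements, then an unbounded choice (cc_2) over import, types, message,
# portType, binding and service.  Every choice state transitions to every
# other choice state, which is how an unbounded xs:choice flattens into
# the transition tables above.
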
def _BuildAutomaton_5 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_5
del _BuildAutomaton_5
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
counters.add(cc_0)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(tImport._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'documentation')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
tImport._Automaton = _BuildAutomaton_5()

def _BuildAutomaton_6 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_6
del _BuildAutomaton_6
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 76, 10))
counters.add(cc_1)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(tTypes._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'documentation')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.WildcardUse(pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://schemas.xmlsoap.org/wsdl/')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 76, 10))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
st_1._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
tTypes._Automaton = _BuildAutomaton_6()

tMessage._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'part'), tPart, scope=tMessage, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 162, 10)))

def _BuildAutomaton_7 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_7
del _BuildAutomaton_7
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 76, 10))
counters.add(cc_1)
cc_2 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 162, 10))
counters.add(cc_2)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(tMessage._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'documentation')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.WildcardUse(pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://schemas.xmlsoap.org/wsdl/')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 76, 10))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_2, False))
symbol = pyxb.binding.content.ElementUse(tMessage._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'part')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 162, 10))
st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_1, False) ]))
st_1._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_2, True) ]))
st_2._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
tMessage._Automaton = _BuildAutomaton_7()
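
# tMessage: optional documentation, extension elements, then zero or more
# part children (cc_2).  Note st_2 only transitions to itself, so parts
# must follow any extension content rather than interleave with it.
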
def _BuildAutomaton_8 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_8
del _BuildAutomaton_8
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
counters.add(cc_0)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(tPart._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'documentation')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
tPart._Automaton = _BuildAutomaton_8()

tPortType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'operation'), tOperation, scope=tPortType, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 183, 10)))

def _BuildAutomaton_9 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_9
del _BuildAutomaton_9
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 183, 10))
counters.add(cc_1)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(tPortType._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'documentation')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.ElementUse(tPortType._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'operation')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 183, 10))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
st_1._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
tPortType._Automaton = _BuildAutomaton_9()
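
# tPortType: optional documentation followed by zero or more operation
# children.  tPortType derives from tExtensibleAttributesDocumented, so
# its extensibility is attribute-only; correspondingly there is no
# element-wildcard state in this automaton.
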
tOperation._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'input'), tParam, scope=tOperation, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 207, 6)))
tOperation._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'output'), tParam, scope=tOperation, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 209, 5)))
tOperation._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'fault'), tFault, scope=tOperation, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 210, 2)))

def _BuildAutomaton_10 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_10
del _BuildAutomaton_10
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 76, 10))
counters.add(cc_1)
cc_2 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 208, 3))
counters.add(cc_2)
cc_3 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 210, 2))
counters.add(cc_3)
cc_4 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 218, 3))
counters.add(cc_4)
cc_5 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 220, 2))
counters.add(cc_5)
states = []
final_update = None
symbol = pyxb.binding.content.ElementUse(tOperation._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'documentation')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = None
symbol = pyxb.binding.content.WildcardUse(pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://schemas.xmlsoap.org/wsdl/')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 76, 10))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = set()
symbol = pyxb.binding.content.ElementUse(tOperation._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'input')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 207, 6))
st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_2, False))
symbol = pyxb.binding.content.ElementUse(tOperation._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'output')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 209, 5))
st_3 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_3)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_2, False))
final_update.add(fac.UpdateInstruction(cc_3, False))
symbol = pyxb.binding.content.ElementUse(tOperation._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'fault')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 210, 2))
st_4 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_4)
final_update = set()
symbol = pyxb.binding.content.ElementUse(tOperation._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'output')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 217, 6))
st_5 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_5)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_4, False))
symbol = pyxb.binding.content.ElementUse(tOperation._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'input')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 219, 5))
st_6 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_6)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_4, False))
final_update.add(fac.UpdateInstruction(cc_5, False))
symbol = pyxb.binding.content.ElementUse(tOperation._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'fault')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 220, 2))
st_7 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_7)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_1, False) ]))
st_1._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_3, [
]))
st_2._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_4, [
]))
st_3._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_2, True),
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_3, True) ]))
st_4._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_6, [
]))
st_5._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_4, True) ]))
transitions.append(fac.Transition(st_7, [
]))
st_6._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_4, True),
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_5, True) ]))
st_7._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
tOperation._Automaton = _BuildAutomaton_10()
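
# tOperation's automaton is the most involved in this module: after the
# optional documentation and extension elements it branches into the two
# WSDL 1.1 transmission-primitive shapes.  States st_2..st_4 cover the
# input-first forms (one-way and request-response: input, optional
# output, then faults), and st_5..st_7 cover the output-first forms
# (notification and solicit-response: output, optional input, then
# faults).  This is also the only automaton here built with its third
# argument False rather than True, reflecting a content model that empty
# content cannot satisfy: an operation must contain an input or an
# output.
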
def _BuildAutomaton_11 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_11
del _BuildAutomaton_11
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
counters.add(cc_0)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(tParam._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'documentation')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
tParam._Automaton = _BuildAutomaton_11()

def _BuildAutomaton_12 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_12
del _BuildAutomaton_12
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
counters.add(cc_0)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(tFault._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'documentation')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
tFault._Automaton = _BuildAutomaton_12()

tBinding._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'operation'), tBindingOperation, scope=tBinding, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 247, 10)))

def _BuildAutomaton_13 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_13
del _BuildAutomaton_13
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 76, 10))
counters.add(cc_1)
cc_2 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 247, 10))
counters.add(cc_2)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(tBinding._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'documentation')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.WildcardUse(pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://schemas.xmlsoap.org/wsdl/')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 76, 10))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_2, False))
symbol = pyxb.binding.content.ElementUse(tBinding._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'operation')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 247, 10))
st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_1, False) ]))
st_1._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_2, True) ]))
st_2._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
tBinding._Automaton = _BuildAutomaton_13()

def _BuildAutomaton_14 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_14
del _BuildAutomaton_14
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 76, 10))
counters.add(cc_1)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(tBindingOperationMessage._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'documentation')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.WildcardUse(pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://schemas.xmlsoap.org/wsdl/')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 76, 10))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
st_1._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
tBindingOperationMessage._Automaton = _BuildAutomaton_14()

def _BuildAutomaton_15 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_15
del _BuildAutomaton_15
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 76, 10))
counters.add(cc_1)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(tBindingOperationFault._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'documentation')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.WildcardUse(pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://schemas.xmlsoap.org/wsdl/')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 76, 10))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
st_1._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
tBindingOperationFault._Automaton = _BuildAutomaton_15()

tBindingOperation._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'input'), tBindingOperationMessage, scope=tBindingOperation, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 275, 10)))
tBindingOperation._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'output'), tBindingOperationMessage, scope=tBindingOperation, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 276, 10)))
tBindingOperation._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'fault'), tBindingOperationFault, scope=tBindingOperation, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 277, 10)))

def _BuildAutomaton_16 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_16
del _BuildAutomaton_16
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 76, 10))
counters.add(cc_1)
cc_2 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 275, 10))
counters.add(cc_2)
cc_3 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 276, 10))
counters.add(cc_3)
cc_4 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 277, 10))
counters.add(cc_4)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(tBindingOperation._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'documentation')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.WildcardUse(pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://schemas.xmlsoap.org/wsdl/')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 76, 10))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_2, False))
symbol = pyxb.binding.content.ElementUse(tBindingOperation._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'input')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 275, 10))
st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_3, False))
symbol = pyxb.binding.content.ElementUse(tBindingOperation._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'output')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 276, 10))
st_3 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_3)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_4, False))
symbol = pyxb.binding.content.ElementUse(tBindingOperation._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'fault')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 277, 10))
st_4 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_4)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_1, False) ]))
st_1._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_2, False) ]))
st_2._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_3, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_3, False) ]))
st_3._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_4, True) ]))
st_4._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
tBindingOperation._Automaton = _BuildAutomaton_16()
tService._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'port'), tPort, scope=tService, location=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 288, 10)))
def _BuildAutomaton_17 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_17
del _BuildAutomaton_17
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 76, 10))
counters.add(cc_1)
cc_2 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 288, 10))
counters.add(cc_2)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(tService._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'documentation')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.WildcardUse(pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://schemas.xmlsoap.org/wsdl/')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 76, 10))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_2, False))
symbol = pyxb.binding.content.ElementUse(tService._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'port')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 288, 10))
st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_1, False) ]))
st_1._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_2, True) ]))
st_2._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
tService._Automaton = _BuildAutomaton_17()
def _BuildAutomaton_18 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_18
del _BuildAutomaton_18
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 76, 10))
counters.add(cc_1)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(tPort._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'documentation')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 50, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.WildcardUse(pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://schemas.xmlsoap.org/wsdl/')), pyxb.utils.utility.Location('/tmp/pyxbdist.mjW1MNk/PyXB-1.2.5/pyxb/bundles/wssplat/schemas/wsdl11.xsd', 76, 10))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
st_1._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
tPort._Automaton = _BuildAutomaton_18()
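# Note: the _BuildAutomaton_* helpers above are PyXB-generated. Each builds
# the finite automaton with counters (FAC) that validates one complex type's
# content model: CounterConditions encode minOccurs/maxOccurs bounds, States
# wrap element/wildcard uses, and Transitions carry the counter updates that
# are checked during validation.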
|
StarcoderdataPython
|
5097165
|
#pragma error
#pragma repy restrictions.fewevents
def foo(timername):
mycontext[timername] = True
sleep(2)
if callfunc=='initialize':
mycontext['timetogo'] = False
myval = settimer(.2, foo, ('a',))
myval = settimer(.3, foo, ('b',))
myval = settimer(.4, foo, ('c',))
sleep(1)
if mycontext['a'] and mycontext['b'] and mycontext['c']:
print "Bye!"
|
StarcoderdataPython
|
146966
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of Beam TFX runner."""
import os
from typing import Any, Iterable
from absl import logging
import apache_beam as beam
from tfx.orchestration import metadata
from tfx.orchestration.portable import launcher
from tfx.orchestration.portable import tfx_runner
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import telemetry_utils
from ml_metadata.proto import metadata_store_pb2
# TODO(jyzhao): confirm it's re-executable, add test case.
@beam.typehints.with_input_types(Any)
@beam.typehints.with_output_types(Any)
class _PipelineNodeAsDoFn(beam.DoFn):
"""Wrap component as beam DoFn."""
def __init__(self,
pipeline_node: pipeline_pb2.PipelineNode,
mlmd_connection: metadata.Metadata,
pipeline_info: pipeline_pb2.PipelineInfo,
pipeline_runtime_spec: pipeline_pb2.PipelineRuntimeSpec):
"""Initializes the _PipelineNodeAsDoFn.
Args:
      pipeline_node: The specification of the node that this launcher launches.
mlmd_connection: ML metadata connection. The connection is expected to
not be opened before launcher is initiated.
pipeline_info: The information of the pipeline that this node runs in.
pipeline_runtime_spec: The runtime information of the pipeline that this
node runs in.
"""
self._launcher = launcher.Launcher(
pipeline_node=pipeline_node,
mlmd_connection=mlmd_connection,
pipeline_info=pipeline_info,
pipeline_runtime_spec=pipeline_runtime_spec)
self._component_id = pipeline_node.node_info.id
def process(self, element: Any, *signals: Iterable[Any]) -> None:
"""Executes component based on signals.
Args:
element: a signal element to trigger the component.
      *signals: side input signals indicating completeness of upstream components.
"""
for signal in signals:
assert not list(signal), 'Signal PCollection should be empty.'
self._run_component()
def _run_component(self) -> None:
logging.info('Component %s is running.', self._component_id)
self._launcher.launch()
logging.info('Component %s is finished.', self._component_id)
class BeamDagRunner(tfx_runner.TfxRunner):
"""Tfx runner on Beam."""
def __init__(self):
"""Initializes BeamDagRunner as a TFX orchestrator.
"""
def run(self, pipeline: pipeline_pb2.Pipeline) -> None:
"""Deploys given logical pipeline on Beam.
Args:
pipeline: Logical pipeline in IR format.
"""
# For CLI, while creating or updating pipeline, pipeline_args are extracted
# and hence we avoid deploying the pipeline.
if 'TFX_JSON_EXPORT_PIPELINE_ARGS_PATH' in os.environ:
return
# TODO(b/163003901): Support beam DAG runner args through IR.
# TODO(b/163003901): MLMD connection config should be passed in via IR.
connection_config = metadata_store_pb2.ConnectionConfig()
connection_config.sqlite.SetInParent()
mlmd_connection = metadata.Metadata(
connection_config=connection_config)
with telemetry_utils.scoped_labels(
{telemetry_utils.LABEL_TFX_RUNNER: 'beam'}):
with beam.Pipeline() as p:
      # Used for triggering the component DoFns.
root = p | 'CreateRoot' >> beam.Create([None])
# Stores mapping of component to its signal.
signal_map = {}
      # pipeline.nodes are in topological order.
for node in pipeline.nodes:
# TODO(b/160882349): Support subpipeline
pipeline_node = node.pipeline_node
component_id = pipeline_node.node_info.id
# Signals from upstream components.
signals_to_wait = []
for upstream_node in pipeline_node.upstream_nodes:
          assert upstream_node in signal_map, ('Component is not in '
                                               'topological order')
signals_to_wait.append(signal_map[upstream_node])
logging.info('Component %s depends on %s.', component_id,
[s.producer.full_label for s in signals_to_wait])
# Each signal is an empty PCollection. AsIter ensures component will
# be triggered after upstream components are finished.
# LINT.IfChange
signal_map[component_id] = (
root
| 'Run[%s]' % component_id >> beam.ParDo(
_PipelineNodeAsDoFn(pipeline_node, mlmd_connection,
pipeline.pipeline_info,
pipeline.runtime_spec), *
[beam.pvalue.AsIter(s) for s in signals_to_wait]))
# LINT.ThenChange(../beam/beam_dag_runner.py)
logging.info('Component %s is scheduled.', component_id)
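# --- Usage sketch (illustrative, not part of the original module) ---
# Assuming `pipeline_ir` is a pipeline_pb2.Pipeline already compiled to IR,
# the runner is driven as:
#
#     BeamDagRunner().run(pipeline_ir)
#
# Each node becomes a ParDo over the singleton root PCollection, and the
# empty signal PCollections enforce topological execution order.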
|
StarcoderdataPython
|
6443411
|
<gh_stars>1-10
import json
import os
import requests
from urllib3 import Retry
class Reporter:
def __init__(self, cookie: str) -> None:
self.__read_sites()
self.__init_session(cookie)
def __read_sites(self) -> None:
with open(os.path.join("src", "sites.json"), "r", encoding="utf-8") as fr:
self.__sites = json.load(fr)
def __init_session(self, cookie: str) -> None:
self.__session = requests.Session()
adapter = requests.adapters.HTTPAdapter(
max_retries=Retry(
total=50, backoff_factor=0.1, status_forcelist=[500, 502, 503, 504]
)
)
self.__session.mount("http://", adapter)
self.__session.mount("https://", adapter)
self.__session.cookies.update({"SESSION": cookie})
self.__session.headers.update({"content-type": "application/json"})
def __request(self, api: str) -> dict:
site = self.__sites[api]
return self.__session.request(
method=site["method"],
url=site["url"],
timeout=5,
json=site["data"],
).json()
    def run(self) -> tuple[bool, str]:
        status = self.__request("status")["data"]
        if status is None:
            return (False, "invalid cookie")
        elif status["appliedTimes"] != 0:
            return (True, "duplicated")
        elif status["schoolStatus"] == 0:
            response = self.__request("unreturned")
        elif status["schoolStatus"] == 1:
            response = self.__request("returned")
        else:
            return (False, "invalid status")
        if response["data"] is True:
            return (True, "success")
        else:
            return (False, "invalid data")
if __name__ == "__main__":
cookies = os.environ.get("COOKIES")
    if cookies is None:
raise Exception("session id not provided")
else:
cookies = cookies.split("#")
results = []
for index, cookie in enumerate(cookies):
reporter = Reporter(cookie)
result, message = reporter.run()
results.append(result)
print(f"Student {index+1}: {message}")
if not all(results):
exit(-1)
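# Usage sketch (illustrative; the script name below is hypothetical and the
# SESSION cookie values must be valid):
#
#     COOKIES="cookie_a#cookie_b" python reporter.py
#
# One Reporter runs per "#"-separated cookie; the process exits with -1 if
# any report fails.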
|
StarcoderdataPython
|
26782
|
# -*- coding: utf-8 -*-
# Author: t0pep0
# e-mail: <EMAIL>
# Jabber: <EMAIL>
# BTC : 1ipEA2fcVyjiUnBqUx7PVy5efktz2hucb
# donate free =)
# Forked and modified by <NAME>
# Compatible Python3
import hmac
import hashlib
import time
import urllib.request, urllib.parse, urllib.error
import json
class Api(object):
__username = ''
__api_key = ''
__api_secret = ''
__nonce_v = ''
# Init class
def __init__(self, username, api_key, api_secret):
self.__username = username
self.__api_key = api_key
self.__api_secret = api_secret
# get timestamp as nonce
def __nonce(self):
self.__nonce_v = '{:.10f}'.format(time.time() * 1000).split('.')[0]
# generate signature
    def __signature(self):
        byte_secret = bytes(self.__api_secret, "ascii")
        string = self.__nonce_v + self.__username + self.__api_key  # create message string
        encoded_string = string.encode('utf-8')
        signature = hmac.new(byte_secret, encoded_string, digestmod=hashlib.sha256).hexdigest().upper()  # create signature
        return signature
    def __post(self, url, param):  # POST request (low-level API call)
        post_url = url
        header = {'User-agent': 'bot-cex.io-'}
        params = urllib.parse.urlencode(param)
        post_data = params.encode("ascii")
        req = urllib.request.Request(url=post_url, data=post_data, headers=header)
        page = urllib.request.urlopen(req).read()
        return page
def api_call(self, method, param={}, private=0, couple=''): # api call (Middle level)
url = 'https://cex.io/api/' + method + '/' # generate url
if couple != '':
url = url + couple + '/' # set couple if needed
if private == 1: # add auth-data if needed
self.__nonce()
param.update({
'key': self.__api_key,
'signature': self.__signature(),
'nonce': self.__nonce_v})
        answer = self.__post(url, param)  # POST request
        a = answer.decode("utf-8")
        # return json.loads(answer) would return a parsed dict instead
        return a  # return the raw JSON string; callers can json.loads() it
def ticker(self, couple='MHC/BTC'):
return self.api_call('ticker', {}, 0, couple)
def tickers(self, couple='USD'):
return self.api_call('tickers', {}, 0, couple)
def ohlcv(self, end_date, couple='BTC/USD'):
return self.api_call('ohlcv/hd/'+str(end_date), {}, 0, couple)
def order_book(self, couple='MHC/BTC'):
return self.api_call('order_book', {}, 0, couple)
def trade_history(self, since=1, couple='MHC/BTC'):
return self.api_call('trade_history', {"since": str(since)}, 0, couple)
def balance(self):
return self.api_call('balance', {}, 1)
def current_orders(self, couple='MHC/BTC'):
return self.api_call('open_orders', {}, 1, couple)
def cancel_order(self, order_id):
return self.api_call('cancel_order', {"id": order_id}, 1)
def place_order(self, ptype='buy', amount=1, price=1, couple='MHC/BTC'):
return self.api_call('place_order', {"type": ptype, "amount": str(amount), "price": str(price)}, 1, couple)
def archived_orders(self, couple='XLM/USD'):
return self.api_call('archived_orders', {}, 1, couple)
def price_stats(self, last_hours, max_resp_arr_size, couple='MHC/BTC'):
return self.api_call(
'price_stats',
{"lastHours": last_hours, "maxRespArrSize": max_resp_arr_size},
0, couple)
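# Usage sketch (illustrative; the credentials are placeholders):
#
#     api = Api('username', 'api_key', 'api_secret')
#     print(api.ticker('BTC/USD'))   # public call, no auth
#     print(api.balance())           # private call, signed with HMAC-SHA256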
|
StarcoderdataPython
|
1812593
|
from django.shortcuts import render
# Create your views here.
from django.db import transaction
from django.utils.decorators import method_decorator
#create global transactional class mixin
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import filters
class TransactionalViewMixin(object):
    """Global view mixin that wraps each request in an atomic database
    transaction and provides default filter backends.
    """
filter_backends = (DjangoFilterBackend,filters.SearchFilter,)
    def perform_destroy(self, model_object):
        """Called by the generic detail view; soft-deletes by setting
        is_deleted to True instead of removing the row.
        """
model_object.is_deleted=True
model_object.save()
@method_decorator(transaction.atomic)
def dispatch(self, *args, **kwargs):
return super(TransactionalViewMixin, self).dispatch(*args, **kwargs)
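# Usage sketch (illustrative; BookViewSet, Book and BookSerializer are
# hypothetical names, not part of this module):
#
#     from rest_framework import viewsets
#
#     class BookViewSet(TransactionalViewMixin, viewsets.ModelViewSet):
#         queryset = Book.objects.filter(is_deleted=False)
#         serializer_class = BookSerializer
#         search_fields = ('title',)          # used by SearchFilter
#
# Every request handled by the view runs in one database transaction, and
# DELETE soft-deletes via perform_destroy instead of removing the row.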
|
StarcoderdataPython
|
6598969
|
<filename>src/empirical_fire_modelling/analysis/pfi.py
# -*- coding: utf-8 -*-
"""PFI calculation."""
import eli5
from wildfires.qstat import get_ncpus
from ..cache import cache
@cache
def calculate_pfi(rf, X, y):
"""Calculate the PFI."""
rf.n_jobs = get_ncpus()
perm_importance = eli5.sklearn.PermutationImportance(rf, random_state=1).fit(X, y)
return eli5.explain_weights_df(perm_importance, feature_names=list(X.columns))
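# Usage sketch (illustrative; assumes a fitted scikit-learn forest and a
# pandas DataFrame X with matching target y):
#
#     from sklearn.ensemble import RandomForestRegressor
#     rf = RandomForestRegressor(random_state=0).fit(X, y)
#     pfi_df = calculate_pfi(rf, X, y)  # DataFrame of permutation importances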
|
StarcoderdataPython
|
11290772
|
<filename>scripts/mmd/MMD-critic/mmd.py
# maintained by <EMAIL>
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
# from mpi4py import MPI
import sys
import math
##############################################################################################################################
# function to select criticisms
# ARGS:
# K: Kernel matrix
# selectedprotos: prototypes already selected
# m : number of criticisms to be selected
# reg: regularizer type.
# is_K_sparse: True if K is a pre-computed csc sparse matrix; False if it is a dense matrix.
# RETURNS: indices selected as criticisms
##############################################################################################################################
def select_criticism_regularized(K, selectedprotos, m, reg='logdet', is_K_sparse=True):
n = np.shape(K)[0]
if reg in ['None','logdet','iterative']:
pass
else:
print("wrong regularizer :" + reg)
exit(1)
options = dict()
selected = np.array([], dtype=int)
candidates2 = np.setdiff1d(range(n), selectedprotos)
    inverse_of_prev_selected = None  # becomes a matrix after the first selection
if is_K_sparse:
colsum = np.array(K.sum(0)).ravel()/n
else:
colsum = np.sum(K, axis=0)/n
for i in range(m):
maxx = -sys.float_info.max
argmax = -1
candidates = np.setdiff1d(candidates2, selected)
s1array = colsum[candidates]
temp = K[selectedprotos, :][:, candidates]
if is_K_sparse:
s2array = temp.sum(0)
else:
s2array = np.sum(temp, axis=0)
s2array = s2array / (len(selectedprotos))
s1array = np.abs(s1array - s2array)
if reg == 'logdet':
if inverse_of_prev_selected is not None: # first call has been made already
temp = K[selected, :][:, candidates]
if is_K_sparse:
temp2 = temp.transpose().dot(inverse_of_prev_selected)
regularizer = temp.transpose().multiply(temp2)
regcolsum = regularizer.sum(1).ravel()# np.sum(regularizer, axis=0)
regularizer = np.abs(K.diagonal()[candidates] - regcolsum)
else:
# hadamard product
temp2 = np.array(np.dot(inverse_of_prev_selected, temp))
regularizer = temp2 * temp
regcolsum = np.sum(regularizer, axis=0)
regularizer = np.log(np.abs(np.diagonal(K)[candidates] - regcolsum))
s1array = s1array + regularizer
else:
if is_K_sparse:
s1array = s1array - np.log(np.abs(K.diagonal()[candidates]))
else:
s1array = s1array - np.log(np.abs(np.diagonal(K)[candidates]))
argmax = candidates[np.argmax(s1array)]
maxx = np.max(s1array)
selected = np.append(selected, argmax)
if reg == 'logdet':
KK = K[selected,:][:,selected]
if is_K_sparse:
KK = KK.todense()
inverse_of_prev_selected = np.linalg.inv(KK) # shortcut
if reg == 'iterative':
selectedprotos = np.append(selectedprotos, argmax)
return selected
##############################################################################################################################
# Function to choose m of all rows by MMD, given the kernel matrix K
# ARGS:
# K : kernel matrix
# candidate_indices : array of potential choices for selections, returned values are chosen from these indices
# m: number of selections to be made
# is_K_sparse: True if K is a pre-computed csc sparse matrix; False if it is a dense matrix.
# RETURNS: subset of candidate_indices which are selected as prototypes
##############################################################################################################################
def greedy_select_protos(K, candidate_indices, m, is_K_sparse=False):
if len(candidate_indices) != np.shape(K)[0]:
K = K[:,candidate_indices][candidate_indices,:]
n = len(candidate_indices)
# colsum = np.array(K.sum(0)).ravel() # same as rowsum
if is_K_sparse:
colsum = 2*np.array(K.sum(0)).ravel() / n
else:
colsum = 2*np.sum(K, axis=0) / n
selected = np.array([], dtype=int)
value = np.array([])
for i in range(m):
maxx = -sys.float_info.max
argmax = -1
candidates = np.setdiff1d(range(n), selected)
s1array = colsum[candidates]
if len(selected) > 0:
temp = K[selected, :][:, candidates]
if is_K_sparse:
# s2array = temp.sum(0) *2
s2array = temp.sum(0) * 2 + K.diagonal()[candidates]
else:
s2array = np.sum(temp, axis=0) *2 + np.diagonal(K)[candidates]
s2array = s2array/(len(selected) + 1)
s1array = s1array - s2array
else:
if is_K_sparse:
s1array = s1array - (np.abs(K.diagonal()[candidates]))
else:
s1array = s1array - (np.abs(np.diagonal(K)[candidates]))
argmax = candidates[np.argmax(s1array)]
# print("max %f" %np.max(s1array))
selected = np.append(selected, argmax)
# value = np.append(value,maxx)
KK = K[selected, :][:, selected]
if is_K_sparse:
KK = KK.todense()
inverse_of_prev_selected = np.linalg.inv(KK) # shortcut
return candidate_indices[selected]
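# Usage sketch (illustrative; assumes scikit-learn is available for the RBF kernel):
if __name__ == "__main__":
    from sklearn.metrics.pairwise import rbf_kernel
    X = np.random.RandomState(0).randn(100, 5)
    K = rbf_kernel(X)  # dense kernel matrix, so is_K_sparse=False below
    protos = greedy_select_protos(K, np.arange(K.shape[0]), 10, is_K_sparse=False)
    critics = select_criticism_regularized(K, protos, 5, reg='logdet', is_K_sparse=False)
    print("prototypes:", protos)
    print("criticisms:", critics)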
|
StarcoderdataPython
|
11211858
|
from motor_typing import TYPE_CHECKING
def options(option_context):
# type: (Options.OptionsContext) -> None
pass
if TYPE_CHECKING:
from waflib import Options
|
StarcoderdataPython
|
5121322
|
# utility functions for working with json
# merges keys from j2 into j1
def json_merge_missing_keys(j1, j2, overwrite=False, exclude=[]):
    for key in j2:
        if ((key not in j1) or overwrite) and (key not in exclude):
            j1[key] = j2[key]
def get_child_by_key_values(j1, kvs={}):
    if 'children' in j1:
        for c in j1['children']:
            if isinstance(c, dict):  # Python 3 replacement for the old types.DictType check
match = True
for kv in kvs:
if not (kv in c and c[kv] == kvs[kv]):
match = False
if match:
return c
return None
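# Usage sketch (illustrative):
if __name__ == "__main__":
    defaults = {"a": 1, "b": 2}
    node = {"b": 0, "children": [{"name": "x"}, {"name": "y"}]}
    json_merge_missing_keys(node, defaults)  # adds "a"; keeps node's own "b"
    print(node)
    print(get_child_by_key_values(node, {"name": "y"}))  # -> {'name': 'y'}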
|
StarcoderdataPython
|
201651
|
<gh_stars>1-10
#!/usr/bin/python
# (c) 2018-2019, NetApp Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
'''
na_ontap_vscan
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_ontap_vscan
short_description: NetApp ONTAP Vscan enable/disable.
extends_documentation_fragment:
- netapp.ontap.netapp.na_ontap
version_added: 2.9.0
author: NetApp Ansible Team (@carchi8py) <<EMAIL>>
notes:
- on demand task, on_access_policy and scanner_pools must be set up before running this module
description:
- Enable and Disable Vscan
options:
enable:
description:
  - Whether to enable or disable Vscan.
type: bool
default: True
vserver:
description:
- the name of the data vserver to use.
required: true
type: str
'''
EXAMPLES = """
- name: Enable Vscan
na_ontap_vscan:
enable: True
username: '{{ netapp_username }}'
password: '{{ <PASSWORD> }}'
hostname: '{{ netapp_hostname }}'
vserver: trident_svm
- name: Disable Vscan
na_ontap_vscan:
enable: False
username: '{{ netapp_username }}'
password: '{{ <PASSWORD> }}'
hostname: '{{ netapp_hostname }}'
vserver: trident_svm
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapVscan(object):
''' enable/disable vscan '''
def __init__(self):
self.use_rest = False
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
enable=dict(type='bool', default=True),
vserver=dict(required=True, type='str'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
self.na_helper = NetAppModule()
self.parameters = self.na_helper.set_parameters(self.module.params)
# API should be used for ONTAP 9.6 or higher, Zapi for lower version
self.rest_api = OntapRestAPI(self.module)
if self.rest_api.is_rest():
self.use_rest = True
else:
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
def get_vscan(self):
if self.use_rest:
params = {'fields': 'svm,enabled',
"svm.name": self.parameters['vserver']}
api = "protocols/vscan"
message, error = self.rest_api.get(api, params)
if error:
self.module.fail_json(msg=error)
return message['records'][0]
else:
vscan_status_iter = netapp_utils.zapi.NaElement('vscan-status-get-iter')
vscan_status_info = netapp_utils.zapi.NaElement('vscan-status-info')
vscan_status_info.add_new_child('vserver', self.parameters['vserver'])
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(vscan_status_info)
vscan_status_iter.add_child_elem(query)
try:
result = self.server.invoke_successfully(vscan_status_iter, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error getting Vscan info for Vserver %s: %s' %
(self.parameters['vserver'], to_native(error)),
exception=traceback.format_exc())
if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
return result.get_child_by_name('attributes-list').get_child_by_name('vscan-status-info')
def enable_vscan(self, uuid=None):
if self.use_rest:
params = {"svm.name": self.parameters['vserver']}
data = {"enabled": self.parameters['enable']}
api = "protocols/vscan/" + uuid
dummy, error = self.rest_api.patch(api, data, params)
if error is not None:
self.module.fail_json(msg=error)
else:
vscan_status_obj = netapp_utils.zapi.NaElement("vscan-status-modify")
vscan_status_obj.add_new_child('is-vscan-enabled', str(self.parameters['enable']))
try:
self.server.invoke_successfully(vscan_status_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg="Error Enable/Disabling Vscan: %s" % to_native(error), exception=traceback.format_exc())
def asup_log(self):
if self.use_rest:
# TODO: logging for Rest
return
else:
# Either we are using ZAPI, or REST failed when it should not
try:
netapp_utils.ems_log_event("na_ontap_vscan", self.server)
except Exception:
# TODO: we may fail to connect to REST or ZAPI, the line below shows REST issues only
# self.module.fail_json(msg=repr(self.rest_api.errors), log=repr(self.rest_api.debug_logs))
pass
def apply(self):
changed = False
self.asup_log()
current = self.get_vscan()
if self.use_rest:
if current['enabled'] != self.parameters['enable']:
if not self.module.check_mode:
self.enable_vscan(current['svm']['uuid'])
changed = True
else:
if current.get_child_content('is-vscan-enabled') != str(self.parameters['enable']).lower():
if not self.module.check_mode:
self.enable_vscan()
changed = True
self.module.exit_json(changed=changed)
def main():
"""
Execute action from playbook
"""
command = NetAppOntapVscan()
command.apply()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3491775
|
import logging
import random
import string
from datetime import date, datetime
from enum import Enum, auto
from threading import RLock, Timer
from typing import TYPE_CHECKING, Dict, Literal, Optional
from localstack import config
from localstack.services.awslambda.invocation.executor_endpoint import ServiceEndpoint
from localstack.services.awslambda.invocation.lambda_models import FunctionVersion
from localstack.services.awslambda.invocation.runtime_executor import RuntimeExecutor
from localstack.utils.strings import to_str
if TYPE_CHECKING:
from localstack.services.awslambda.invocation.version_manager import QueuedInvocation
STARTUP_TIMEOUT_SEC = 10.0
HEX_CHARS = [str(num) for num in range(10)] + ["a", "b", "c", "d", "e", "f"]
LOG = logging.getLogger(__name__)
class RuntimeStatus(Enum):
INACTIVE = auto()
STARTING = auto()
READY = auto()
RUNNING = auto()
FAILED = auto()
STOPPED = auto()
InitializationType = Literal["on-demand", "provisioned-concurrency"]
class InvalidStatusException(Exception):
def __init__(self, message: str):
super().__init__(message)
def generate_runtime_id() -> str:
return "".join(random.choices(string.hexdigits[:16], k=32)).lower()
class RuntimeEnvironment:
runtime_executor: RuntimeExecutor
status_lock: RLock
status: RuntimeStatus
initialization_type: InitializationType
last_returned: datetime
startup_timer: Optional[Timer]
def __init__(
self,
function_version: FunctionVersion,
initialization_type: InitializationType,
service_endpoint: ServiceEndpoint,
):
self.id = generate_runtime_id()
self.status = RuntimeStatus.INACTIVE
self.status_lock = RLock()
self.function_version = function_version
self.initialization_type = initialization_type
self.runtime_executor = RuntimeExecutor(
self.id, function_version, service_endpoint=service_endpoint
)
self.last_returned = datetime.min
self.startup_timer = None
def get_log_group_name(self) -> str:
return f"/aws/lambda/{self.function_version.id.function_name}"
def get_log_stream_name(self) -> str:
return f"{date.today():%Y/%m/%d}/[{self.function_version.qualifier}]{self.id}"
def get_environment_variables(self) -> Dict[str, str]:
"""
Returns the environment variable set for the runtime container
:return: Dict of environment variables
"""
env_vars = {
# Runtime API specifics
"LOCALSTACK_RUNTIME_ID": self.id,
"LOCALSTACK_RUNTIME_ENDPOINT": f"http://{self.runtime_executor.get_endpoint_from_executor()}:{self.runtime_executor.executor_endpoint.port}",
# General Lambda Environment Variables
"AWS_LAMBDA_LOG_GROUP_NAME": self.get_log_group_name(),
"AWS_LAMBDA_LOG_STREAM_NAME": self.get_log_stream_name(),
"AWS_LAMBDA_FUNCTION_NAME": self.function_version.qualified_arn, # TODO use name instead of arn
"AWS_LAMBDA_FUNCTION_TIMEOUT": self.function_version.config.timeout,
"AWS_LAMBDA_FUNCTION_MEMORY_SIZE": self.function_version.config.memory_size, # TODO use correct memory size
"AWS_LAMBDA_FUNCTION_VERSION": self.function_version.qualifier, # TODO use name instead of arn
"AWS_DEFAULT_REGION": self.function_version.qualified_arn, # TODO use region instead of arn
"AWS_REGION": self.function_version.qualified_arn, # TODO use region instead of arn
"TASK_ROOT": "/var/task", # TODO custom runtimes?
"RUNTIME_ROOT": "/var/runtime", # TODO custom runtimes?
"AWS_LAMBDA_INITIALIZATION_TYPE": self.initialization_type,
"TZ": ":UTC", # TODO does this have to match local system time? format?
# Access IDs for role TODO make dependent on role arn
"AWS_ACCESS_KEY_ID": "test",
"AWS_SECRET_ACCESS_KEY": "test",
"AWS_SESSION_TOKEN": "<PASSWORD>",
# TODO xray
# LocalStack endpoint specifics
"LOCALSTACK_HOSTNAME": self.runtime_executor.get_endpoint_from_executor(),
"EDGE_PORT": str(config.EDGE_PORT),
"AWS_ENDPOINT_URL": f"http://{self.runtime_executor.get_endpoint_from_executor()}:{config.EDGE_PORT}",
}
if self.function_version.config.handler:
env_vars["_HANDLER"] = self.function_version.config.handler
if self.function_version.config.runtime:
env_vars["AWS_EXECUTION_ENV"] = f"Aws_Lambda_{self.function_version.config.runtime}"
env_vars.update(self.function_version.config.environment)
return env_vars
# Lifecycle methods
def start(self) -> None:
"""
Starting the runtime environment
"""
with self.status_lock:
if self.status != RuntimeStatus.INACTIVE:
raise InvalidStatusException("Runtime Handler can only be started when inactive")
self.status = RuntimeStatus.STARTING
self.runtime_executor.start(self.get_environment_variables())
self.startup_timer = Timer(STARTUP_TIMEOUT_SEC, self.timed_out)
self.startup_timer.start()
def stop(self) -> None:
"""
Stopping the runtime environment
"""
with self.status_lock:
if self.status in [RuntimeStatus.INACTIVE, RuntimeStatus.STOPPED]:
raise InvalidStatusException("Runtime Handler cannot be shutdown before started")
self.runtime_executor.stop()
self.status = RuntimeStatus.STOPPED
# Status methods
def set_ready(self) -> None:
with self.status_lock:
if self.status != RuntimeStatus.STARTING:
raise InvalidStatusException(
"Runtime Handler can only be set active while starting"
)
self.status = RuntimeStatus.READY
if self.startup_timer:
self.startup_timer.cancel()
self.startup_timer = None
def invocation_done(self) -> None:
self.last_returned = datetime.now()
with self.status_lock:
if self.status != RuntimeStatus.RUNNING:
raise InvalidStatusException("Runtime Handler can only be set ready while running")
self.status = RuntimeStatus.READY
def timed_out(self) -> None:
LOG.debug(
"Executor %s for function %s timed out during startup",
self.id,
self.function_version.qualified_arn,
)
self.startup_timer = None
self.errored()
def errored(self) -> None:
with self.status_lock:
if self.status != RuntimeStatus.STARTING:
raise InvalidStatusException("Runtime Handler can only error while starting")
self.status = RuntimeStatus.FAILED
if self.startup_timer:
self.startup_timer.cancel()
try:
self.runtime_executor.stop()
except Exception:
LOG.debug("Unable to shutdown runtime handler '%s'", self.id)
def invoke(self, invocation_event: "QueuedInvocation") -> None:
with self.status_lock:
if self.status != RuntimeStatus.READY:
raise InvalidStatusException("Invoke can only happen if status is ready")
self.status = RuntimeStatus.RUNNING
invoke_payload = {
"invoke-id": invocation_event.invocation_id,
"payload": to_str(invocation_event.invocation.payload),
}
self.runtime_executor.invoke(payload=invoke_payload)
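# Status transitions implemented above (derived from the lifecycle methods):
#
#   INACTIVE --start()--> STARTING --set_ready()--> READY --invoke()--> RUNNING
#   RUNNING --invocation_done()--> READY
#   STARTING --timed_out()/errored()--> FAILED
#   any state except INACTIVE/STOPPED --stop()--> STOPPED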
|
StarcoderdataPython
|
8086306
|
<reponame>technoborsch/AtomREST<gh_stars>0
NO_REMARKS_TIPS = [
'Это ненадолго :)',
'По крайней мере, мы о них не знаем',
'Это не значит, что всё правильно',
'Сейчас добавим :)',
'Держу в курсе',
'Можно расслабиться',
'Удивительно',
'Классно же',
'Вау!',
'Но ты-то сам знаешь, где ошибки, не так ли?',
'Error 404: errors not found',
'Никогда такого не было',
'...пока что.',
'Замечаний - нет, ошибки - есть.',
]
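# The strings above are humorous Russian UI tips shown when a project has no
# review remarks (e.g. 'Это ненадолго :)' roughly means "That won't last long :)").
# A view would typically surface one at random:
#
#     import random
#     tip = random.choice(NO_REMARKS_TIPS)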
|
StarcoderdataPython
|
6687367
|
<reponame>marty1912/dna_app<filename>trialgen.py
#!/bin/python3
import sys
import json
import shutil
import os
import subprocess
import random
import time
import pandas as pd
import argparse
import copy
from os import listdir
from os.path import isfile, join, basename ,splitext
from os import walk
from pandas.core.frame import DataFrame
def listToStringwithDoubleQuotes(mylist):
    # render a list of strings as a JSON-style array of double-quoted items
    return '["' + '","'.join(mylist) + '"]'
def getNumberPatternTrials(base_path='assets/images/pattern_numbers/', n_choices=4):
    '''
    Generate number-pattern trials: five-term arithmetic sequences with one
    term removed, plus a set of answer choices for the missing term.
    '''
#distances = [2,3,4]
distances = [1]
missing_index = [0,2,4]
direction = [1,-1]
problem_len = 5
choices_len= 4
min_num = 1
max_num = 30
def appendBasePath(my_list):
new_list = []
for i in range(0,len(my_list)):
item = base_path + str(my_list[i]) + ".PNG"
new_list.append(item)
return new_list
trials = []
for dist in distances:
for missing in missing_index:
for dir in direction:
if(dir == 1):
first_number = random.randint(min_num,max_num - (problem_len-1)*dist)
else:
first_number = random.randint(min_num + (problem_len-1)*dist,max_num)
numbers = [first_number + i*dist*dir for i in range(problem_len)]
translation = "\"dist:"+str(dist)+",missing:"+str(missing)+",dir:"+str(dir) +"\""
choices = []
choices.append(numbers[missing])
max_offset = dist*2
while(len(choices) < choices_len):
correct_solution = choices[0]
guess = correct_solution+random.randint(-max_offset, max_offset)
if(guess > max_num or guess < min_num):
continue
if(guess in choices):
continue
choices.append(guess)
random.shuffle(choices)
numbers = appendBasePath(numbers)
choices = appendBasePath(choices)
solution = numbers.copy()
solution_str = listToStringwithDoubleQuotes(solution)
problem = numbers.copy()
problem[missing] = ""
problem_str = listToStringwithDoubleQuotes(problem)
choices_str = listToStringwithDoubleQuotes(choices)
trials.append([problem_str,solution_str,choices_str,translation])
return trials
def getPatternUnitOfRepeatTrials(symbols = [
'assets/images/pattern_symbols/triangle.PNG',
'assets/images/pattern_symbols/tripleCircle.PNG',
'assets/images/pattern_symbols/ArrowOverlay.PNG',
'assets/images/pattern_symbols/ArrowLeft.PNG',
'assets/images/pattern_symbols/square.PNG',
'assets/images/pattern_symbols/robotface.PNG',
'assets/images/pattern_symbols/circle.PNG',
'assets/images/pattern_symbols/rhombus.PNG',
'assets/images/pattern_symbols/ArrowRight.PNG']
,choose_from=None, n_choices=3):
    '''
    Generate unit-of-repeat trials: a repeating symbol pattern plus candidate
    subsequences, where the correct choice is the pattern's unit of repeat.
    '''
complete_patterns = [
([0,1,1,0,1,1],[[0,1,1],[0,1,1,0],[0,1]]), #1 ABBABB
([0,0,1,0,0,1],[[0,0,1],[0,0],[0,1]]), #1 AABAAB <- as in Rittle-Johnson et.al (2015)
#[0,1,0,2,0,1], #2 ABACAB
# [0,1,2,2,1,0] , #3 ABCCBA <- does not have one
# [0,1,0,1,1,0,1,1,1] , #4 ABABBABBB <- growing pattern does not have a single unit of repeat.
([0,1,2,3,4,0],[[0,1,2,3,4],[0,1,2,3,4,0],[0,1,2]]) , #5 AJDXNA
# [0,1,0,2,0,3,0,1,0,2,0,3] , #6 ABACADABACAD
# [0,1,2,3,2,1,0] , #7 ABCDCDA
# [0,1,1,1,0,1,1,0,1] , #8 ABBBABBAB
]
pattern_trans_table= ["A","B","C","D","E","F","G","H"]
trials = []
for pattern_tuple in complete_patterns:
pattern = pattern_tuple[0]
choice_patterns = pattern_tuple[1]
shuffled_sym = list(symbols)
random.shuffle(shuffled_sym)
problem = []
translation = []
for index in pattern:
problem.append(shuffled_sym[index])
translation.append(pattern_trans_table[index])
choose_from = []
for choice in choice_patterns:
choose_from.append([])
for index in choice:
choose_from[-1].append(shuffled_sym[index])
solution = choose_from[0].copy()
# problem[missing] = ""
# we also shuffle this so the correct choice is not always leftmost.
random.shuffle(choose_from)
problem = listToStringwithDoubleQuotes(problem)
solution= listToStringwithDoubleQuotes(solution)
choose_from_strings = []
for my_list in choose_from:
string_list = listToStringwithDoubleQuotes(my_list)
print("-"*80)
print(string_list)
print("-"*80)
choose_from_strings.append(string_list)
choice = listToStringwithDoubleQuotes(choose_from_strings)
choice = choice.replace("\"[\"","[\"")
choice = choice.replace("\"]\"","\"]")
translation = '"' + "".join(translation) + '"'
trials.append([problem,solution,choice,translation])
return trials
def getPatternGeneralizeTrials(symbols = [
'assets/images/pattern_symbols/triangle.PNG',
'assets/images/pattern_symbols/tripleCircle.PNG',
'assets/images/pattern_symbols/ArrowOverlay.PNG',
'assets/images/pattern_symbols/ArrowLeft.PNG',
'assets/images/pattern_symbols/square.PNG',
'assets/images/pattern_symbols/robotface.PNG',
'assets/images/pattern_symbols/circle.PNG',
'assets/images/pattern_symbols/rhombus.PNG',
'assets/images/pattern_symbols/ArrowRight.PNG']
,choose_from=None, n_choices=4):
    '''
    Generate pattern-generalization trials: the pattern's structure must be
    reproduced using symbols that do not occur in the original sequence.
    '''
complete_patterns = [
[0,1,1,0,1,1] , #1 ABBABB
[0,1,0,2,0,1], #2 ABACAB
[0,1,2,2,1,0] , #3 ABCCBA
[0,1,0,1,1,0,1,1,1] , #4 ABABBABBB
# [0,1,2,3,4,0] , #5 AJDXNA Not possible with only 4 choices..
# [0,1,0,2,0,3,0,1,0,2,0,3] , #6 ABACADABACAD
# [0,1,2,3,2,1,0] , #7 ABCDCDA
# [0,1,1,1,0,1,1,0,1] , #8 ABBBABBAB
]
pattern_trans_table= ["A","B","C","D","E","F","G","H"]
missing_pos = [0,-1]
trials = []
for pattern in complete_patterns:
for missing in missing_pos:
shuffled_sym = list(symbols)
random.shuffle(shuffled_sym)
solution = []
translation = []
for index in pattern:
solution.append(shuffled_sym[index])
translation.append(pattern_trans_table[index])
problem = solution.copy()
# problem[missing] = ""
# get the choices.
# for the generalization we want anything that does not occur in the
# original pattern
choose_from = []
while(len(choose_from) < n_choices):
next_symbol = random.choice(symbols)
if(next_symbol in problem):
continue
if(next_symbol in choose_from):
continue
choose_from.append(next_symbol)
# we also shuffle this so the correct choice is not always leftmost.
random.shuffle(choose_from)
user_input = ["" for i in range(len(problem))]
problem = listToStringwithDoubleQuotes(problem)
user_input = listToStringwithDoubleQuotes(user_input)
solution= listToStringwithDoubleQuotes(solution)
choice = listToStringwithDoubleQuotes(choose_from)
translation = '"' + "".join(translation) + '"'
trials.append([problem,solution,choice,translation,user_input])
return trials
def getPatternExtendTrials(symbols = [
'assets/images/pattern_symbols/triangle.PNG',
'assets/images/pattern_symbols/tripleCircle.PNG',
'assets/images/pattern_symbols/ArrowOverlay.PNG',
'assets/images/pattern_symbols/ArrowLeft.PNG',
'assets/images/pattern_symbols/square.PNG',
'assets/images/pattern_symbols/robotface.PNG',
'assets/images/pattern_symbols/circle.PNG',
'assets/images/pattern_symbols/rhombus.PNG',
'assets/images/pattern_symbols/ArrowRight.PNG']
,choose_from=None, n_choices=4):
    '''
    Generate pattern-extension trials: a symbol pattern with its first or
    last element missing, plus answer choices containing the correct symbol.
    '''
complete_patterns = [
[0,1,1,0,1,1] , #1 ABBABB
[0,1,0,2,0,1], #2 ABACAB
[0,1,2,2,1,0] , #3 ABCCBA
[0,1,0,1,1,0,1,1,1] , #4 ABABBABBB
[0,1,2,3,4,0] , #5 AJDXNA
# [0,1,0,2,0,3,0,1,0,2,0,3] , #6 ABACADABACAD
# [0,1,2,3,2,1,0] , #7 ABCDCDA
# [0,1,1,1,0,1,1,0,1] , #8 ABBBABBAB
]
pattern_trans_table= ["A","B","C","D","E","F","G","H"]
missing_pos = [0,-1]
trials = []
for pattern in complete_patterns:
for missing in missing_pos:
shuffled_sym = list(symbols)
random.shuffle(shuffled_sym)
solution = []
translation = []
for index in pattern:
solution.append(shuffled_sym[index])
translation.append(pattern_trans_table[index])
problem = solution.copy()
problem[missing] = ""
# get the choices.
choose_from = []
# we need to have the correct one for sure.
choose_from.append(solution[missing])
# now we use some others from the pattern.
unique_symbols = set(solution)
unique_symbols.remove(solution[missing])
while(len(choose_from) < n_choices and len(unique_symbols) > 0):
choose_from.append(unique_symbols.pop())
# if we still do not have enough choices we select them from the remaining symbols.
while(len(choose_from) < n_choices):
next_symbol = random.choice(symbols)
if(next_symbol in choose_from):
continue
choose_from.append(next_symbol)
# we also shuffle this so the correct choice is not always leftmost.
random.shuffle(choose_from)
problem = listToStringwithDoubleQuotes(problem)
solution= listToStringwithDoubleQuotes(solution)
choice = listToStringwithDoubleQuotes(choose_from)
translation = '"' + "".join(translation) + '"'
trials.append([problem,solution,choice,translation])
return trials
def nonSymbTrialsFromFile():
"""
    Here we use pre-generated trials from a file; to keep the task short we
    randomly choose a subset of those trials.
"""
filename = "python_templates/dot_gen_trials.csv"
df = pd.read_csv(filename, sep=",")
lst = df.to_dict('records')
lst_left = lst[:len(lst)//2]
lst_buckets = [[],[],[],[]]
for row in (lst_left):
lst_buckets[row["bucket"]-1].append(row)
for i in range(0,len(lst_buckets)):
bucket = lst_buckets[i]
lst_buckets[i]= random.sample(bucket,5)
print("-"*80)
print("buckets len:",len(lst_buckets[0]))
print("-"*80)
lst_left = lst_buckets[0]
for i in range(1,len(lst_buckets)):
lst_left.extend(lst_buckets[i])
lst_right = copy.deepcopy(lst_left)
for row_index in range(0,len(lst_left)):
tmp = lst_left[row_index]["left_num"]
lst_left[row_index]["left_num"] = lst_left[row_index]["right_num"]
lst_left[row_index]["right_num"] =tmp
lst_left[row_index]["direction"] ="Left"
lst_left.extend(lst_right)
nonSymbTrialsStats(lst_left)
return lst_left
def nonSymbTrialGen(numbers=[i for i in range(5,22)], buckets = [{'min':1,'max':1.3},{'min':1.3,'max':1.9},{'min':1.9,'max':3},{'min':3,'max':4}]):
'''
    Returns a list of trial dicts with the required fields; it does not
    determine the stimulus filenames (genNonSymbTrials handles that).
'''
all_matchings = []
print("starting..")
for left_num in range(22,5,-1):
for right_num in range(5,22):
            # skip equal or inverted pairs; the remaining right values only
            # grow, so the inner loop can stop early
            if(left_num <= right_num):
                print("left:",left_num,"right",right_num, "BREAK")
                break
higher_num = max(left_num,right_num)
lower_num = min(left_num,right_num)
this_match = {"left_num":left_num,"right_num":right_num,"solution":higher_num,"ratio":round(higher_num/lower_num,2),"solution":"RIGHT"}
this_match['bucket'] = -1
for bucket_index in range(0,len(buckets)):
bucket = buckets[bucket_index]
if this_match['ratio'] <= bucket['max'] and this_match['ratio'] >= bucket['min']:
this_match['bucket'] = bucket_index
break
# if it does not fit into a bucket we dont need it.
"""
if(this_match['bucket'] == -1):
continue
"""
all_matchings.append(this_match)
# until here left is always larger.
all_matchings = random.sample(all_matchings,20)
matchings_left = copy.deepcopy(all_matchings)
for row_index in range(0,len(matchings_left)):
tmp = matchings_left[row_index]["left_num"]
matchings_left[row_index]["left_num"] = matchings_left[row_index]["right_num"]
matchings_left[row_index]["right_num"] =tmp
matchings_left[row_index]["solution"] ="LEFT"
all_matchings.extend(matchings_left)
# debug:
df = DataFrame.from_records(all_matchings)
df.to_csv("generated_trials.csv")
return all_matchings
def nonSymbTrialsStats(data):
buckets_n = [0,0,0,0]
directions = [0,0]
for match_index in range(0,len(data)):
match = data[match_index]
#print("left:",match["left_num"],"right:",match["right_num"])
buckets_n[match["bucket"] -1] += 1
print(":-:"*20)
print("total len:",len(data))
print("buckets freq:",buckets_n)
print(":-:"*20)
return
def genNonSymbTrials():
'''
this function knows how to name the files and what to do with the trials generated by nonsymTrialGen
'''
#all_matchings = nonSymbTrialGen()
all_matchings = nonSymbTrialsFromFile()
nonSymbTrialsStats(all_matchings)
ids_area_control = [i for i in range(0,17)]
ids_radius_control = [i for i in range(17,34)]
# we have each match 2 times. once for the area controlled and another time for the radius.
min_n_dots = 5
trials = []
for match_index in range(0,len(all_matchings)):
match = all_matchings[match_index]
left_num = match['left_num']
right_num = match['right_num']
solution_num = match['solution']
left_filename_area = "id_"+str(ids_area_control[left_num-min_n_dots])+"_dots_"+str(left_num)+".png"
right_filename_area = "id_"+str(ids_area_control[right_num-min_n_dots])+"_dots_"+str(right_num)+".png"
solution_filename_area = "id_"+str(ids_area_control[solution_num-min_n_dots])+"_dots_"+str(solution_num)+".png"
trials.append([left_filename_area,right_filename_area,solution_filename_area,json.dumps(match)])
'''
left_filename_radius = "id_"+str(ids_radius_control[left_num-min_n_dots])+"_dots_"+str(left_num)+".png"
right_filename_radius = "id_"+str(ids_radius_control[right_num-min_n_dots])+"_dots_"+str(right_num)+".png"
solution_filename_radius = "id_"+str(ids_radius_control[solution_num-min_n_dots])+"_dots_"+str(solution_num)+".png"
trials.append([left_filename_radius,right_filename_radius,solution_filename_radius,match])
'''
return trials
def genSymbolicNumberCompareTrials(distances=[1,2,3,4,5,6,7,8],numbers=[1,2,3,4,5,6,7,8,9]):
'''
    our output tells us that we have:
    8 pairs for distance 1,
    7 pairs for distance 2,
    4 pairs for distance 5,
    3 pairs for distance 6,
    so we use the distance 5 and 6 pairs twice to keep the set balanced
'''
trials_per_dist = []
for dist in distances:
dist_poss_count = 0
cur_dist = []
for num in numbers:
left_num = num
right_num = num+dist
solution = max(left_num,right_num)
if (not left_num in numbers) or (not right_num in numbers):
continue
# append both directions
trials = []
trials.append([left_num,right_num,solution,dist])
trials.append([right_num,left_num,solution,dist])
dist_poss_count +=1
cur_dist.append(trials)
trials_per_dist.append(cur_dist)
print("have ",dist_poss_count,"for distance:",dist)
for i in range(0,len(trials_per_dist)):
n_per_dist = 3
# the trials are longer than n_per_dist so we randomly select some trials.
if(len(trials_per_dist[i])>= n_per_dist):
trials_per_dist[i] = random.sample(trials_per_dist[i],n_per_dist)
continue
# the trials are shorter than n_per_dist so we use the trials multiple times.
cur_dist = copy.deepcopy(trials_per_dist[i])
while(len(cur_dist)+len(trials_per_dist[i]) <= n_per_dist):
cur_dist.extend(trials_per_dist[i])
if(len(cur_dist) < n_per_dist):
diff = n_per_dist - len(cur_dist)
cur_dist.extend(random.sample(trials_per_dist[i],diff))
trials_per_dist[i] = cur_dist
trials = []
for dist in trials_per_dist:
for d in dist:
for trial in d:
trials.append(trial)
return trials
def appendPathToNum(num,base_path= "assets/images/pattern_numbers/",file_end=".PNG"):
return base_path + str(num)+file_end
def getNewNumericalCompTrials():
trials = []
base_path = "assets/images/pattern_numbers/"
file_end = ".PNG"
for trial in genSymbolicNumberCompareTrials():
trials.append([base_path+str(trial[0])+file_end,base_path+str(trial[1])+file_end,base_path+str(trial[2])+file_end,"\"left:"+str(trial[0])+",right"+str(trial[1])+",dist"+str(trial[3])+"\""])
return trials
def genNewOrdinalNumberVerificationTrials(filename="python_templates/ord_trials.csv"):
df = pd.read_csv(filename, sep=",")
trial_dict = df.to_dict('records')
trials = []
for row in trial_dict:
trials.append([appendPathToNum(row['left']),appendPathToNum(row['middle']),appendPathToNum(row['right']),row['solution'],row])
return trials
def genOrdinalNumberVerificationTrials(distances=[1,2,3],numbers=[1,2,3,4,5,6,7,8,9]):
'''
    our output tells us that we have:
    7 triples for distance 1,
    5 triples for distance 2,
    3 triples for distance 3,
    so we use the distance-3 triples twice to keep the set balanced
'''
dist_lsts = []
for dist in distances:
dist_poss_count = 0
lst_per_dist = []
for num in numbers:
left_num = num
mid_num = num+dist
right_num = num+2*dist
solution = "IN_ORDER"
if (not left_num in numbers) or (not mid_num in numbers) or (not right_num in numbers):
continue
trials = []
# append both directions
trials.append([appendPathToNum(left_num),appendPathToNum(mid_num),appendPathToNum(right_num),solution,"ASCENDING",dist])
trials.append([appendPathToNum(right_num),appendPathToNum(mid_num),appendPathToNum(left_num),solution,"DESCENDING",dist])
solution = "MIXED_ORDER"
# ascending mixed
trials.append([appendPathToNum(mid_num),appendPathToNum(right_num),appendPathToNum(left_num),solution,"ASCENDING",dist])
#descending mixed
trials.append([appendPathToNum(mid_num),appendPathToNum(left_num),appendPathToNum(right_num),solution,"DESCENDING",dist])
lst_per_dist.append(trials)
dist_poss_count +=1
print("have ",dist_poss_count,"for distance:",dist)
dist_lsts.append(lst_per_dist)
for i in range(0,len(dist_lsts)):
n_per_dist = 4
if(len(dist_lsts[i])>= n_per_dist):
dist_lsts[i] = random.sample(dist_lsts[i],n_per_dist)
continue
trials_for_dist = copy.deepcopy(dist_lsts[i])
while(len(trials_for_dist)+len(dist_lsts[i]) <= n_per_dist):
trials_for_dist.extend(dist_lsts[i])
if(len(trials_for_dist) < n_per_dist):
diff = n_per_dist - len(trials_for_dist)
trials_for_dist.extend(random.sample(dist_lsts[i],diff))
dist_lsts[i] = trials_for_dist
trials = []
for dist in dist_lsts:
for d in dist:
for trial in d:
trials.append(trial)
print("-"*80)
print(trials)
print("-"*80)
return trials
def genNumLineTrials(ranges=[x for x in range(32,64+1)],targets=[x for x in range(4,32+1)]):
'''
    Generate number-line trials as (range, target) pairs, randomly
    down-sampled to 24 trials.
'''
trials = []
for rang in ranges:
for num in targets:
trials.append([rang,num])
trials = random.sample(trials,24)
return trials
def getPossibleMatchings(numbers_left=[2,3,4,5,6,7,8,9],numbers_right=[2,3,4,5,6,7,8,9]):
matchings = []
for num_1 in numbers_left:
for num_2 in numbers_right:
matchings.append([num_1,num_2])
return matchings
def getTripleAdditionTrials(numbers=[i for i in range(10,100)]):
calcs = []
for num_1 in numbers:
for num_2 in numbers:
for num_3 in numbers:
solution = num_1+num_2+num_3
calcs.append([num_1,num_2,num_3,solution])
n_two_digits = 60
calcs = random.sample(calcs,n_two_digits)
template_friendly_trials = []
for calc in calcs:
template_friendly_trials.append([str(calc[0])+"+"+str(calc[1]),calc[2],calc[3]])
return template_friendly_trials
def genAdditionTrials(numbers=[2,3,4,5,6,7,8,9]):
    '''
    Generate single-digit addition trials; two-digit trials with carrying
    are built below but are currently excluded from the returned set.
    '''
trials = []
# single digit numbers
matchings = getPossibleMatchings(numbers_left=numbers,numbers_right=numbers)
for match in matchings:
solution = match[0]+match[1]
trials.append([match[0],match[1],solution])
two_digit_trials = []
matchings = getPossibleMatchings(numbers_left=[i for i in range(10,100)],numbers_right=[i for i in range(10,100)])
for match in matchings:
solution = match[0]+match[1]
two_digit_trials.append([match[0],match[1],solution])
# solution must be 2 digits also
two_digit_trials = [i for i in two_digit_trials if not i[2] >= 100]
# only with "zehnerübergang"
two_digit_trials = [i for i in two_digit_trials if ((i[0]%10) + (i[1]%10))>10]
# ignore stuff with the same last digit (66+36,...)
two_digit_trials = [i for i in two_digit_trials if not (i[0]%10) == (i[1]%10)]
# ignore numbers with same digits (55,66,...)
two_digit_trials = [i for i in two_digit_trials if not (((i[0]%10) == ((i[0]//10)%10)) or ((i[1]%10) == ((i[1]//10)%10)) or ((i[2]%10) == ((i[2]//10)%10)))]
print("len two digits.",len(two_digit_trials))
n_two_digits = 60
two_digit_trials = random.sample(two_digit_trials,n_two_digits)
# no same operants
trials = [i for i in trials if not i[0] == i[1]]
# ignore 2 digit operants for kids.
# trials = trials + two_digit_trials
return trials
def genMultiplicationTrials(numbers = [2,3,4,5,6,7,8,9]):
trials = []
for num_1 in numbers:
for num_2 in numbers:
if(num_1 == num_2):
continue
solution = num_1*num_2
trials.append([num_1,num_2,solution])
two_digit_trials = getPossibleMatchings(numbers_left=[i for i in range(12,100)],numbers_right=[i for i in range(3,10)])
for numbers in two_digit_trials:
# add the mult result.
numbers.append(numbers[0]*numbers[1])
#two_digit_trials = [i for i in two_digit_trials if not (i[0] == i[1])]
two_digit_trials = [i for i in two_digit_trials if (i[2] < 100)]
#two_digit_trials = [i for i in two_digit_trials if not (((i[0]%10) == ((i[0]//10)%10)) or ((i[1]%10) == ((i[1]//10)%10)) or ((i[2]%10) == ((i[2]//10)%10)))]
print("len two digits.",len(two_digit_trials))
#n_two_digits = 60
#two_digit_trials = random.sample(two_digit_trials,n_two_digits)
return trials + two_digit_trials
def genSubtractionTrials(numbers= [2,3,4,5,6,7,8,9]):
trials = genAdditionTrials(numbers)
for trial in trials:
        # swap the solution with the first operand
print(trial)
temp = trial[0]
trial[0] = trial[2]
trial[2] = temp
return trials
def genTypingTrials(numbers=[x for x in range(1,300)]):
'''
we want to resemble the arithmetic trials as this task will be the control for that.
there we have solutions with the following digits:
single 152 double 245 triple 55
so we will do the same here.
'''
trials = []
numbers = [i for i in range(1,20)]
for num in numbers:
trials.append([num,num])
    return trials  # NOTE: early return; the digit-distribution sampling below is unreachable
n_singles = 152
n_doubles = 245
n_triple = 55
single_digit_numbers = [i for i in range(0,10)]
double_digit_numbers = [i for i in range(10,100)]
# we choose the range of 300 as this is also the case with the arithmetic trials
triple_digit_numbers = [i for i in range(100,300)]
for single in range(0,n_singles):
solution = random.choice(single_digit_numbers)
trials.append([solution,solution])
for double in range(0,n_doubles):
solution = random.choice(double_digit_numbers)
trials.append([solution,solution])
for triple in range(0,n_triple):
solution = random.choice(triple_digit_numbers)
trials.append([solution,solution])
return trials
def insertListIntoTemplate(template_string,numbers_list):
'''
    insertListIntoTemplate
    inserts a list into a template string;
    the template needs its insert spots marked with INSERT_HERE_0, INSERT_HERE_1, ...
    @ret the string with every marker replaced
'''
for index in range(0,len(numbers_list)):
template_string = template_string.replace("INSERT_HERE_"+str(index),str(numbers_list[index]))
return template_string
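# A minimal usage sketch (the template text below is hypothetical, not from a
# real template file):
#   insertListIntoTemplate("show(INSERT_HERE_0, INSERT_HERE_1)", [3, 7])
#   -> "show(3, 7)"
# Note: with ten or more spots, replacing INSERT_HERE_1 would also clobber the
# prefix of INSERT_HERE_10, so spot counts are assumed to stay below ten.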
def genMathAnxietyTrials():
return [[i] for i in range(1,14)]
def main():
generatorMap = {"symb":getNewNumericalCompTrials,
"ord":genOrdinalNumberVerificationTrials,#genNewOrdinalNumberVerificationTrials,
"numline":genNumLineTrials,
"add":genAdditionTrials,
"triple_add":getTripleAdditionTrials,
"sub":genSubtractionTrials,
"mult":genMultiplicationTrials,
"speed":genTypingTrials,
"nonsymb":genNonSymbTrials,
"mathanxiety":genMathAnxietyTrials,
"pattern_extend":getPatternExtendTrials,
"pattern_generalize":getPatternGeneralizeTrials,
"pattern_uof":getPatternUnitOfRepeatTrials,
"pattern_num":getNumberPatternTrials,
}
parser = argparse.ArgumentParser(description="Trial generator for DNA Tasks.")
parser.add_argument('task',choices=generatorMap.keys(),
help="this arg lets you choose the task you want to generate the trials for.")
parser.add_argument('template', help="path to template file.")
    # uncomment for replicable results via a fixed random seed
    #random.seed(42)
args = parser.parse_args()
trials = generatorMap[args.task]()
df = DataFrame.from_records(trials)
df.to_csv("generated_trials/"+args.task+".csv",sep=";")
template_file = args.template
temp_string = ""
with open(template_file) as tempfile:
temp_string = tempfile.read()
string_with_all_trials = ""
for trial in trials:
string_with_all_trials += insertListIntoTemplate(temp_string,trial)
print(string_with_all_trials)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
228442
|
<reponame>yyht/topmine_py3<gh_stars>1-10
import numpy as np
def merge_single_char(phrase):
segment_lst = phrase[0].strip().split()
leng = [len(item) for item in segment_lst]
|
StarcoderdataPython
|
8050832
|
# -*- coding: utf-8 -*-
import os
import sys
import cv2
import pytest
import mock
import numpy as np
import sksurgeryutils.common_overlay_apps as coa
def test_OverlayOnVideoFeedCropRecord_from_file(setup_qt, tmpdir):
in_github_ci = os.environ.get('CI')
if in_github_ci and sys.platform.startswith("linux"):
pytest.skip("Test not working on Linux runner \
because of unknown issue.")
input_file = 'tests/data/100x50_100_frames.avi'
out_file = os.path.join(tmpdir, 'overlay_test.avi')
overlay_app = coa.OverlayOnVideoFeedCropRecord(input_file, out_file)
# Start app and get a frame from input, so that
# the window is showing something, before we start
# recording.
overlay_app.start()
overlay_app.update()
overlay_app.on_record_start()
for i in range(50):
overlay_app.update()
overlay_app.on_record_stop()
overlay_app.stop()
# Check that 50 frames were actually written to the output file
output_video = cv2.VideoCapture(out_file)
for i in range(50):
ret, _ = output_video.read()
assert ret
# Trying to read 51st frame should return False
ret, _ = output_video.read()
assert not ret
output_video.release()
def test_OverlayOnVideoFeedCropRecord_from_webcam(setup_qt):
"""
    Test will only run if there is a camera available.
"""
# Try to open a camera. If one isn't available, the rest of test
# will be skipped.
source = 0
cam = cv2.VideoCapture(source)
if not cam.isOpened():
pytest.skip("No camera available")
cam.release()
# Don't pass an output filename as a parameter, so that
# the code to generate a filename from current date/time is executed.
overlay_app = coa.OverlayOnVideoFeedCropRecord(0)
# Start app and get a frame from input, so that
# the window is showing something, before we start
# recording.
overlay_app.start()
overlay_app.update()
overlay_app.on_record_start()
for i in range(50):
overlay_app.update()
overlay_app.on_record_stop()
overlay_app.stop()
def test_OverlayBaseAppRaisesNotImplementedError(setup_qt):
class ErrorApp(coa.OverlayBaseApp):
def something(self):
pass
with pytest.raises(NotImplementedError):
input_file = 'tests/data/100x50_100_frames.avi'
overlay_app = ErrorApp(input_file)
overlay_app.update()
def test_OverlayOnVideoFeedCropRecord_set_roi(setup_qt):
input_file = 'tests/data/100x50_100_frames.avi'
overlay_app = coa.OverlayOnVideoFeedCropRecord(input_file)
overlay_app.update() # Get a frame so that we can crop it
with pytest.raises(RuntimeError):
overlay_app.set_roi()
def test_DuplicateOverlayWindow(setup_qt):
input_file = 'tests/data/100x50_100_frames.avi'
overlay_app = coa.OverlayOnVideoFeed(input_file)
duplicate = coa.DuplicateOverlayWindow()
duplicate.set_source_window(overlay_app)
overlay_app.update()
duplicate.update()
np.testing.assert_array_equal(overlay_app.img, duplicate.vtk_overlay_window.input)
def test_DuplicateOverlayWindowWithCrop(setup_qt):
input_file = 'tests/data/100x50_100_frames.avi'
overlay_app = coa.OverlayOnVideoFeedCropRecord(input_file)
duplicate = coa.DuplicateOverlayWindow()
duplicate.set_source_window(overlay_app)
overlay_app.update()
duplicate.update()
np.testing.assert_array_equal(overlay_app.img, duplicate.vtk_overlay_window.input)
|
StarcoderdataPython
|
1827458
|
<gh_stars>1-10
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
# Generated by Django 1.11.21 on 2019-06-27 00:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('volt', '0010_auto_20190620_1906'),
]
operations = [
migrations.AlterField(
model_name='technologyprofile_decl',
name='technology',
field=models.CharField(choices=[(b'gpon', b'gpon'), (b'xgspon', b'xgspon')], db_index=True, help_text=b'The technology being utilized by the adaptor', max_length=16),
),
]
|
StarcoderdataPython
|
8092540
|
text = open("out-sorted.txt").read().strip("\n")
a = []; b = []
for i in text.split("\n"):
a.append(i.split(",")[0])
for i in text.split("\n"):
b.append(i.split(",")[1])
text = ""
for i in range(0, len(b)):
text += chr(int(a[i]))
print(text)
|
StarcoderdataPython
|
1715694
|
#!/usr/bin/python3
from serial import Serial
from time import sleep
ser = Serial('/dev/ttyUSB0', baudrate=115200)
# open music.raw unsigned 8-bit PCM audio
with open('music.raw','rb') as f:
b = f.read(16)
while(len(b)>0):
ser.write(b)
ser.flush()
b = f.read(16)
print("done")
|
StarcoderdataPython
|
11248178
|
<filename>src/tests/utils.py<gh_stars>1-10
import os
from abc import abstractmethod
from unittest import TestCase
from PIL import Image
SHOW_MISMATCH = True
ASSERT_ON = False
def get_reference_img_path(folder, *args):
return get_reference_dir_path(folder) + '_'.join(map(str, args)) + '.png'
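# e.g. get_reference_img_path('ButtonTest', 3, 'red') (hypothetical folder/args)
# -> 'assets/tests/reference/ButtonTest/3_red.png'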
def get_reference_dir_path(folder):
return 'assets/tests/reference/' + folder + '/'
def assert_images_equal(im1: Image, im2: Image, ref_path, args):
assert im1.mode == im2.mode, im1.mode + " " + im2.mode
assert im1.size == im2.size
for pix1, pix2 in zip(im1.getdata(), im2.getdata()):
if pix1 != pix2:
print(f'test failed with args: {args}')
if SHOW_MISMATCH:
im1.show(title="reference " + ref_path)
im2.show(title="tested " + ref_path)
if ASSERT_ON:
assert pix1 == pix2, str(pix1) + " " + str(pix2) + " " + ref_path
return
print(f'test passed with args: {args}')
class ComponentRegressionTestCase(TestCase):
@staticmethod
def generate_test_variants():
pass
@staticmethod
def generate_component(*args):
pass
@classmethod
def generate_reference(cls):
os.makedirs(get_reference_dir_path(cls.__name__), exist_ok=True)
print('Generating: ' + get_reference_dir_path(cls.__name__))
variants = cls.generate_test_variants()
for args in variants:
component = cls.generate_component(*args)
im = component.image()
im.save(get_reference_img_path(cls.__name__, *args))
def execute(self):
for args in self.generate_test_variants():
im = self.generate_component(*args).image()
path = get_reference_img_path(type(self).__name__, *args)
im_ref = Image.open(path)
assert_images_equal(im_ref, im, path, args)
|
StarcoderdataPython
|
4990917
|
<reponame>Bakkhos/aepp-sdk-python
import logging
import os
from aeternity.config import Config
from aeternity.signing import Account
from aeternity import node
# for tempdir
import shutil
import tempfile
from contextlib import contextmanager
import random
import string
logging.getLogger("requests").setLevel(logging.DEBUG)
logging.getLogger("urllib3").setLevel(logging.DEBUG)
logging.getLogger("aeternity").setLevel(logging.DEBUG)
logging.root.setLevel(logging.DEBUG)
PUBLIC_KEY = os.environ.get('WALLET_PUB')
PRIVATE_KEY = os.environ.get('WALLET_PRIV')
NODE_URL = os.environ.get('TEST_URL')
NODE_URL_DEBUG = os.environ.get('TEST_DEBUG_URL')
NETWORK_ID = os.environ.get('TEST_NETWORK_ID')
# set the key folder as environment variables
genesis = Account.from_public_private_key_strings(PUBLIC_KEY, PRIVATE_KEY)
# default values for tests
TEST_FEE = 20000
TEST_TTL = 50
Config.set_defaults(Config(
external_url=NODE_URL,
internal_url=NODE_URL_DEBUG,
network_id=NETWORK_ID
))
# Instantiate the node client for the tests
NODE_CLI = node.NodeClient(blocking_mode=True, debug=True, native=False)
# create a new account and fill it with some money
ACCOUNT = Account.generate()
NODE_CLI.spend(genesis, ACCOUNT.get_address(), 1000000000)
a = NODE_CLI.get_account_by_pubkey(pubkey=ACCOUNT.get_address())
print(f"Test account is {ACCOUNT.get_address()} with balance {a.balance}")
ACCOUNT_1 = Account.generate() # required for oracles
NODE_CLI.spend(genesis, ACCOUNT_1.get_address(), 1000000000)
a = NODE_CLI.get_account_by_pubkey(pubkey=ACCOUNT_1.get_address())
print(f"Test account (1) is {ACCOUNT_1.get_address()} with balance {a.balance}")
@contextmanager
def tempdir():
# contextmanager to generate and delete a temporary directory
path = tempfile.mkdtemp()
try:
yield path
finally:
shutil.rmtree(path)
def random_domain(length=10):
rand_str = ''.join(random.choice(string.ascii_letters) for _ in range(length))
return f"{rand_str}.test"
|
StarcoderdataPython
|
11227915
|
import duckdb
try:
import pyarrow
import pyarrow.parquet
import urllib.request
can_run = True
except:
can_run = False
class TestArrow(object):
def test_arrow(self, duckdb_cursor):
if not can_run:
return
parquet_filename = 'userdata1.parquet'
urllib.request.urlretrieve('https://github.com/cwida/duckdb-data/releases/download/v1.0/userdata1.parquet', parquet_filename)
cols = 'id, first_name, last_name, email, gender, ip_address, cc, country, birthdate, salary, title, comments'
# TODO timestamp
userdata_parquet_table = pyarrow.parquet.read_table(parquet_filename)
userdata_parquet_table.validate(full=True)
rel_from_arrow = duckdb.arrow(userdata_parquet_table).project(cols).arrow()
rel_from_arrow.validate(full=True)
rel_from_duckdb = duckdb.from_parquet(parquet_filename).project(cols).arrow()
rel_from_duckdb.validate(full=True)
# batched version, lets use various values for batch size
for i in [7, 51, 99, 100, 101, 500, 1000, 2000]:
userdata_parquet_table2 = pyarrow.Table.from_batches(userdata_parquet_table.to_batches(i))
assert userdata_parquet_table.equals(userdata_parquet_table2, check_metadata=True)
rel_from_arrow2 = duckdb.arrow(userdata_parquet_table2).project(cols).arrow()
rel_from_arrow2.validate(full=True)
assert rel_from_arrow.equals(rel_from_arrow2, check_metadata=True)
assert rel_from_arrow.equals(rel_from_duckdb, check_metadata=True)
con = duckdb.connect()
con.execute("select NULL c_null, (c % 4 = 0)::bool c_bool, (c%128)::tinyint c_tinyint, c::smallint*1000 c_smallint, c::integer*100000 c_integer, c::bigint*1000000000000 c_bigint, c::float c_float, c::double c_double, 'c_' || c::string c_string from (select case when range % 2 == 0 then range else null end as c from range(-10000, 10000)) sq")
arrow_result = con.fetch_arrow_table()
arrow_result.validate(full=True)
arrow_result.combine_chunks()
arrow_result.validate(full=True)
round_tripping = duckdb.from_arrow_table(arrow_result).to_arrow_table()
round_tripping.validate(full=True)
assert round_tripping.equals(arrow_result, check_metadata=True)
|
StarcoderdataPython
|
9643270
|
import matplotlib.pyplot as plt
from wordcloud import WordCloud
import platform
def create_cloud(word_list):
pf = platform.system()
if pf == 'Windows':
font_path = r"C:\WINDOWS\Fonts\UDDIGIKYOKASHON-R.TTC"
elif pf == 'Darwin':
font_path = "/System/Library/Fonts/ヒラギノ角ゴシック W4.ttc"
    elif pf == 'Linux':
        # no Japanese font configured for Linux; fall back to WordCloud's default
        font_path = None
    # configure the stop words
stop_words = [u'てる', u'いる', u'なる', u'れる', u'する', u'ある', u'こと', u'これ', u'さん', u'して',
u'くれる', u'やる', u'くださる', u'そう', u'せる', u'した', u'思う', u'ます',
u'それ', u'ここ', u'ちゃん', u'くん', u'って', u'て', u'に', u'を', u'は', u'の', u'が', u'と', u'た', u'し', u'で',
u'ない', u'も', u'な', u'い', u'か', u'ので', u'よう', u'から', u'けど',
'https', 't', '.', '/', '://', 'co', '@', '_', 'http',
'1', '2', '3', '4', '5', '6', '7', '8', '9', '0',
'()', '!']
# print(word_list)
word_cloud = WordCloud(background_color="black", font_path=font_path, width=3500, height=2000, max_words=500,
stopwords=set(stop_words)).generate(word_list)
plt.figure(figsize=(20, 12))
plt.imshow(word_cloud)
plt.axis("off")
plt.show()
# plt.savefig('./test.png')
if __name__ == '__main__':
    part = ''
    with open('./document/test.txt', mode='r', encoding='utf-8') as text_file:
        # wakati() (a Japanese tokenizer helper, e.g. MeCab-based) is assumed
        # to be defined elsewhere in the project; it is not part of this file
        text, test = wakati(text_file.read(), part)
word_list = " ".join(test)
print(word_list)
create_cloud(word_list)
|
StarcoderdataPython
|
11237079
|
<reponame>VaCH2/tosca-analysis
import os
import pandas as pd
#calculator is the class that calculates the source code measurements upon provided TOSCA blueprints
from toscametrics import calculator
import pickle
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
root_folder = os.path.dirname(os.path.dirname( __file__ ))
temp_data_folder = os.path.join(root_folder, 'temp_data', 'source_code_measurements')
if not os.path.exists(temp_data_folder):
os.makedirs(temp_data_folder)
class Data():
def __init__(self, split='all', metrics_type='tosca_and_general'):
        '''A dictionary where the keys are the possible alternatives in the provided split.
        The value is the corresponding, filtered dataframe.'''
try:
raw_df = pickle.load(open(os.path.join(temp_data_folder, 'all_raw_df'), 'rb'))
except (OSError, IOError):
files = self.get_indices('all', None)
json_data = self.json_data(metrics_type, files.get('all'))
raw_df = self.to_df(json_data)
pickle.dump(raw_df, open(os.path.join(temp_data_folder, 'all_raw_df'), 'wb'))
self.raw_df = raw_df
raw_size = self.raw_df.shape[0]
try:
df = pickle.load(open(os.path.join(temp_data_folder, 'all_df'), 'rb'))
except (OSError, IOError):
df = self.cleaning(self.raw_df)
pickle.dump(df, open(os.path.join(temp_data_folder, 'all_df'), 'wb'))
cleaned_size = df.shape[0]
self.droppedrows = raw_size - cleaned_size
split_indices = self.get_indices(split, df)
        #Include only valid files: get_indices looks at all files, so it does
        #not exclude the ones dropped during cleaning.
        #We also rename the index to the relative path.
filtered_dfs = {}
for split, files in split_indices.items():
files = [file.replace('c', 'C', 1) if file[0] == 'c' else file for file in files]
#files = [file.replace('\\analysis\\..\\', '\\') for file in files]
files = [file for file in files if file in list(df.index)]
if len(files) == 0:
continue
ix_mapping = {file : file.split('tmp\\')[1] for file in files}
filtered_df = df.loc[files]
filtered_df = filtered_df.rename(index=ix_mapping)
filtered_dfs[split] = filtered_df
self.dfs = filtered_dfs
def get_indices(self, split, df):
'''Filters the provided dataframe on the desired split and returns the indices of the filtered dataframe'''
data_path = os.path.join(root_folder, 'dataminer', 'tmp')
owners = [ item for item in os.listdir(data_path)]
owner_and_repo = []
for owner in owners:
repos = os.listdir(os.path.join(data_path, owner))
for repo in repos:
owner_and_repo.append((owner, repo))
professionalities = ['Example', 'Industry']
if split == 'repo':
split_paths = {f'{oar[0]}-{oar[1]}' : [os.path.join(data_path, oar[0], oar[1])] for oar in owner_and_repo}
elif split == 'professionality':
repo_paths = [os.path.join(data_path, oar[0], oar[1]) for oar in owner_and_repo]
split_paths = {}
for prof in professionalities:
split_paths[prof] = [os.path.join(repo_path, prof) for repo_path in repo_paths]
elif split == 'purpose':
split_files = self.filter_filetype(df)
elif split == 'all':
split_paths = {'all' : [data_path]}
else:
raise ValueError
if split != 'purpose':
split_files = {}
for split, paths in split_paths.items():
files = []
for path in paths:
files.extend(self.get_yaml_files(path))
split_files[split] = files
return split_files
def get_yaml_files(self, path):
'''Returns all the files with a YAML extension found in the provided path'''
extensions = ['.yaml', '.yml']
allFiles = []
listOfFile = os.listdir(path)
for entry in listOfFile:
fullPath = os.path.join(path, entry)
if os.path.isdir(fullPath):
allFiles = allFiles + self.get_yaml_files(fullPath)
else:
for extension in extensions:
if fullPath.endswith(extension):
allFiles.append(fullPath)
return allFiles
def calculate_vectors(self, instanceblock):
'''Transforms the provided instanceblock (string) into vectors'''
        vectorizer = CountVectorizer(token_pattern=r'[^\s]+').fit(instanceblock)
        matrix = vectorizer.transform(instanceblock)
        vectors = matrix.toarray()
return vectors
def calculate_cosine(self, vec1, vec2):
'''Calculates the cosine similarity score'''
vec1 = vec1.reshape(1, -1)
vec2 = vec2.reshape(1, -1)
return cosine_similarity(vec1, vec2)[0][0]
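    # e.g. parallel (or identical) vectors score exactly 1.0:
    #   calculate_cosine(np.array([1, 2, 0]), np.array([2, 4, 0])) -> 1.0
    # check_similarity() below relies on this, flagging duplicates as pairs
    # with a score of exactly 1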
def check_similarity(self, file_list):
'''Calculates the similarity score for each pair of the provided files and returns this in a list'''
try:
sims = pickle.load(open(os.path.join(temp_data_folder, 'similarity_scores'), 'rb'))
except (OSError, IOError):
string_list = []
for filePath in file_list:
try:
with open(filePath, 'r') as file:
yml = file.read()
except UnicodeDecodeError:
with open(filePath, 'r', encoding='utf-8') as file:
yml = file.read()
string_list.append(yml)
vectors = self.calculate_vectors(string_list)
sims = []
for i in list(enumerate(vectors)):
next_index = i[0] + 1
for j in list(enumerate(vectors))[next_index:]:
sims.append((i[0], j[0], self.calculate_cosine(i[1], j[1])))
pickle.dump(sims, open(os.path.join(temp_data_folder, 'similarity_scores'), 'wb'))
return sims
def json_data(self, metrics_type, yaml_files):
'''Calculates all the metrics over the provided files'''
metrics = calculator.MetricCalculator(yaml_files, metrics_type).getresults
return metrics
def to_df(self, json_data):
'''Transforms a JSON file to Pandas DataFrame'''
flat_dict = {}
for key, value in json_data.items():
df = pd.io.json.json_normalize(value, sep='_')
value = df.to_dict(orient='records')[0]
flat_dict[key] = value
df = pd.DataFrame.from_dict(flat_dict, orient='index')
return df
def cleaning(self, df):
'''Applies cleaning steps on the provided dataframe. Steps are: delete similar files,
check if files are valid tosca files, drop error message columns, drop rows containing nan
and make every column numeric.'''
print('size raw df: ', df.shape)
#Check similarity
similarity_scores = self.check_similarity(list(df.index))
similar_files = [pair for pair in similarity_scores if pair[2] == 1]
        # exclude the second member of each duplicate pair so that multiple
        # duplicates are all removed eventually; range() is used here because
        # this filter works on positional indices, while the next filter uses
        # the actual dataframe index
to_exclude = [pair[1] for pair in similar_files]
ixs_to_keep = [ix for ix in range(df.shape[0]) if ix not in to_exclude]
df = df.iloc[ixs_to_keep]
print('size df after similarity deletion: ', df.shape)
#Check valid tosca
tosca_metrics = ['na_count', 'nc_count', 'nc_min', 'nc_max', 'nc_median', 'nc_mean', 'ni_count',
'nif_count', 'ninp_count', 'ninpc_count', 'nn_count', 'nout_count', 'np_count',
'np_min', 'np_max', 'np_median', 'np_mean', 'nr_count', 'ttb_check', 'cdnt_count',
'cdrt_count', 'cdat_count', 'cdct_count', 'cddt_count', 'cdgt_count', 'cdit_count', 'cdpt_count',
'nw_count', 'tdb_check', 'nrq_count', 'nsh_count', 'ncys_count', 'tob_check',
'ngro_count', 'npol_count', 'nf_count']
check_tosca_df = df[tosca_metrics]
check_tosca_df['valid_file'] = check_tosca_df.any(1)
to_exclude = list(check_tosca_df[check_tosca_df['valid_file'] == False].index)
ixs_to_keep = [ix for ix in list(df.index) if ix not in to_exclude]
df = df.loc[ixs_to_keep]
print('size df after invalid TOSCA deletion: ', df.shape)
#Drop NaN rows and error columns, and make numeric
df = df.drop(labels=(df.filter(regex='msg').columns), axis=1)
df = df.dropna()
cols = df.select_dtypes(include=['bool', 'object']).columns
df[cols] = df[cols].astype(int)
df = df.dropna()
print('size df after NaN and error column drops: ', df.shape)
return df
def filter_filetype(self, original_df):
'''Filter on the file type. A file could be a service template, or containing
custom type definitions, both or none of these two. It returns a dictionary with the indices
belonging to each purpose.'''
df = original_df.copy()
custom_indicators = ['cdnt_count', 'cdrt_count', 'cdat_count', 'cdct_count', 'cddt_count', 'cdgt_count', 'cdit_count', 'cdpt_count']
cus_df = df[custom_indicators]
non_df = cus_df[(cus_df == 0).all(1)]
df['custom_def'] = [False if x in non_df.index else True for x in df.index]
split_paths = {}
split_paths['topology'] = df[(df['ttb_check'] == 1) & (df['custom_def'] == False)].index
split_paths['custom'] = df[(df['ttb_check'] == 0) & (df['custom_def'] == True)].index
split_paths['both'] = df[(df['ttb_check'] == 1) & (df['custom_def'] == True)].index
assigned_indices = list(split_paths['topology']) + list(split_paths['custom']) + list(split_paths['both'])
not_assigned_indices = [ix for ix in list(df.index) if ix not in assigned_indices]
split_paths['none'] = df.loc[not_assigned_indices].index
return split_paths
|
StarcoderdataPython
|
4886192
|
import redis
import threading
import time
import math
class RedisMonitor:
"""
Monitor Redis keys and send updates to all web socket clients.
"""
def __init__(self, host="localhost", port=6379, password="", db=0, refresh_rate=0.5, key_filter="", realtime=False):
"""
If realtime is specified, RedisMonitor will enable notifications for all
set events and subscribe to these notifications.
"""
self.host = host
self.port = port
self.password = password
self.db = db
self.refresh_rate = refresh_rate
self.key_filter = key_filter
self.realtime = realtime
        self.redis_db = redis.Redis(host=self.host, port=self.port, password=self.password, db=self.db, decode_responses=False)
self.message_last = {}
if self.realtime:
self.pubsub = self.redis_db.pubsub()
self.lock = threading.Lock()
self.message_buffer = []
# Need to perform the following command to enable keyevent notifications:
# config set notify-keyspace-events "$E"
notify_keyspace_events = self.redis_db.config_get("notify-keyspace-events")["notify-keyspace-events"]
if "$" not in notify_keyspace_events and "A" not in notify_keyspace_events:
# Add string commands to notifications
notify_keyspace_events += "$"
if "E" not in notify_keyspace_events:
# Add keyevent events to notifications
notify_keyspace_events += "E"
self.redis_db.config_set("notify-keyspace-events", notify_keyspace_events)
self.pubsub.psubscribe("__keyevent@%s__:set" % self.db)
def messenger(self, ws_server):
"""
When realtime is set, this thread sends messages to all web socket
clients every refresh_rate seconds.
"""
while True:
time.sleep(self.refresh_rate)
self.lock.acquire()
if not self.message_buffer:
self.lock.release()
continue
keyvals = self.message_buffer
self.message_buffer = []
self.lock.release()
ws_server.lock.acquire()
for client in ws_server.clients:
client.send(ws_server.encode_message(keyvals))
ws_server.lock.release()
def parse_val(self, key, skip_unchanged=True):
"""
Get the value from Redis and parse if it's an array.
If skip_unchanged = True, only returns values updated since the last call.
"""
import re
def isnumeric(s):
"""
Helper function to test if string is a number
"""
try:
float(s)
return True
except ValueError:
return False
if self.key_filter and re.match(self.key_filter, key) is None:
return
val = self.redis_db.get(key)
# Skip if the value hasn't changed
if skip_unchanged:
if key in self.message_last and val == self.message_last[key]:
return None
self.message_last[key] = val
        try:
            # decode bytes returned by Redis into a UTF-8 string
            val = val.decode("utf-8")
        except:
            # not valid UTF-8 (or already a string); leave the value as-is
            pass
return val
def _initialize_redis_keys(self):
import json
interaction = {
"key_object": "",
"idx_link": 0,
"pos_click_in_link": [0,0,0],
"pos_mouse_in_world": [0,0,0],
"modifier_keys": [],
"key_down": ""
}
self.redis_db.set("webapp::simulator::interaction", json.dumps(interaction))
def run_forever(self, ws_server):
"""
Listen for redis keys (either realtime or every refresh_rate seconds)
and send updated values to all web socket clients every refresh_rate seconds.
"""
self._initialize_redis_keys()
if not self.realtime:
# Send messages to clients every refresh_rate seconds
prev_keys = set()
while True:
time.sleep(self.refresh_rate)
key_vals = []
new_keys = set()
keys = [key for key in self.redis_db.scan_iter()]
for key in keys:
if self.redis_db.type(key) != b"string":
continue
key = key.decode("utf-8")
if "high_res" in key:
continue
new_keys.add(key)
val = self.parse_val(key)
if val is None:
continue
key_vals.append((key, val))
del_keys = list(prev_keys - new_keys)
prev_keys = new_keys
if not key_vals and not del_keys:
continue
for key in del_keys:
self.message_last.pop(key, None)
ws_server.lock.acquire()
for client in ws_server.clients:
client.send(ws_server.encode_message({"update": key_vals, "delete": del_keys}))
ws_server.lock.release()
else:
# Create thread to send messages to client with refresh rate
messenger_thread = threading.Thread(target=self.messenger, args=(ws_server,))
messenger_thread.daemon = True
messenger_thread.start()
# Listen for redis notifications
for msg in self.pubsub.listen():
if msg["pattern"] is None:
continue
key = msg["data"]
val = self.parse_val(key)
if val is None:
continue
self.lock.acquire()
self.message_buffer.append((key, val))
self.lock.release()
def initialize_client(self, ws_server, client):
"""
On first connection, send client all Redis keys.
"""
key_vals = []
# TODO: Don't disrupt other clients
self.message_last = {}
for key in sorted(self.redis_db.scan_iter()):
if self.redis_db.type(key) != b"string":
continue
val = self.parse_val(key, skip_unchanged=False)
if val is None:
continue
key_vals.append((key.decode("utf-8"), val))
client.send(ws_server.encode_message({"update": key_vals, "delete": []}))
|
StarcoderdataPython
|
6568423
|
"""
@file: __init__.py
@time: 2020-09-23 20:37:10
"""
|
StarcoderdataPython
|
4999770
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from matplotlib import pyplot as plt
#get_ipython().magic(u'matplotlib notebook')
from keras.models import load_model
from model import get_personlab
from scipy.ndimage.filters import gaussian_filter
import cv2
import numpy as np
from time import time
from config import config
import random
from post_proc import *
# In[2]:
tic = time()
#model = get_personlab(train=False, with_preprocess_lambda=True,
# intermediate_supervision=True,
# intermediate_layer='res4b12_relu',
# build_base_func=get_resnet101_base,
# output_stride=16)
model = get_personlab(train=False, with_preprocess_lambda=True,
output_stride=8)
print('Loading time: {}'.format(time()-tic))
# In[3]:
#model.load_weights('models/personlab_res101_400_r32_0510.h5')
model.load_weights('personlab_model_101_best.h5')
# In[4]:
# Pad image appropriately (to match relationship to output_stride as in training)
def pad_img(img, mult=16):
h, w, _ = img.shape
h_pad = 0
w_pad = 0
if (h-1)%mult > 0:
h_pad = mult-((h-1)%mult)
if (w-1)%mult > 0:
w_pad = mult-((w-1)%mult)
return np.pad(img, ((0,h_pad), (0,w_pad), (0,0)), 'constant')
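# e.g. with mult=16 a 388x388 input is padded to 401x401 so that
# (dim - 1) % 16 == 0, matching the size/output_stride relationship from training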
#img = cv2.imread('testim.jpg')
#img = cv2.resize(img, (0,0), fx=.9, fy=.9)
#img = cv2.resize(img, (388,388))
#img = pad_img(img)
#print 'Image shape: {}'.format(img.shape)
# In[6]:
cap = cv2.VideoCapture('vid.mov')
while(cap.isOpened()):
ret, frame = cap.read()
img = cv2.resize(frame, (0,0), fx=.9, fy=.9)
img = pad_img(img)
outputs = model.predict(img[np.newaxis,...])
outputs = [o[0] for o in outputs]
H = compute_heatmaps(kp_maps=outputs[0], short_offsets=outputs[1])
# Gaussian filtering helps when there are multiple local maxima for the same keypoint.
for i in range(17):
H[:,:,i] = gaussian_filter(H[:,:,i], sigma=2)
pred_kp = get_keypoints(H)
print(len(pred_kp))
cv2.imshow('frame',img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
# In[ ]:
|
StarcoderdataPython
|
6508299
|
<gh_stars>0
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of <NAME>, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id$
## Simple talker demo that published std_msgs/Strings messages
## to the 'chatter' topic
import rospy
import curses
import time
from config import *
from std_msgs.msg import Int16MultiArray
# get the curses screen window
screen = curses.initscr()
# turn off input echoing
curses.noecho()
# respond to keys immediately (don't wait for enter)
curses.cbreak()
# map arrow keys to special values
screen.keypad(True)
safety_counter = 0
no_response_max_time = 5000 # milliseconds
#def timeout():
# safety_counter += 1
# screen.addstr(3, 65, str(safety_counter))
# if safety_counter > no_response_max_time:
# pwm_input.data = [steering_neutral,throttle_neutral]
# screen.addstr(3, 0, '<<<<<<<<<<<< TIMEOUT REACHED, RESETTING TO NEUTRAL >>>>>>>>>>>')
# safety_counter = 0
def talker():
pub = rospy.Publisher('controls', Int16MultiArray, queue_size=10)
rospy.init_node('talker', anonymous=True)
rate = rospy.Rate(100) # 100hz
steering_neutral = round((STEERING_RIGHT_PWM + STEERING_LEFT_PWM)/2.0)
throttle_neutral = THROTTLE_STOPPED_PWM + 10
throttle_max = THROTTLE_STOPPED_PWM + THROTTLE_RANGE
throttle_min = THROTTLE_STOPPED_PWM - THROTTLE_RANGE
steering_scale = 15
steering_string_range = round((STEERING_RIGHT_PWM - STEERING_LEFT_PWM)/(2*steering_scale))
while not rospy.is_shutdown():
pwm_input = Int16MultiArray()
pwm_input.data = [steering_neutral,throttle_neutral]
screen.addstr(0, 0, \
'Receiving Inputs Now\n'\
'DO NOT PRESS CTRL-C TO QUIT\n'\
'Must press q instead\n'\
'Currently outputting:'\
'[Steering PWM, Throttle PWM] = \n\n')
screen.addstr(3, 52, str(pwm_input.data))
screen.addstr(5, 0, 'Steering Angle:')
try:
while True:
screen.timeout(no_response_max_time)
char = screen.getch()
if char == ord('q'):
return
elif char == curses.KEY_RIGHT:
# print doesn't work with curses, use addstr instead
if pwm_input.data[0] >= STEERING_RIGHT_PWM:
# limit the steering max value (right side)
pwm_input.data[0] = STEERING_RIGHT_PWM
else:
pwm_input.data[0] += steering_scale
screen.addstr(3, 0, \
'Currently outputting:'\
'[Steering PWM, Throttle PWM] = ')
screen.addstr(3, 52, str(pwm_input.data))
steering_angle = round((pwm_input.data[0] - steering_neutral)/steering_scale)
whitespace_range = steering_string_range - abs(steering_angle)
whitespace = " "*whitespace_range
if steering_angle < 0:
steering_angle = -steering_angle
anglespace = '<'*steering_angle
display_angle = '|' + whitespace + anglespace + 'O' + " "*steering_string_range + '|'
screen.addstr(5,15, display_angle)
elif steering_angle > 0:
anglespace = '>'*steering_angle
display_angle = '|' + " "*steering_string_range + 'O' + anglespace + whitespace + '|'
screen.addstr(5,15, display_angle)
else:
display_angle = '|' + " "*steering_string_range + 'O' + " "*steering_string_range +\
'|'
screen.addstr(5,15, display_angle)
elif char == curses.KEY_LEFT:
if pwm_input.data[0] <= STEERING_LEFT_PWM:
# limit the steering max value (left side)
pwm_input.data[0] = STEERING_LEFT_PWM
else:
pwm_input.data[0] -= steering_scale
screen.addstr(3, 0, \
'Currently outputting:'\
'[Steering PWM, Throttle PWM] = ')
screen.addstr(3, 52, str(pwm_input.data))
steering_angle = round((pwm_input.data[0] - steering_neutral)/steering_scale)
whitespace_range = steering_string_range - abs(steering_angle)
whitespace = " "*whitespace_range
if steering_angle < 0:
steering_angle = -steering_angle
anglespace = '<'*steering_angle
display_angle = '|' + whitespace + anglespace + 'O' + " "*steering_string_range + '|'
screen.addstr(5,15, display_angle)
elif steering_angle > 0:
anglespace = '>'*steering_angle
display_angle = '|' + " "*steering_string_range + 'O' + anglespace + whitespace + '|'
screen.addstr(5,15, display_angle)
else:
display_angle = '|' + " "*steering_string_range + 'O' + " "*steering_string_range +\
'|'
screen.addstr(5,15, display_angle)
elif char == curses.KEY_UP:
if pwm_input.data[1] >= throttle_max:
# limit the steering max value
pwm_input.data[1] = throttle_max
else:
pwm_input.data[1] += 1
screen.addstr(3, 0, \
'Currently outputting:'\
'[Steering PWM, Throttle PWM] = ')
screen.addstr(3, 52, str(pwm_input.data))
elif char == curses.KEY_DOWN:
if pwm_input.data[1] <= throttle_min:
# limit the steering max value
pwm_input.data[1] = throttle_min
else:
pwm_input.data[1] -= 1
screen.addstr(3, 0, \
'Currently outputting:'\
'[Steering PWM, Throttle PWM] = ')
screen.addstr(3, 52, str(pwm_input.data))
elif char == ord(' '):
                    # hard stop -- hitting spacebar resets the throttle to neutral
pwm_input.data[1] = throttle_neutral
screen.addstr(3, 0, \
'HARD STOP TRIGGERED: RESET TO NEUTRAL THROTTLE ')
elif char == -1:
# Increase the safety counter, in case connection is lost this will trigger a stop
pwm_input.data = [steering_neutral,throttle_neutral]
screen.addstr(3, 0, '<<<<<<<<<<<<<< TIMEOUT REACHED, RESET TO NEUTRAL >>>>>>>>>>>>> ')
#if char == curses.KEY_RIGHT and char == curses.KEY_UP:
# pwm_input.data[0] += 1
# pwm_input.data[1] += 1
#char = screen.getch()
#rospy.logerr(pwm_input)
pub.publish(pwm_input)
rate.sleep()
finally:
# shut down cleanly
curses.nocbreak();
screen.keypad(0);
curses.echo()
curses.endwin()
#hello_str = "hello world %s" % rospy.get_time()
#rospy.loginfo(tpwm_input)
#pub.publish(tpwm_input)
#rate.sleep()
if __name__ == '__main__':
try:
talker()
except rospy.ROSInterruptException:
curses.nocbreak();
screen.keypad(0);
curses.echo()
curses.endwin()
pass
|
StarcoderdataPython
|
5138658
|
from __future__ import absolute_import, division, print_function
from panoptes_client.panoptes import PanoptesObject, LinkResolver
from panoptes_client.utils import isiterable, split
BATCH_SIZE = 50
class User(PanoptesObject):
_api_slug = 'users'
_link_slug = 'users'
_edit_attributes = (
'valid_email',
)
@classmethod
def where(cls, **kwargs):
email = kwargs.get('email')
login = kwargs.get('login')
if email and login:
raise ValueError(
'Queries are supported on at most ONE of email and login'
)
# This is a workaround for
# https://github.com/zooniverse/Panoptes/issues/2733
kwargs['page_size'] = BATCH_SIZE
if email:
if not isiterable(email):
email = [email]
for batch in split(email, BATCH_SIZE):
kwargs['email'] = ",".join(batch)
for user in super(User, cls).where(**kwargs):
yield user
elif login:
if not isiterable(login):
login = [login]
for batch in split(login, BATCH_SIZE):
kwargs['login'] = ",".join(batch)
for user in super(User, cls).where(**kwargs):
yield user
else:
for user in super(User, cls).where(**kwargs):
yield user
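    # Minimal usage sketch (the logins are hypothetical):
    #   for user in User.where(login=["alice", "bob"]):
    #       print(user.id)
    # Lookups are chunked into groups of BATCH_SIZE to work around the
    # Panoptes issue referenced above.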
@property
def avatar(self):
"""
A dict containing metadata about the user's avatar.
"""
return User.http_get('{}/avatar'.format(self.id))[0]
LinkResolver.register(User)
LinkResolver.register(User, 'owner')
|
StarcoderdataPython
|
1849464
|
import sys
from typing import Dict
from reportlab.lib.units import inch
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
import uharfbuzz as hb
SCALE_MULT = 1e5
class RLKerningError(Exception):
pass
# TODO drop the text arg once drawGlyphs has been implemented
# TODO add space handling between words
# TODO struct cached_shape which includes the font name and size
def drawStringHB(pdf_canvas, x, y, text, cached_shape, mode=None, direction=None):
"""Draws a string in the current text styles."""
if sys.version_info[0] == 3 and not isinstance(text, str):
text = text.decode("utf-8")
t = pdf_canvas.beginText(x, y, direction=direction)
if mode is not None:
t.setTextRenderMode(mode)
x_advance = 0
y_advance = 0
# assumes the cached_shape buffer corresponds to the current text and style
# of the canvas
for i, pos in enumerate(cached_shape.glyph_positions):
xchar = x + x_advance / SCALE_MULT + pos.x_offset / SCALE_MULT
if pdf_canvas.bottomup:
            # TODO verify that y_advance / y_offset are always 0 for horizontal scripts
ychar = y + y_advance / SCALE_MULT + pos.y_offset / SCALE_MULT
else:
ychar = y - y_advance / SCALE_MULT - pos.y_offset / SCALE_MULT
t.setTextOrigin(xchar, ychar)
t.textOut(text[i])
x_advance += pos.x_advance
y_advance += pos.y_advance
if mode is not None:
t.setTextRenderMode(0)
pdf_canvas.drawText(t)
def stringWidthHB(cached_shape):
x_advance = 0
    # equivalent to:
    # font.stringWidth (which sums the glyph widths) + 2*sum(x_offset)
for pos in cached_shape.glyph_positions:
x_advance += pos.x_advance
return x_advance / SCALE_MULT
def canvas_shapeHB(pdf_canvas, text, features: Dict[str, bool] = None):
font_name = pdf_canvas._fontname
font_size = pdf_canvas._fontsize
return shapeHB(text, font_name, font_size, features)
def shapeHB(text, font_name, font_size, features: Dict[str, bool] = None):
font = pdfmetrics.getFont(font_name)
if not isinstance(font, TTFont):
# TODO make valid for all types of fonts
raise RLKerningError("Not a TTF font")
fontdata = font.face._ttf_data
face = hb.Face(fontdata)
font = hb.Font(face)
# HB scales to integers in offset and advance so very big scale
# will divide by SCALE_MULT to get the actual size in fractional points
font.scale = (font_size * SCALE_MULT, font_size * SCALE_MULT)
hb.ot_font_set_funcs(font)
buf = hb.Buffer()
buf.add_str(text)
buf.guess_segment_properties()
hb.shape(font, buf, features)
return buf
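# Minimal usage sketch (assumes the font was registered with pdfmetrics first;
# "liga" is a standard OpenType feature tag, though support depends on the font):
#   buf = shapeHB("ffi", "MinionPro-Regular", 26, features={"liga": True})
#   width_pt = stringWidthHB(buf)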
if __name__ == "__main__":
from reportlab.pdfgen import canvas # isort:skip
from reportlab.lib.colors import red # isort:skip
# fonts = ["FiraCode-Regular"]
pdfmetrics.registerFont(TTFont("tnr", "tests/times-new-roman.ttf"))
pdf_canvas = canvas.Canvas("test.pdf", pagesize=(8.5 * inch, 8.5 * inch))
pdf_canvas.saveState()
font = ("tnr", 26)
pdf_canvas.setFont(*font)
text = "l <NAME> AV <NAME>"
cached_shape = canvas_shapeHB(pdf_canvas, text)
drawStringHB(pdf_canvas, 1.5 * inch, 1.7 * inch, text, cached_shape)
pdf_canvas.drawString(1.5 * inch, 2 * inch, text)
# draw bbox
def draw_bbox(size, pos):
pdf_canvas.saveState()
pdf_canvas.setStrokeColor(red)
pdf_canvas.setLineWidth(1)
p = pdf_canvas.beginPath()
p.rect(*size, *pos)
pdf_canvas.drawPath(p, stroke=True, fill=False)
pdf_canvas.restoreState()
asc, desc = pdfmetrics.getAscentDescent(*font)
width = stringWidthHB(cached_shape)
draw_bbox((1.5 * inch, 1.7 * inch + desc), (width, asc - desc))
width = pdfmetrics.stringWidth(text, *font)
draw_bbox((1.5 * inch, 2 * inch + desc), (width, asc - desc))
pdf_canvas.restoreState()
pdf_canvas.showPage()
pdf_canvas.save()
import freetype # isort:skip
from freetype.ft_enums import FT_LOAD_NO_SCALE # isort:skip
pdfmetrics.registerFont(TTFont("FiraCode-Regular", "tests/FiraCode-Regular.ttf"))
face = freetype.Face("tests/FiraCode-Regular.ttf")
font = ("FiraCode-Regular", 26)
buf = shapeHB("www", *font)
infos = buf.glyph_infos
poss = buf.glyph_positions
for info, pos in zip(infos, poss):
gid = info.codepoint
cluster = info.cluster
xa = pos.x_advance / SCALE_MULT
xo = pos.x_offset / SCALE_MULT
name = face.get_glyph_name(gid)
a = 3
face = freetype.Face("tests/FiraCode-Regular.ttf")
empty = face.get_glyph_name(1625)
con = face.get_glyph_name(1495)
face.set_char_size(48 * 64)
face.load_glyph(1625)
m = face.glyph.metrics
a = 3
pdfmetrics.registerFont(TTFont("MinionPro-Regular", "tests/Minion Pro Regular.ttf"))
face = freetype.Face("tests/Minion Pro Regular.ttf")
units_em = face.units_per_EM
font = ("MinionPro-Regular", 48)
buf = shapeHB("ffi ffl", *font)
infos = buf.glyph_infos
poss = buf.glyph_positions
for info, pos in zip(infos, poss):
gid = info.codepoint
cluster = info.cluster
name = face.get_glyph_name(gid)
xa = pos.x_advance / SCALE_MULT
xo = pos.x_offset / SCALE_MULT
face.set_char_size(48 * 64)
face.load_glyph(gid, FT_LOAD_NO_SCALE)
# xa == horiadvance / units_em * font_size
m = face.glyph.metrics
a = 3
|
StarcoderdataPython
|
8118471
|
<filename>covariant_compositional_networks_tf2/tests/testModel_2.py
from covariant_compositional_networks_tf2.CCN_Model import CCN_Model
import tensorflow as tf
from functools import reduce
from operator import mul
from ordered_set import OrderedSet
import numpy as np
from sklearn.metrics import accuracy_score
from graphColoring import randomNPGraph, checkIfGraphConnected
channels_in = 2
feature_vector_shape=[1]
k=2
model = CCN_Model(lr =2e-4, lr_decay_rate=0.95, lr_min=3e-6, loss = tf.losses.logcosh, nonlinearity=tf.nn.tanh,
feature_vector_shape=feature_vector_shape, num_layers=5, k=k, channels_in=[channels_in,4,5,4,3,1])
helperVar = reduce(mul, [channels_in] + feature_vector_shape)
inp = [[tf.Variable(tf.reshape(tf.range(helperVar, dtype=tf.float32) + 1, [channels_in] + [1] * k + feature_vector_shape )),
tf.Variable(tf.reshape(tf.range(helperVar, dtype=tf.float32) + helperVar + 1, [channels_in] + [1] * k + feature_vector_shape)),
tf.Variable(tf.reshape(tf.range(helperVar, dtype=tf.float32) + helperVar * 2 + 1, [channels_in]+ [1] * k + feature_vector_shape))],
# 2 feature vectors
# for k =2 its [ [[[1,2,3]]], [[[5,6,7]]] ]
np.array([[1, 1, 0],
[1, 1, 1],
[0, 1, 1]]), # adjacency matrix of DIRECTED graph - node[0] will gather inputs from [0] and [1]
# and node[1] only from [1]
[OrderedSet([0]), OrderedSet([1]), OrderedSet([2])]] # parts - P(0) = {0}, and P(1) = {1} - cummulative receptive field
def complete_graph(nodes):
node_features = [tf.Variable(tf.reshape(tf.ones(helperVar, dtype=tf.float32), [channels_in] + [1] * k + feature_vector_shape ))] * nodes
adjM = np.ones((nodes, nodes))
parts = [OrderedSet([i]) for i in range(nodes)]
return [node_features, adjM, parts]
def uncomplete_graph(nodes):
node_features = [tf.Variable(tf.reshape(tf.ones(helperVar, dtype=tf.float32), [channels_in] + [1] * k + feature_vector_shape ))] * nodes
while True:
adjM = randomNPGraph(nodes, 0.5, diagonal = True, undirected = True)
if checkIfGraphConnected(adjM) and not np.array_equal(adjM, np.ones(adjM.shape)):
break
parts = [OrderedSet([i]) for i in range(nodes)]
return [node_features, adjM, parts]
def gen_graphs(num, nodes, p=.5):
graphs = []
for _ in range(num):
if np.random.rand()>p:
G, y = complete_graph(nodes), tf.constant([1.])
graphs.append(G + [y])
else:
G, y = uncomplete_graph(nodes), tf.constant([0.])
graphs.append(G + [y])
return graphs
def train_test_split(graphs, train_fraction=0.8):
split_idx = int(np.round(train_fraction*len(graphs), decimals=0))
g_train, g_test = graphs[:split_idx], graphs[split_idx:]
return g_train, g_test
def data_preparation(n = 200, nodes = 4):
    graphs = gen_graphs(n, nodes)
    return train_test_split(graphs, train_fraction=0.8)
#result = model.predict(inp[0], inp[1], inp[2])
#resultSum = np.sum(result)
#y = tf.constant([2.0])
#print(y)
#print(result)
def train_and_test(model, data, epochs=100):
    g_train, g_test = data
    for epoch in range(epochs):
        for i in range(len(g_train)):
            features, adjM, parts, y = g_train[i]
            model.fit(features, y, adjM, parts)
    predicted = []
    target = []
    for i in range(len(g_test)):
        features, adjM, parts, y = g_test[i]
        target.append(y)
        predicted.append(model.predict(features, adjM, parts))
    print("Accuracy of the model is {}".format(accuracy_score(target, predicted)))
if __name__=='__main__':
train_and_test(model, data_preparation())
# for i in range(100):
#
# #model.fit(inp[0], y, inp[1], inp[2])
# for i in range(len(y)):
#
# result = model.predict(inp[0], inp[1], inp[2])
# print(result)
#list(reversed(inp[0]))
# inpSwap = [inp[0][1], inp[0][2], inp[0][0]]
# adjMOther = np.array([[1, 1, 1], [1, 1, 1], [1,1,1]])
# result = model.predict(inp[0], adjMOther, inp[2])
# print(result)
|
StarcoderdataPython
|
1824825
|
import pickle
import time
import numpy as np
from sklearn import metrics
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import utils as u
def get_classifiers():
return [
# ('Logistic Regression (C=1)', LogisticRegression(C=1)),
# ('SVM, linear', SVC(kernel="linear", C=0.015, cache_size=200)),
# ('k nn', KNeighborsClassifier(3)),
# ('Decision Tree', DecisionTreeClassifier(max_depth=15)),
('Random Forest', RandomForestClassifier(n_estimators=500, n_jobs=10)),
# ('AdaBoost', AdaBoostClassifier()),
# ('Naive Bayes', GaussianNB()),
# ('LDA', LinearDiscriminantAnalysis()),
# ('QDA', QuadraticDiscriminantAnalysis()),
# ('Gradient Boosting', GradientBoostingClassifier(learning_rate=0.5, n_estimators=50))
]
def train_and_evaluate_all(feature_set):
data = get_data(feature_set)
classifiers = get_classifiers()
# one loop to fit them all
classifier_data = {}
best = []
for clf_name, clf in classifiers:
print("\n'%s' classifier..." % clf_name)
t0 = time.time()
clf.fit(data['train']['X'], data['train']['y'])
t1 = time.time()
an_data = get_results(clf, data, t1 - t0, feature_set, clf_name=clf_name, save_confusion=True)
classifier_data[clf_name] = {'training_time': t1 - t0,
'testing_time': an_data['testing_time'],
'accuracy': an_data['accuracy']}
best.append((clf_name, an_data['accuracy']))
best = sorted(best, key=lambda x: x[1]) # sort by accuracy
print_top_n(best, 10)
pickle.dump(best, open('./sk_classifier_results_%s.p' % feature_set, "wb"))
def get_results(clf, data, fit_time, feature_set, clf_name='', save_confusion=False):
results = {}
t0 = time.time()
predicted = np.array([])
for i in range(0, len(data['test']['X']), 128): # go in chunks of size 128
predicted_single = clf.predict(data['test']['X'][i:(i + 128)])
predicted = np.append(predicted, predicted_single)
t1 = time.time()
cm = metrics.confusion_matrix(data['test']['y'], predicted)
results['testing_time'] = t1 - t0
results['accuracy'] = metrics.accuracy_score(data['test']['y'], predicted)
print("classifier: %s" % clf_name)
print("training time: %0.4fs" % fit_time)
print("testing time: %0.4fs" % results['testing_time'])
print("accuracy: %0.4f" % results['accuracy'])
print("confusion matrix:\n%s" % cm)
if save_confusion:
path = './confusion_plots/%s_%s' % (clf_name, feature_set)
title = '%s (accuracy: %0.2f)' % (clf_name, results['accuracy'])
u.plot_confusion_matrix(cm, path, title=title)
return results
def get_data(feature_set):
# load dataset
paths = u.load_paths('PATHS.yaml') # get paths from file
train_x, val_x, test_x = u.load_data(paths['extracted_data'] + 'features_%s.p' % feature_set)
train_y, val_y, test_y = u.load_data(paths['extracted_data'] + 'labels_%s.p' % feature_set)
    test_x = np.vstack((val_x, test_x))  # don't use a separate validation set for grid search; add it to the test data
test_y = np.vstack((val_y, test_y))
train_y = u.inv_one_hot_encode(train_y) # remove one hot encoding
test_y = u.inv_one_hot_encode(test_y)
if feature_set == u.FEATURE_SET_SPECS_NORM:
# flatten 128 x 128 image
length = train_x.shape[1] * train_x.shape[2]
train_x = train_x.reshape(train_x.shape[0], length)
test_x = test_x.reshape(test_x.shape[0], length)
data = {'train': {'X': train_x, 'y': train_y},
'test': {'X': test_x, 'y': test_y},
'n_classes': len(np.unique(train_y))}
print("dataset has %i training samples and %i test samples." %
(len(data['train']['X']), len(data['test']['X'])))
return data
def print_top_n(l, n=10):
print('\nTop %d' % n)
for i in range(0, n):
name, acc = l.pop()
print('%d : %s (%0.2f)' % (i + 1, name, acc))
if __name__ == '__main__':
feature_set = u.FEATURE_SET_MEANS
# feature_set = u.FEATURE_SET_SPECS_NORM
results = train_and_evaluate_all(feature_set)
|
StarcoderdataPython
|
12825797
|
<reponame>iamharshit/ML_works
import tensorflow as tf
import numpy as np
import cv2
img_original = cv2.imread('jack.jpg') #data.camera()
img = cv2.resize(img_original, (64*5,64*5))
# for positions
xs = []
# for corresponding colors
ys = []
for row_i in range(img.shape[0]):
for col_i in range(img.shape[1]):
xs.append([row_i, col_i])
ys.append(img[row_i, col_i])
# list->numpy array
xs,ys = np.array(xs),np.array(ys)
# normalising input img
xs = (xs-np.mean(xs))/np.std(xs)
# placeholders for input and output
X = tf.placeholder(tf.float32, shape=[None, 2], name='X')
Y = tf.placeholder(tf.float32, shape=[None, 3], name='Y')
#defining weights,bias,non-linearity
def linear(X, n_input, n_output, activation=None, scope=None):
with tf.variable_scope(scope or "linear"):
W = tf.get_variable(
name='W',
shape=[n_input, n_output],
initializer=tf.random_normal_initializer(mean=0.0, stddev=0.1))
b = tf.get_variable(
name='b',
shape=[n_output],
initializer=tf.constant_initializer())
h = tf.matmul(X, W) + b
if activation is not None:
h = activation(h)
return h
#building a fully-connected net: 2 inputs -> six hidden layers of 64 units -> 3 outputs
n_neurons = [2,64,64,64,64,64,64,3]
#defining the distance metric used by the cost
def distance(p1, p2):
return tf.abs(p1 - p2)
#building network
current_input = X
for layer_i in range(1, len(n_neurons)):
current_input = linear(
X=current_input,
n_input=n_neurons[layer_i - 1],
n_output=n_neurons[layer_i],
activation=tf.nn.relu if (layer_i+1) < len(n_neurons) else None,
scope='layer_' + str(layer_i))
Y_pred = current_input
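# the cost sums the per-channel absolute errors for each predicted pixel and
# averages over the batch -- a mean per-pixel L1 loss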
cost = tf.reduce_mean(tf.reduce_sum(distance(Y_pred,Y),1) )
optimizer = tf.train.AdamOptimizer(0.001).minimize(cost)
#training Neural Net
n_iterations = 500
batch_size = 50
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
prev_training_cost = 0.0
for it_i in range(n_iterations):
idxs = np.random.permutation(range(len(xs)))
n_batches = len(idxs) // batch_size
for batch_i in range(n_batches):
idxs_i = idxs[batch_i * batch_size: (batch_i + 1) * batch_size]
sess.run(optimizer, feed_dict={X: xs[idxs_i], Y: ys[idxs_i] })
training_cost = sess.run(cost, feed_dict={X: xs, Y: ys})
print(it_i, training_cost)
        if (it_i + 1) % 20 == 0:
            ys_pred = Y_pred.eval(feed_dict={X: xs}, session=sess)
            print(ys_pred.shape, img.shape)
            img = np.clip(ys_pred.reshape(img.shape), 0, 255).astype(np.uint8)
            cv2.imwrite("face____" + str(it_i) + ".jpg", img)
|
StarcoderdataPython
|
1945085
|
<reponame>immuta/tap-canny
"""tap-canny"""
|
StarcoderdataPython
|