blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
283
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
41
| license_type
stringclasses 2
values | repo_name
stringlengths 7
96
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 58
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 12.7k
662M
⌀ | star_events_count
int64 0
35.5k
| fork_events_count
int64 0
20.6k
| gha_license_id
stringclasses 11
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 43
values | src_encoding
stringclasses 9
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
5.88M
| extension
stringclasses 30
values | content
stringlengths 7
5.88M
| authors
sequencelengths 1
1
| author
stringlengths 0
73
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
79a91706bf96b9abcd40c27b0c9b989e0f98d43c | a226186de42abb44e426e54b2dd03ccfc882fd75 | /courses/settings.py | d32737c62dc4fd969901f042bbe18662f1dca84b | [] | no_license | KristoD/Courses-Python2-Django | 87ad0b54aae9007407929033545a9d8fe0b3e380 | 371bafcc1239e21c4c839b8158b3339b43e16937 | refs/heads/master | 2020-03-26T00:40:56.025046 | 2018-08-10T21:03:44 | 2018-08-10T21:03:44 | 144,332,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,116 | py | """
Django settings for courses project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load it
# from the environment before any production deployment.
SECRET_KEY = 'r$y&a9a^dhb$0yg54wo5sj^=c3)i11ky*^^dgxm6xcrdoqhjj!'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty is fine for local development; must list served hosts in production.
ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = [
    'apps.course',  # the project's own app
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'courses.urls'

# Templates are loaded from each app's templates/ directory (APP_DIRS=True).
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'courses.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# Local SQLite file; suitable for development only.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
adfbeffea9979a517b0969fbc17f9bae8accd85f | 3a0da75df781481b054ab885a251361b4559aea2 | /superlists/lists/tests/test_views.py | 5dddcf23566e246c908c0e5de6b485b245fcef5f | [] | no_license | vickiev305/mhcjterm2016 | 03bc92bd1404e60b69a80b12cf7b4ee09ac3bc7f | 6dfdb5098a9883d947f78e8522821882c395ffac | refs/heads/master | 2021-01-10T02:10:27.405891 | 2016-01-15T16:50:46 | 2016-01-15T16:50:46 | 49,726,175 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,127 | py | from django.core.urlresolvers import resolve
from django.utils.html import escape
from django.template.loader import render_to_string
from django.test import TestCase
from lists.views import home_page
from django.http import HttpRequest
from lists.models import Item, List
class HomePageTest(TestCase):
    """Tests for the '/' home page view: URL resolution, rendered HTML,
    and the list of to-do lists passed into the template context."""

    def test_root_url_resolves_to_home_page_view(self):
        # '/' must be routed to the home_page view function.
        found = resolve('/')
        self.assertEqual(found.func, home_page)

    def test_home_page_returns_correct_html(self):
        # Calling the view directly should render exactly home.html.
        request = HttpRequest()
        response = home_page(request)
        expected_html = render_to_string('home.html')
        self.assertEqual(response.content.decode(), expected_html)

    def test_home_page_has_todo_lists(self):
        # The context variable 'todo_lists' should contain every List,
        # in creation order.
        list1 = List.objects.create(name="List 1")
        list2 = List.objects.create(name="List 2")
        response = self.client.get('/')
        context = response.context['todo_lists']
        self.assertEqual(len(context), 2)
        self.assertEqual(context[0], list1)
        self.assertEqual(context[1], list2)
class NewListTest(TestCase):
    """Tests for POSTing to /lists/new: item creation, redirect,
    validation errors, and the new list's derived name."""

    def test_saving_a_POST_request(self):
        # A valid POST creates exactly one Item with the submitted text.
        self.client.post(
            '/lists/new',
            data={'item_text': 'A new list item'}
        )
        self.assertEqual(Item.objects.count(), 1)
        new_item = Item.objects.first()
        self.assertEqual(new_item.text, 'A new list item')

    def test_redirects_after_POST(self):
        # After a successful POST we are redirected to the new list's page.
        response = self.client.post(
            '/lists/new',
            data={'item_text': 'A new lists item'}
        )
        new_list = List.objects.first()
        self.assertRedirects(response, '/lists/%d/' % (new_list.id,))

    def test_validation_errors_are_sent_back_to_home_page(self):
        # An empty item re-renders the home page with an escaped error message.
        response = self.client.post('/lists/new', data={'item_text': ''})
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'home.html')
        expected_error = escape("You can't have an empty list item")
        self.assertContains(response, expected_error)

    def test_invalid_items_arent_saved(self):
        # Failed validation must not persist a List or an Item.
        self.client.post('/lists/new', data={'item_text': ''})
        self.assertEqual(List.objects.count(), 0)
        self.assertEqual(Item.objects.count(), 0)

    def test_new_list_has_name_of_first_item(self):
        # The list's name is taken from the text of its first item.
        response = self.client.post(
            '/lists/new',
            data={'item_text': 'A new lists item'}
        )
        new_list = List.objects.first()
        self.assertEqual(new_list.name, 'A new lists item')
class ListViewTest(TestCase):
    """Tests for the single-list view at /lists/<id>/: template choice,
    per-list item isolation, POSTing new items, validation, the done
    checkbox and renaming the list."""

    def test_uses_list_template(self):
        new_list = List.objects.create()
        response = self.client.get('/lists/%d/' % (new_list.id,))
        self.assertTemplateUsed(response, 'list.html')

    def test_displays_only_items_for_list(self):
        # Items belonging to another list must not leak into this page.
        new_list = List.objects.create()
        Item.objects.create(text='itemey 1', list=new_list)
        Item.objects.create(text='itemey 2', list=new_list)
        other_list = List.objects.create()
        Item.objects.create(text='other item 1', list=other_list)
        Item.objects.create(text='other item 2', list=other_list)
        response = self.client.get('/lists/%d/' % (new_list.id,))
        self.assertContains(response, "itemey 1")
        self.assertContains(response, "itemey 2")
        self.assertNotContains(response, 'other item 1')
        self.assertNotContains(response, 'other item 2')

    def test_passes_correct_list_to_template(self):
        correct_list = List.objects.create()
        response = self.client.get('/lists/%d/' % (correct_list.id,))
        self.assertEqual(response.context['list'], correct_list)

    def test_can_save_a_POST_request_to_an_existing_list(self):
        correct_list = List.objects.create()
        self.client.post(
            '/lists/%d/' % (correct_list.id,),
            data={'item_text': 'A new item for an existing list'}
        )
        self.assertEqual(Item.objects.count(), 1)
        new_item = Item.objects.first()
        self.assertEqual(new_item.text, 'A new item for an existing list')
        self.assertEqual(new_item.list, correct_list)

    def test_validation_errors_stay_on_list_page(self):
        # An empty item re-renders list.html with the escaped error message.
        current_list = List.objects.create()
        response = self.client.post(
            '/lists/%d/' % (current_list.id,),
            data={'item_text': ''}
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'list.html')
        expected_error = escape("You can't have an empty list item")
        self.assertContains(response, expected_error)

    def test_invalid_items_arent_saved(self):
        # NOTE(review): this URL has no trailing slash, unlike the other
        # POSTs in this class -- confirm both routes exist in urls.py.
        current_list = List.objects.create()
        self.client.post(
            '/lists/%d' % (current_list.id),
            data={'item_text': ''}
        )
        self.assertEqual(Item.objects.count(), 0)

    def test_list_view_displays_checkbox(self):
        current_list = List.objects.create()
        Item.objects.create(text="Item 1", list=current_list)
        Item.objects.create(text="Item 2", list=current_list)
        response = self.client.get('/lists/%d/' % (current_list.id,))
        self.assertContains(response, 'input type="checkbox"')

    def test_edit_list_name(self):
        # POSTing 'list_name' renames the list.
        current_list = List.objects.create()
        self.client.post(
            '/lists/%d/' % (current_list.id,),
            data={'list_name': 'New List'}
        )
        self.assertEqual(List.objects.first().name, 'New List')
class EditListTest(TestCase):
    """Tests for POSTing done-state changes to /lists/<id>/items/.
    Items whose ids appear in 'mark_item_done' become done; the view
    redirects back to the list page."""

    def test_POST_items_toggle_done(self):
        # Create list and items
        current_list = List.objects.create()
        item1 = Item.objects.create(text="Item 1", list=current_list)
        item2 = Item.objects.create(text="Item 2", list=current_list)
        # POST data
        response = self.client.post(
            '/lists/%d/items/' % (current_list.id,),
            data={'mark_item_done': item1.id},
        )
        # -including toggle item
        self.assertRedirects(response, '/lists/%d/' % (current_list.id,))
        # Check that item is updated (re-fetch to see persisted state)
        item1 = Item.objects.get(id=item1.id)
        item2 = Item.objects.get(id=item2.id)
        self.assertTrue(item1.is_done)
        self.assertFalse(item2.is_done)

    def test_POST_multiple_items_done(self):
        # Multiple ids in 'mark_item_done' mark every listed item done.
        current_list = List.objects.create()
        item1 = Item.objects.create(text="Item 1", list=current_list)
        item2 = Item.objects.create(text="Item 2", list=current_list)
        response = self.client.post(
            '/lists/%d/items/' % (current_list.id),
            data={'mark_item_done': [item1.id, item2.id]}
        )
        item1 = Item.objects.get(id=item1.id)
        item2 = Item.objects.get(id=item2.id)
        self.assertTrue(item1.is_done)
        self.assertTrue(item2.is_done)

    def test_POST_zero_items_done(self):
        # An empty POST leaves every item not-done.
        current_list = List.objects.create()
        item1 = Item.objects.create(text="Item 1", list=current_list)
        item2 = Item.objects.create(text="Item 2", list=current_list)
        response = self.client.post(
            '/lists/%d/items/' % (current_list.id),
            data={}
        )
        item1 = Item.objects.get(id=item1.id)
        item2 = Item.objects.get(id=item2.id)
        self.assertFalse(item1.is_done)
        self.assertFalse(item2.is_done)

    def test_POST_item_toggles_done(self):
        # An item already done but absent from the POST is toggled back off,
        # while the posted item is turned on.
        # Create list and items
        current_list = List.objects.create()
        item1 = Item.objects.create(text="Item 1", list=current_list, is_done=True)
        item2 = Item.objects.create(text="Item 2", list=current_list, is_done=False)
        # POST data
        response = self.client.post(
            '/lists/%d/items/' % (current_list.id,),
            data={'mark_item_done': item2.id},
        )
        # -including toggle item
        self.assertRedirects(response, '/lists/%d/' % (current_list.id,))
        # Check that item is updated
        item1 = Item.objects.get(id=item1.id)
        item2 = Item.objects.get(id=item2.id)
        self.assertFalse(item1.is_done)
        self.assertTrue(item2.is_done)
| [
"[email protected]"
] | |
2e6db1095be14073089b39307e04d983490684cd | 5e32c5c4db30edfbbd8016325dd0d48ddf672d4f | /integrationtest/vm/hybrid/test_stub.py | f611a2d19a6a0efa47dc907892e8d11930e7e680 | [
"Apache-2.0"
] | permissive | welldoer/zstack-woodpecker | 7519e0cffe98cba84b7774c8e8b296c2d1318615 | e0cea9672c764858fc124fa0b02111fc31b2af0f | refs/heads/master | 2020-03-28T22:11:50.431319 | 2018-09-14T10:08:27 | 2018-09-14T10:08:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51,442 | py | '''
Create an unified test_stub to share test operations
@author: Youyk
'''
import os
import sys
import time
import random
import commands
import threading
import urllib2
import functools
import zstacklib.utils.ssh as ssh
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.zstack_test.zstack_test_vm as zstack_vm_header
import zstackwoodpecker.zstack_test.zstack_test_vip as zstack_vip_header
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.net_operations as net_ops
import zstackwoodpecker.operations.account_operations as acc_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.hybrid_operations as hyb_ops
import zstackwoodpecker.operations.nas_operations as nas_ops
import zstackwoodpecker.operations.ipsec_operations as ipsec_ops
from multiprocessing import Process
Port = test_state.Port
rule1_ports = Port.get_ports(Port.rule1_ports)
rule2_ports = Port.get_ports(Port.rule2_ports)
rule3_ports = Port.get_ports(Port.rule3_ports)
rule4_ports = Port.get_ports(Port.rule4_ports)
rule5_ports = Port.get_ports(Port.rule5_ports)
denied_ports = Port.get_denied_ports()
#rule1_ports = [1, 22, 100]
#rule2_ports = [9000, 9499, 10000]
#rule3_ports = [60000, 60010, 65535]
#rule4_ports = [5000, 5501, 6000]
#rule5_ports = [20000, 28999, 30000]
#test_stub.denied_ports = [101, 4999, 8990, 15000, 30001, 49999]
target_ports = rule1_ports + rule2_ports + rule3_ports + rule4_ports + rule5_ports + denied_ports
datacenter_type = os.getenv('datacenterType')
_postfix = time.strftime('%m%d-%H%M%S', time.localtime())
TEST_ECS_NAME = 'ZStack-Hybrid-Test-ECS-Instance'
ECS_IMAGE_NAME = 'zstack-test-ecs-image'
class HybridObject(object):
def __init__(self):
    """Container for the hybrid (Aliyun) resources a test case creates/uses.

    Every attribute starts as None and is filled in lazily by the
    add_* / create_* / get_* helpers on this class.
    """
    self.ks = None                 # hybrid key secret (access key) inventory
    self.datacenter = None         # DataCenter inventory added from remote
    self.region_id = None          # remote region id of self.datacenter
    self.iz = None                 # identity zone inventory
    self.vpc = None                # ECS VPC inventory
    self.vswitch = None            # ECS vswitch inventory
    self.vpn = None
    self.eip = None                # hybrid elastic IP inventory
    self.eip_create = None         # flag: EIP was created by this test
    self.sg = None                 # ECS security group inventory
    self.sg_create = None          # flag: security group was created by this test
    self.sg_rule = None            # list of security-group rule inventories
    self.vm = None
    self.vr = None
    self.vr_name = 'zstack-test-vrouter-%s' % _postfix
    self.route_entry = None
    self.oss_bucket = None         # OSS bucket inventory
    self.oss_bucket_create = None  # flag: bucket was created by this test
    self.ecs_instance = None       # ECS instance inventory
    self.ecs_image = None
    self.disk = None               # Aliyun data disk inventory
    self.snapshot = None           # Aliyun disk snapshot inventory
    self.vpn_gateway = None        # VPC VPN gateway inventory
    self.user_vpn_gateway = None
    self.user_gw_ip = None
    self.dst_cidr_block = None
    self.prepaid_ecs = None        # a PrePaid (subscription) ECS instance
    self.vpn_connection = None
    # Query condition matching system images.
    self.cond_image_system = res_ops.gen_query_conditions('type', '=', 'system')
def add_ks(self, ks2):
    """Ensure a hybrid key secret exists and is attached.

    Reuses an existing 'test_hybrid'/'test_hybrid2' key when present;
    otherwise adds one from the aliyunKey/aliyunSecret (or *2 when ks2 is
    truthy) environment variables.
    """
    ks_existed = hyb_ops.query_hybrid_key_secret()
    for ks in ks_existed:
        if ks.name == 'test_hybrid2' and ks2:
            self.ks = ks
        elif ks.name == 'test_hybrid' and not ks2:
            self.ks = ks
    if not self.ks:
        if ks2:
            self.ks = hyb_ops.add_hybrid_key_secret('test_hybrid2', 'test for hybrid', os.getenv('aliyunKey2'), os.getenv('aliyunSecret2'))
        else:
            self.ks = hyb_ops.add_hybrid_key_secret('test_hybrid', 'test for hybrid', os.getenv('aliyunKey'), os.getenv('aliyunSecret'))
    hyb_ops.attach_hybrid_key(self.ks.uuid)
    time.sleep(5)  # give the attach a moment to take effect
def clean_datacenter(self):
    """Remove every DataCenter record that remains in the local database."""
    for dc in hyb_ops.query_datacenter_local() or []:
        hyb_ops.del_datacenter_in_local(dc.uuid)
def add_datacenter_iz(self, add_datacenter_only=False, check_vpn_gateway=False, region_id=None, check_ecs=False, check_prepaid_ecs=False, ks2=None):
    """Attach a key secret, then add a DataCenter (and usually an identity zone).

    With region_id given, only that region's DataCenter is added. Otherwise
    remote regions (except cn-beijing) are iterated until one satisfies the
    requested precondition:
      - check_vpn_gateway: a VPN gateway in 'Normal' business status exists
      - check_ecs:         at least one ECS instance already exists
      - check_prepaid_ecs: at least one PrePaid ECS instance exists
      - none of the above: the first zone with available instance types wins
    On success self.datacenter/self.iz (and the matching resource attribute)
    are set; zones/datacenters that don't qualify are cleaned up again.
    """
    self.add_ks(ks2=ks2)
    self.clean_datacenter()
    if region_id:
        # Caller pinned a region: add just that one and stop.
        self.datacenter = hyb_ops.add_datacenter_from_remote(datacenter_type, region_id, 'datacenter for test')
        self.region_id = region_id
        return
    datacenter_list = hyb_ops.get_datacenter_from_remote(datacenter_type)
    regions = [dc.regionId for dc in datacenter_list]
    err_list = []
    for r in regions:
        if r == 'cn-beijing':
            # cn-beijing is deliberately excluded from the search.
            continue
        try:
            datacenter = hyb_ops.add_datacenter_from_remote(datacenter_type, r, 'datacenter for test')
        except hyb_ops.ApiError, e:
            err_list.append(e)
            continue
        if datacenter and add_datacenter_only:
            self.datacenter = datacenter
            self.region_id = r
            return
        elif len(err_list) == len(regions):
            # Every region failed to add -- give up.
            raise hyb_ops.ApiError("Failed to add DataCenter: %s" % err_list)
        # Add Identity Zone
        iz_list = hyb_ops.get_identity_zone_from_remote(datacenter_type, r)
        vpn_gateway_normal = []
        prepaid_ecs_list = []
        for iz in iz_list:
            if not iz.availableInstanceTypes:
                # Zone cannot host instances -- skip it.
                continue
            iz_inv = hyb_ops.add_identity_zone_from_remote(datacenter_type, datacenter.uuid, iz.zoneId)
            if check_vpn_gateway:
                vpn_gateway_list = hyb_ops.sync_vpc_vpn_gateway_from_remote(datacenter.uuid)
                vpn_gateway_normal = [gw for gw in vpn_gateway_list if gw.businessStatus == 'Normal']
                if iz_inv and vpn_gateway_normal:
                    self.datacenter = datacenter
                    self.iz = iz_inv
                    self.vpn_gateway = vpn_gateway_normal[0]
                    return
                else:
                    self.del_iz(iz_inv.uuid)
            elif check_ecs:
                ecs_list = hyb_ops.sync_ecs_instance_from_remote(datacenter.uuid)
                if ecs_list:
                    self.datacenter = datacenter
                    self.iz = iz_inv
                    self.ecs_instance = ecs_list[0]
                    return
                else:
                    self.del_iz(iz_inv.uuid)
            elif iz_inv and check_prepaid_ecs:
                # only_zstack='false': include instances not created by ZStack.
                ecs_list = hyb_ops.sync_ecs_instance_from_remote(datacenter.uuid, only_zstack='false')
                prepaid_ecs_list = [ep for ep in ecs_list if ep.chargeType == 'PrePaid']
                if prepaid_ecs_list:
                    self.datacenter = datacenter
                    self.iz = iz_inv
                    self.prepaid_ecs = prepaid_ecs_list[0]
                    return
                else:
                    self.del_iz(iz_inv.uuid)
            elif iz_inv:
                # No extra precondition: first usable zone wins.
                self.datacenter = datacenter
                self.iz = iz_inv
                self.dc_uuid = datacenter.uuid
                self.zone_id = iz_inv.zoneId
                return
        if check_vpn_gateway and vpn_gateway_normal:
            break
        elif check_prepaid_ecs and prepaid_ecs_list:
            break
        else:
            # Nothing usable in this region -- remove its local record.
            hyb_ops.del_datacenter_in_local(datacenter.uuid)
    if check_vpn_gateway and not vpn_gateway_normal:
        test_util.test_fail("VpnGate for ipsec vpn connection was not found in all available dataCenter")
    elif check_prepaid_ecs and not prepaid_ecs_list:
        test_util.test_fail("Prepaid ECS was not found in all available dataCenter")
def add_dc_iz_concurrence(self, add_method, **kw):
    """Run add_method(kw) in two concurrent processes and wait for both."""
    workers = [Process(target=add_method, args=(kw,)) for _ in range(2)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def check_datacenter_unique(self):
    """Adding the same region concurrently must leave exactly one local record."""
    def add_dc(kw):
        # Worker body, run in a child process; the duplicate add is expected
        # to raise ApiError, which is only logged.
        try:
            hyb_ops.add_datacenter_from_remote(datacenter_type, kw['region_id'], 'datacenter for test')
        except hyb_ops.ApiError, e:
            test_util.test_dsc(e)
    self.clean_datacenter()
    self.add_dc_iz_concurrence(add_dc, region_id=self.region_id)
    condition = res_ops.gen_query_conditions('regionId', '=', self.region_id)
    assert len(hyb_ops.query_datacenter_local(condition)) == 1
def del_datacenter(self):
    """Delete the current DataCenter's local record and verify it is gone."""
    hyb_ops.del_datacenter_in_local(self.datacenter.uuid)
    cond = res_ops.gen_query_conditions('regionId', '=', self.region_id)
    assert not hyb_ops.query_datacenter_local(cond)
def check_iz_unique(self):
    """Adding the same zone concurrently must leave exactly one local record."""
    def add_iz(kw):
        # Worker body, run in a child process; the duplicate add is expected
        # to raise ApiError, which is only logged.
        try:
            hyb_ops.add_identity_zone_from_remote(datacenter_type, kw['dc_uuid'], kw['zone_id'])
        except hyb_ops.ApiError, e:
            test_util.test_dsc(e)
    self.del_iz()
    self.add_dc_iz_concurrence(add_iz, dc_uuid=self.dc_uuid, zone_id=self.zone_id)
    condition = res_ops.gen_query_conditions('zoneId', '=', self.iz.zoneId)
    assert len(hyb_ops.query_iz_local(condition)) == 1
def del_iz(self, iz_uuid=None):
    """Delete an identity zone locally (default: self.iz) and verify removal."""
    target_uuid = iz_uuid if iz_uuid else self.iz.uuid
    hyb_ops.del_identity_zone_in_local(target_uuid)
    cond = res_ops.gen_query_conditions('zoneId', '=', self.iz.zoneId)
    assert not hyb_ops.query_iz_local(cond)
def check_resource(self, ops, cond_name, cond_val, query_method, aliyun_nas=False):
    """Query a local inventory and check it against the given operation.

    :param ops: 'create' (assert the record exists), 'delete' (assert it
        does not), or 'sync' (return the first matching record).
    :param cond_name/cond_val: field and value for the query condition.
    :param query_method: name of the query function on hyb_ops (or nas_ops
        when aliyun_nas is True).
    :returns: the first matching inventory for ops == 'sync', else None.
    """
    condition = res_ops.gen_query_conditions(cond_name, '=', cond_val)
    # Resolve the query function by name instead of building and eval()ing
    # a source string -- same behavior, no dynamic code execution.
    query_module = nas_ops if aliyun_nas else hyb_ops
    query_func = getattr(query_module, query_method)
    if ops == 'create':
        assert query_func(condition)
    elif ops == 'delete':
        assert not query_func(condition)
    elif ops == 'sync':
        return query_func(condition)[0]
def create_bucket(self):
    """Create an OSS bucket on the remote side and verify it shows up locally."""
    bucket_name = 'zstack-test-oss-bucket-%s-%s' % (_postfix, self.region_id)
    self.bucket_name = bucket_name
    self.oss_bucket = hyb_ops.create_oss_bucket_remote(
        self.datacenter.uuid, bucket_name, 'created-by-zstack-for-test')
    self.oss_bucket_create = True
    self.check_resource('create', 'bucketName', bucket_name, 'query_oss_bucket_file_name')
def add_bucket(self):
    """Import an existing remote OSS bucket, or create one if none exists,
    then verify its name is present in the local inventory."""
    bucket_remote = hyb_ops.get_oss_bucket_name_from_remote(self.datacenter.uuid)
    if bucket_remote:
        self.bucket_name = bucket_remote[0].bucketName
        self.oss_bucket = hyb_ops.add_oss_bucket_from_remote(self.datacenter.uuid, self.bucket_name)
    else:
        self.create_bucket()
    bucket_local = hyb_ops.query_oss_bucket_file_name()
    bucket_name_local = [bk.bucketName for bk in bucket_local]
    assert self.bucket_name in bucket_name_local
def attach_bucket(self):
    """Attach the bucket to the ECS datacenter; verify it became current."""
    hyb_ops.attach_oss_bucket_to_ecs_datacenter(self.oss_bucket.uuid)
    matched = [b for b in hyb_ops.query_oss_bucket_file_name()
               if b.uuid == self.oss_bucket.uuid]
    assert matched[0].current == 'true'
def detach_bucket(self):
    """Detach the bucket from the ECS datacenter; verify it is not current."""
    hyb_ops.detach_oss_bucket_from_ecs_datacenter(self.oss_bucket.uuid)
    matched = [b for b in hyb_ops.query_oss_bucket_file_name()
               if b.uuid == self.oss_bucket.uuid]
    assert matched[0].current == 'false'
def update_oss_bucket(self, name=None, description=None):
    """Update each provided bucket attribute and verify the change stuck.

    NOTE: each non-None attribute triggers its own update call, and the
    whole attribute dict (including None entries) is passed every time.
    """
    oss_bucket_attr = {'name': name,
                       'description': description,
                       }
    for k in oss_bucket_attr.keys():
        if oss_bucket_attr[k]:
            self.oss_bucket = hyb_ops.update_oss_bucket(self.oss_bucket.uuid, **oss_bucket_attr)
            oss_bucket_attr_eq = "self.oss_bucket.%s == '%s'" % (k, oss_bucket_attr[k])
            assert eval(oss_bucket_attr_eq)
def del_bucket(self, remote=True):
    """Delete the test bucket.

    remote=True empties and removes the bucket on Aliyun (re-importing the
    local record first if needed); remote=False only drops the local record.
    """
    if remote:
        condition = res_ops.gen_query_conditions('bucketName', '=', self.bucket_name)
        if not hyb_ops.query_oss_bucket_file_name(condition):
            self.oss_bucket = hyb_ops.add_oss_bucket_from_remote(self.datacenter.uuid, self.bucket_name)
        bucket_file = hyb_ops.get_oss_bucket_file_from_remote(self.oss_bucket.uuid).files
        if bucket_file:
            # A bucket must be empty before it can be deleted remotely.
            time.sleep(20)
            for i in bucket_file:
                hyb_ops.del_oss_bucket_file_remote(self.oss_bucket.uuid, i)
            time.sleep(10)
        hyb_ops.del_oss_bucket_remote(self.oss_bucket.uuid)
    else:
        if self.oss_bucket:
            hyb_ops.del_oss_bucket_name_in_local(self.oss_bucket.uuid)
        elif self.oss_bucket_create:
            hyb_ops.del_oss_bucket_name_in_local(self.oss_bucket_create.uuid)
    self.check_resource('delete', 'bucketName', self.bucket_name, 'query_oss_bucket_file_name')
def create_aliyun_disk(self):
    """Create a 20GB 'cloud_efficiency' data disk remotely; verify local sync."""
    self.disk = hyb_ops.create_aliyun_disk_remote(
        'zstack-test-aliyun-disk-%s' % _postfix, self.iz.uuid, 20,
        disk_category='cloud_efficiency')
    self.check_resource('create', 'diskId', self.disk.diskId, 'query_aliyun_disk_local')
    time.sleep(10)
def del_aliyun_disk(self, remote=True):
    """Delete the disk (remotely by default, otherwise just the local record)."""
    if not remote:
        hyb_ops.del_aliyun_disk_in_local(self.disk.uuid)
    else:
        hyb_ops.del_aliyun_disk_remote(self.disk.uuid)
        hyb_ops.sync_aliyun_disk_from_remote(self.iz.uuid)
    self.check_resource('delete', 'diskId', self.disk.diskId, 'query_aliyun_disk_local')
def attach_aliyun_disk(self):
    """Attach the data disk to the ECS instance and verify the binding."""
    hyb_ops.attach_aliyun_disk_to_ecs(self.ecs_instance.uuid, disk_uuid=self.disk.uuid)
    time.sleep(30)  # attaching takes a while on the remote side
    self.sync_aliyun_disk()
    assert self.disk.ecsInstanceUuid == self.ecs_instance.uuid
    assert self.disk.status.lower() == 'in_use'
def detach_aliyun_disk(self):
    """Detach the data disk from its ECS instance; verify it is available again."""
    hyb_ops.detach_aliyun_disk_from_ecs(self.disk.uuid)
    self.sync_aliyun_disk()
    assert 'available' == self.disk.status.lower()
def sync_aliyun_disk(self, check=True):
    """Pull disk inventory from remote; optionally refresh self.disk locally."""
    hyb_ops.sync_aliyun_disk_from_remote(self.iz.uuid)
    if not check:
        return
    cond = res_ops.gen_query_conditions('diskId', '=', self.disk.diskId)
    local_disks = hyb_ops.query_aliyun_disk_local(cond)
    assert local_disks
    self.disk = local_disks[0]
def update_aliyun_disk(self, name=None, description=None, delete_with_instance=None, delete_autosnapshot=None, enable_autosnapshot=None):
    """Update each provided disk attribute.

    The three boolean flags are only recorded on this test object after the
    update call (no re-verification); name/description are verified by
    re-syncing the disk and comparing. Each non-None attribute triggers its
    own update call carrying the whole attribute dict.
    """
    disk_attr = {'name': name,
                 'description': description,
                 'delete_with_instance': delete_with_instance,
                 'delete_autosnapshot': delete_autosnapshot,
                 'enable_autosnapshot': enable_autosnapshot
                 }
    for k in disk_attr.keys():
        if disk_attr[k]:
            hyb_ops.update_aliyun_disk(self.disk.uuid, **disk_attr)
            if k == 'delete_with_instance':
                self.delete_disk_with_instance = True
            elif k == 'delete_autosnapshot':
                self.delete_autosnapshot = True
            elif k == 'enable_autosnapshot':
                self.enable_autosnapshot = True
            else:
                self.sync_aliyun_disk()
                disk_attr_eq = "self.disk.%s == '%s'" % (k, disk_attr[k])
                assert eval(disk_attr_eq)
def create_aliyun_snapshot(self, disk_type='data'):
    """Snapshot the attached data disk (default) or the ECS system disk.

    Note: 'creaet_aliyun_snapshot_remote' is the (misspelled) name of the
    external API wrapper -- do not "fix" it here.
    """
    snapshot_name = 'zstack-test-aliyun-snapshot-%s' % _postfix
    if disk_type == 'system':
        condition = res_ops.gen_query_conditions('ecsInstanceUuid', '=', self.ecs_instance.uuid)
        self.system_disk = hyb_ops.query_aliyun_disk_local(condition)[0]
        self.snapshot = hyb_ops.creaet_aliyun_snapshot_remote(self.system_disk.uuid, snapshot_name)
    else:
        self.snapshot = hyb_ops.creaet_aliyun_snapshot_remote(self.disk.uuid, snapshot_name)
    self.check_resource('create', 'snapshotId', self.snapshot.snapshotId, 'query_aliyun_snapshot_local')
def sync_aliyun_snapshot(self):
    """Sync snapshots from remote and refresh self.snapshot from the local DB."""
    hyb_ops.sync_aliyun_snapshot_from_remote(self.datacenter.uuid)
    cond = res_ops.gen_query_conditions('snapshotId', '=', self.snapshot.snapshotId)
    local_snapshots = hyb_ops.query_aliyun_snapshot_local(cond)
    assert local_snapshots
    self.snapshot = local_snapshots[0]
def update_aliyun_snapshot(self, name=None, description=None):
    """Update each provided snapshot attribute and verify the change stuck.

    NOTE: each non-None attribute triggers its own update call carrying the
    whole attribute dict (None entries included).
    """
    snapshot_attr = {'name': name,
                     'description': description,
                     }
    for k in snapshot_attr.keys():
        if snapshot_attr[k]:
            hyb_ops.update_aliyun_snapshot(self.snapshot.uuid, **snapshot_attr)
            self.sync_aliyun_snapshot()
            snapshot_attr_eq = "self.snapshot.%s == '%s'" % (k, snapshot_attr[k])
            assert eval(snapshot_attr_eq)
def del_aliyun_snapshot(self, remote=True):
    """Delete the snapshot (remotely by default, else just the local record)."""
    if not remote:
        hyb_ops.del_aliyun_snapshot_in_local(self.snapshot.uuid)
    else:
        hyb_ops.del_aliyun_snapshot_remote(self.snapshot.uuid)
        hyb_ops.sync_aliyun_snapshot_from_remote(self.datacenter.uuid)
    self.check_resource('delete', 'snapshotId', self.snapshot.snapshotId, 'query_aliyun_snapshot_local')
def sync_vpn_gateway(self):
    """Sync VPN gateways from remote and refresh self.vpn_gateway."""
    hyb_ops.sync_vpc_vpn_gateway_from_remote(self.datacenter.uuid)
    gateway_id = self.vpn_gateway.gatewayId
    self.vpn_gateway = self.check_resource(
        'sync', 'gatewayId', gateway_id, 'query_vpc_vpn_gateway_local')
def del_vpn_gateway(self):
    """Delete only the local VPN gateway record; nothing is removed remotely."""
    hyb_ops.del_vpc_vpn_gateway_local(self.vpn_gateway.uuid)
    # self.sync_vpn_gateway()
    self.check_resource('delete', 'gatewayId', self.vpn_gateway.gatewayId, 'query_vpc_vpn_gateway_local')
def update_vpn_gateway(self, name=None, description=None):
    """Update each provided gateway attribute and verify the change stuck.

    NOTE: each non-None attribute triggers its own update call carrying the
    whole attribute dict (None entries included).
    """
    vpn_gateway_attr = {'name': name,
                        'description': description,
                        }
    for k in vpn_gateway_attr.keys():
        if vpn_gateway_attr[k]:
            hyb_ops.update_vpc_vpn_gateway(self.vpn_gateway.uuid, **vpn_gateway_attr)
            self.sync_vpn_gateway()
            vpn_gateway_attr_eq = "self.vpn_gateway.%s == '%s'" % (k, vpn_gateway_attr[k])
            assert eval(vpn_gateway_attr_eq)
def create_eip(self):
    """Create a 1Mbps hybrid EIP remotely and confirm a local record exists."""
    new_eip = hyb_ops.create_hybrid_eip(self.datacenter.uuid, 'zstack-test-eip', '1')
    self.eip = new_eip
    self.eip_create = True
    self.check_resource('create', 'eipId', new_eip.eipId, 'query_hybrid_eip_local')
def sync_eip(self, return_val=False):
    """Sync EIPs from remote; optionally return the refreshed local record."""
    hyb_ops.sync_hybrid_eip_from_remote(self.datacenter.uuid)
    if not return_val:
        return
    return self.check_resource('sync', 'eipId', self.eip.eipId, 'query_hybrid_eip_local')
def del_eip(self, remote=True):
    """Delete the EIP (remotely by default, else just the local record)."""
    if not remote:
        hyb_ops.del_hybrid_eip_local(self.eip.uuid)
    else:
        hyb_ops.del_hybrid_eip_remote(self.eip.uuid)
        self.sync_eip()
    self.check_resource('delete', 'eipId', self.eip.eipId, 'query_hybrid_eip_local')
def get_eip(self, in_use=False, sync_eip=False):
    """Refresh EIP inventory and set self.eip.

    - sync_eip=True: re-fetch the current self.eip by its eipId
    - in_use=True:   pick the EIP bound to self.ecs_instance
    - otherwise:     pick any available EIP, creating one if none exists
    """
    self.sync_eip()
    eip_all = hyb_ops.query_hybrid_eip_local()
    if sync_eip:
        self.eip = [eip for eip in eip_all if eip.eipId == self.eip.eipId][0]
    elif in_use:
        self.eip = [e for e in eip_all if e.allocateResourceUuid == self.ecs_instance.uuid][0]
    else:
        eip_available = [eip for eip in eip_all if eip.status.lower() == 'available']
        if eip_available:
            self.eip = eip_available[0]
        else:
            self.create_eip()
def update_eip(self, name=None, description=None):
    """Update each provided EIP attribute and verify the change stuck.

    NOTE: each non-None attribute triggers its own update call carrying the
    whole attribute dict (None entries included).
    """
    eip_attr = {'name': name,
                'description': description,
                }
    for k in eip_attr.keys():
        if eip_attr[k]:
            hyb_ops.update_hybrid_eip(self.eip.uuid, **eip_attr)
            self.eip = self.sync_eip(return_val=True)
            eip_attr_eq = "self.eip.%s == '%s'" % (k, eip_attr[k])
            assert eval(eip_attr_eq)
def attach_eip_to_ecs(self):
    """Attach the EIP to the ECS instance and verify the binding locally."""
    hyb_ops.attach_hybrid_eip_to_ecs(self.eip.uuid, self.ecs_instance.uuid)
    self.get_eip(sync_eip=True)
    assert self.ecs_instance.uuid == self.eip.allocateResourceUuid
def detach_eip_from_ecs(self):
    """Detach the EIP from its ECS instance; verify it becomes available."""
    hyb_ops.detach_hybrid_eip_from_ecs(self.eip.uuid)
    self.get_eip(sync_eip=True)
    assert 'available' == self.eip.status.lower()
def check_eip_accessibility(self, eip, password='Password123', retries=60, interval=3):
    """Verify the given public IP is reachable by sshing in and running 'ls /'.

    Retries up to `retries` times with `interval` seconds between attempts.
    The new keyword parameters default to the previously hard-coded values,
    so existing callers are unaffected.

    NOTE(review): relies on sshpass being installed and a fixed root password.
    """
    cmd = "sshpass -p %s ssh -o StrictHostKeyChecking=no root@%s 'ls /'" % (password, eip)
    cmd_status = None
    for _ in xrange(retries):
        cmd_status = commands.getstatusoutput(cmd)[0]
        if cmd_status == 0:
            break
        else:
            time.sleep(interval)
    assert cmd_status == 0, "Login Ecs via public ip failed!"
def create_vpc(self):
    """Create a VPC (172.16.0.0/12) plus vrouter remotely; verify local sync."""
    vpc_name = 'zstack-test-vpc-%s' % _postfix
    self.vpc_name = vpc_name
    self.vpc = hyb_ops.create_ecs_vpc_remote(
        self.datacenter.uuid, vpc_name, self.vr_name, '172.16.0.0/12')
    time.sleep(20)  # wait for the remote creation to settle
    self.check_resource('create', 'ecsVpcId', self.vpc.ecsVpcId, 'query_ecs_vpc_local')
def del_vpc(self, remote=True):
    """Delete the VPC (remotely by default, else just the local record)."""
    if not remote:
        hyb_ops.del_ecs_vpc_local(self.vpc.uuid)
    else:
        hyb_ops.del_ecs_vpc_remote(self.vpc.uuid)
        self.sync_vpc()
    self.check_resource('delete', 'ecsVpcId', self.vpc.ecsVpcId, 'query_ecs_vpc_local')
def sync_vpc(self, return_val=False):
    """Sync VPCs from remote; optionally return the refreshed local record."""
    hyb_ops.sync_ecs_vpc_from_remote(self.datacenter.uuid)
    if not return_val:
        return
    return self.check_resource('sync', 'ecsVpcId', self.vpc.ecsVpcId, 'query_ecs_vpc_local')
def get_vpc(self, has_vpn_gateway=False):
    """Set self.vpc.

    has_vpn_gateway=True picks the VPC that owns the VPN gateway's vswitch;
    otherwise any 'available' VPC is used, creating one when none exists.
    """
    self.sync_vpc(return_val=False)
    vpc_all = hyb_ops.query_ecs_vpc_local()
    if has_vpn_gateway:
        self.sync_vswitch()
        cond_vs = res_ops.gen_query_conditions('uuid', '=', self.vpn_gateway.vSwitchUuid)
        vs = hyb_ops.query_ecs_vswitch_local(cond_vs)[0]
        ecs_vpc = [vpc for vpc in vpc_all if vpc.uuid == vs.ecsVpcUuid]
    else:
        ecs_vpc = [vpc for vpc in vpc_all if vpc.status.lower() == 'available']
    if ecs_vpc:
        self.vpc = ecs_vpc[0]
    else:
        self.create_vpc()
def update_vpc(self, name=None, description=None):
    """Update each provided VPC attribute and verify the change stuck.

    NOTE: each non-None attribute triggers its own update call carrying the
    whole attribute dict (None entries included).
    """
    vpc_attr = {'name': name,
                'description': description,
                }
    for k in vpc_attr.keys():
        if vpc_attr[k]:
            hyb_ops.update_ecs_vpc(self.vpc.uuid, **vpc_attr)
            self.vpc = self.sync_vpc(return_val=True)
            vpc_attr_eq = "self.vpc.%s == '%s'" % (k, vpc_attr[k])
            assert eval(vpc_attr_eq)
def create_vswitch(self):
    """Create a vswitch in the current VPC with a non-colliding /24 CIDR.

    The third octet is chosen at random from octets not used by any existing
    vswitch of the VPC (or from 200-254 when the VPC has no vswitch yet).
    """
    self.sync_vswitch()
    cond_vpc_vs = res_ops.gen_query_conditions('ecsVpcUuid', '=', self.vpc.uuid)
    vpc_vs = hyb_ops.query_ecs_vswitch_local(cond_vpc_vs)
    if vpc_vs:
        # Third octets already taken by the VPC's vswitches.
        vs_cidr = [vs.cidrBlock.split('.')[-2] for vs in vpc_vs]
        cidr_val = list(set(str(i) for i in xrange(255)).difference(set(vs_cidr)))
    else:
        cidr_val = [str(i) for i in xrange(200, 255)]
    vpc_cidr_list = self.vpc.cidrBlock.split('.')
    vpc_cidr_list[2] = random.choice(cidr_val)
    vpc_cidr_list[3] = '0/24'
    vswitch_cidr = '.'.join(vpc_cidr_list)
    self.vs_name = 'zstack-test-vswitch-%s' % _postfix
    # Note: 'create_ecs_vswtich_remote' is the (misspelled) external API name.
    self.vswitch = hyb_ops.create_ecs_vswtich_remote(self.vpc.uuid, self.iz.uuid, self.vs_name, vswitch_cidr)
    time.sleep(10)  # wait for the remote creation to settle
    self.check_resource('create', 'vSwitchId', self.vswitch.vSwitchId, 'query_ecs_vswitch_local')
def del_vswitch(self, remote=True):
    """Delete the vswitch (remotely by default, else just the local record)."""
    if not remote:
        hyb_ops.del_ecs_vswitch_in_local(self.vswitch.uuid)
    else:
        hyb_ops.del_ecs_vswitch_remote(self.vswitch.uuid)
        self.sync_vswitch()
    self.check_resource('delete', 'vSwitchId', self.vswitch.vSwitchId, 'query_ecs_vswitch_local')
def sync_vswitch(self, return_val=False):
    """Sync vswitches from remote; optionally return the refreshed local record."""
    hyb_ops.sync_ecs_vswitch_from_remote(self.datacenter.uuid)
    if not return_val:
        return
    return self.check_resource('sync', 'vSwitchId', self.vswitch.vSwitchId, 'query_ecs_vswitch_local')
def get_vswitch(self):
    """Pick an existing vswitch of the current VPC, or create one if none exists."""
    self.sync_vswitch()
    cond = res_ops.gen_query_conditions('ecsVpcUuid', '=', self.vpc.uuid)
    candidates = hyb_ops.query_ecs_vswitch_local(cond)
    if not candidates:
        self.create_vswitch()
    else:
        self.vswitch = candidates[0]
def update_vswitch(self, name=None, description=None):
    """Update the cached vSwitch's name and/or description, re-sync, verify.

    Replaces the original per-attribute duplicate update calls and the
    eval()-based verification with a single call and getattr() checks.
    """
    vswitch_attr = {'name': name,
                    'description': description,
                    }
    if any(vswitch_attr.values()):
        hyb_ops.update_ecs_vswitch(self.vswitch.uuid, **vswitch_attr)
        self.vswitch = self.sync_vswitch(return_val=True)
        for k, v in vswitch_attr.items():
            if v:
                assert getattr(self.vswitch, k) == v
def create_sg(self):
    """Create a security group in self.vpc and flag it for teardown."""
    sg_name = 'zstack-test-ecs-security-group-%s' % _postfix
    self.sg = hyb_ops.create_ecs_security_group_remote(sg_name, self.vpc.uuid)
    time.sleep(20)  # allow the remote creation to propagate before checking
    self.check_resource('create', 'securityGroupId', self.sg.securityGroupId, 'query_ecs_security_group_local')
    self.sg_create = True  # consumed by tear_down()
def sync_sg(self, return_val=False):
    """Pull security groups for self.vpc; optionally return the synced record
    for the cached group."""
    hyb_ops.sync_ecs_security_group_from_remote(self.vpc.uuid)
    if return_val:
        return self.check_resource('sync', 'securityGroupId', self.sg.securityGroupId, 'query_ecs_security_group_local')
def del_sg(self, remote=True):
    """Delete the cached security group remotely (after refreshing its local
    record) or locally only, and verify it is gone."""
    if remote:
        self.sg = self.sync_sg(return_val=True)
        hyb_ops.del_ecs_security_group_remote(self.sg.uuid)
    else:
        hyb_ops.del_ecs_security_group_in_local(self.sg.uuid)
    self.check_resource('delete', 'securityGroupId', self.sg.securityGroupId, 'query_ecs_security_group_local')
def get_sg(self):
    """Ensure self.sg refers to a security group belonging to self.vpc,
    creating one when none exists locally after a sync."""
    self.sync_sg()
    matching = [group for group in hyb_ops.query_ecs_security_group_local()
                if group.ecsVpcUuid == self.vpc.uuid]
    if not matching:
        self.create_sg()
        return
    self.sg = matching[0]
def update_sg(self, name=None, description=None):
    """Update the cached security group's name and/or description, re-sync,
    and verify.

    Replaces the original per-attribute duplicate update calls and the
    eval()-based verification with a single call and getattr() checks.
    """
    sg_attr = {'name': name,
               'description': description,
               }
    if any(sg_attr.values()):
        hyb_ops.update_ecs_security_group(self.sg.uuid, **sg_attr)
        self.sg = self.sync_sg(return_val=True)
        for k, v in sg_attr.items():
            if v:
                assert getattr(self.sg, k) == v
def create_sg_rule(self):
    """Create paired ingress(drop 445/tcp) / egress(accept 80/tcp) rules for
    three CIDRs on the cached security group, then verify locally."""
    self.sg_rule = []
    for cidr in ['172.20.0.0/24', '172.20.0.100/24', '0.0.0.0/0']:
        self.sg_rule.append(hyb_ops.create_ecs_security_group_rule_remote(self.sg.uuid, 'ingress', 'TCP', '445/445', cidr, 'drop', 'intranet', '1'))
        self.sg_rule.append(hyb_ops.create_ecs_security_group_rule_remote(self.sg.uuid, 'egress', 'TCP', '80/80', cidr, 'accept', 'intranet', '10'))
    time.sleep(30)  # allow the remote rules to propagate before checking
    self.check_resource('create', 'ecsSecurityGroupUuid', self.sg.uuid, 'query_ecs_security_group_rule_local')
def del_sg_rule(self):
    """Delete every rule created by create_sg_rule, re-sync, and verify the
    rules are gone locally."""
    for rule in self.sg_rule:
        hyb_ops.del_ecs_security_group_rule_remote(rule.uuid)
    time.sleep(10)  # allow remote deletions to propagate
    hyb_ops.sync_ecs_security_group_rule_from_remote(self.sg.uuid)
    self.check_resource('delete', 'ecsSecurityGroupUuid', self.sg.uuid, 'query_ecs_security_group_rule_local')
def get_sg_rule(self):
    """Ensure the cached security group has at least one rule locally,
    creating permissive allow-all ingress/egress rules if none exist."""
    hyb_ops.sync_ecs_security_group_rule_from_remote(self.sg.uuid)
    cond_sg_rule = res_ops.gen_query_conditions('ecsSecurityGroupUuid', '=', self.sg.uuid)
    sg_rule = hyb_ops.query_ecs_security_group_rule_local(cond_sg_rule)
    if not sg_rule:
        hyb_ops.create_ecs_security_group_rule_remote(self.sg.uuid, 'ingress', 'ALL', '-1/-1', '0.0.0.0/0', 'accept', 'intranet', '10')
        hyb_ops.create_ecs_security_group_rule_remote(self.sg.uuid, 'egress', 'ALL', '-1/-1', '0.0.0.0/0', 'accept', 'intranet', '10')
    assert hyb_ops.query_ecs_security_group_rule_local(cond_sg_rule)
def sync_vr(self):
    """Sync virtual routers for self.vpc and cache the local record matching
    the VPC's vRouterId in self.vr."""
    hyb_ops.sync_aliyun_virtual_router_from_remote(self.vpc.uuid)
    self.vr = self.check_resource('sync', 'vrId', self.vpc.vRouterId, 'query_aliyun_virtual_router_local')
def get_vr(self):
    """Sync and assert that the VPC's virtual router exists locally."""
    self.sync_vr()
    assert self.vr
def update_vr(self, name=None, description=None):
    """Update the cached virtual router's name and/or description, re-sync,
    and verify.

    Replaces the original per-attribute duplicate update calls and the
    eval()-based verification with a single call and getattr() checks.
    """
    vr_attr = {'name': name,
               'description': description,
               }
    if any(vr_attr.values()):
        hyb_ops.update_aliyun_vr(self.vr.uuid, **vr_attr)
        self.sync_vr()
        for k, v in vr_attr.items():
            if v:
                assert getattr(self.vr, k) == v
def sync_vbr(self, get_org_name=True):
    """Sync virtual border routers and cache the first one in self.vbr.

    When get_org_name is True, also remember its original name/description so
    update_vbr() can restore them later.
    """
    self.vbr = hyb_ops.sync_virtual_border_router_from_remote(self.datacenter.uuid)[0]
    if get_org_name:
        self.vbr_name = self.vbr.name
        self.vbr_desc = self.vbr.description
def update_vbr(self, name=None, description=None):
    """Update the VBR's name/description and verify, or — when called with no
    arguments — restore the values captured by sync_vbr().

    Replaces the original per-attribute duplicate update calls and the
    eval()-based verification with a single call and getattr() checks.
    """
    if name or description:
        vbr_attr = {'name': name,
                    'description': description,
                    }
        hyb_ops.update_vbr(self.vbr.uuid, **vbr_attr)
        # Re-sync without clobbering the saved original name/description.
        self.sync_vbr(get_org_name=False)
        for k, v in vbr_attr.items():
            if v:
                assert getattr(self.vbr, k) == v
    else:
        # No new values supplied: restore the originals saved by sync_vbr().
        hyb_ops.update_vbr(self.vbr.uuid, name=self.vbr_name, description=self.vbr_desc)
def create_user_vpn_gateway(self):
    """Create a customer (user-side) VPN gateway, inventing a random private
    IP when self.user_gw_ip is unset, and verify it locally."""
    if not self.user_gw_ip:
        self.user_gw_ip = '192.168.%s.%s' % (random.randint(1,254), random.randint(1,254))
    self.user_vpn_gateway = hyb_ops.create_vpc_user_vpn_gateway(self.datacenter.uuid, gw_ip=self.user_gw_ip, gw_name="zstack-test-user-vpn-gateway")
    time.sleep(10)  # allow the remote creation to propagate before checking
    self.check_resource('create', 'gatewayId', self.user_vpn_gateway.gatewayId, 'query_vpc_user_vpn_gateway_local')
def sync_user_vpn_gateway(self, return_val=False):
    """Sync user VPN gateways from the datacenter; optionally return the
    synced record for the cached gateway."""
    hyb_ops.sync_vpc_user_vpn_gateway_from_remote(self.datacenter.uuid)
    if return_val:
        return self.check_resource('sync', 'gatewayId', self.user_vpn_gateway.gatewayId, 'query_vpc_user_vpn_gateway_local')
def get_user_vpn_gateway(self, vip):
    """Ensure self.user_vpn_gateway matches the given VIP's IP, creating a
    gateway for that IP when none exists locally."""
    self.sync_user_vpn_gateway()
    user_vpn_gw_local = hyb_ops.query_vpc_user_vpn_gateway_local()
    user_vpn_gw = [gw for gw in user_vpn_gw_local if gw.ip == vip.ip]
    if user_vpn_gw:
        self.user_vpn_gateway = user_vpn_gw[0]
    else:
        self.user_gw_ip = vip.ip
        self.create_user_vpn_gateway()
def update_user_vpn_gateway(self, name=None, description=None):
    """Update the cached user VPN gateway's name and/or description, re-sync,
    and verify.

    Replaces the original per-attribute duplicate update calls and the
    eval()-based verification with a single call and getattr() checks.
    """
    user_vpn_gateway_attr = {'name': name,
                             'description': description,
                             }
    if any(user_vpn_gateway_attr.values()):
        hyb_ops.update_vpc_user_vpn_gateway(self.user_vpn_gateway.uuid, **user_vpn_gateway_attr)
        self.user_vpn_gateway = self.sync_user_vpn_gateway(return_val=True)
        for k, v in user_vpn_gateway_attr.items():
            if v:
                assert getattr(self.user_vpn_gateway, k) == v
def del_user_vpn_gateway(self, remote=True):
    """Delete the cached user VPN gateway remotely (then re-sync) or locally
    only, and verify it is gone."""
    if remote:
        hyb_ops.del_vpc_user_vpn_gateway_remote(self.user_vpn_gateway.uuid)
        self.sync_user_vpn_gateway()
    else:
        hyb_ops.del_vpc_user_vpn_gateway_local(self.user_vpn_gateway.uuid)
    self.check_resource('delete', 'gatewayId', self.user_vpn_gateway.gatewayId, 'query_vpc_user_vpn_gateway_local')
def create_ecs_image(self, check_progress=False):
    """Create an ECS image from the local image named by env var 'imageName_s'.

    When check_progress is True, the (blocking) creation runs in a forked
    child process while the parent polls the progress API (up to ~600s) until
    it reports 99%, then reaps the child.
    """
    cond_image = res_ops.gen_query_conditions('name', '=', os.getenv('imageName_s'))
    image = res_ops.query_resource(res_ops.IMAGE, cond_image)[0]
    bs_uuid = image.backupStorageRefs[0].backupStorageUuid
    hyb_ops.update_image_guestOsType(image.uuid, guest_os_type='CentOS')
    if check_progress:
        create_image_pid = os.fork()
        if create_image_pid == 0:
            # Child process: perform the creation, then exit immediately.
            self.ecs_image = hyb_ops.create_ecs_image_from_local_image(bs_uuid, self.datacenter.uuid, image.uuid, name=ECS_IMAGE_NAME)
            sys.exit(0)
        # Parent process: poll progress once per second.
        for _ in xrange(600):
            image_progress = hyb_ops.get_create_ecs_image_progress(self.datacenter.uuid, image.uuid)
            if image_progress.progress.progress == "99%":
                break
            else:
                time.sleep(1)
        os.waitpid(create_image_pid, 0)
        assert image_progress.progress.progress == "99%"
        # NOTE(review): in this branch self.ecs_image is only assigned inside
        # the child process, so the parent's check below relies on it having
        # been set by an earlier call — confirm intended.
    else:
        self.ecs_image = hyb_ops.create_ecs_image_from_local_image(bs_uuid, self.datacenter.uuid, image.uuid, name=ECS_IMAGE_NAME)
    self.check_resource('create', 'ecsImageId', self.ecs_image.ecsImageId, 'query_ecs_image_local')
    time.sleep(30)  # allow the image to settle before subsequent operations
def sync_ecs_image(self, return_val=False):
    """Sync both self-owned and system ECS images from the datacenter, assert
    system images exist locally, and optionally return the cached image's
    synced record."""
    hyb_ops.sync_ecs_image_from_remote(self.datacenter.uuid)
    hyb_ops.sync_ecs_image_from_remote(self.datacenter.uuid, image_type='system')
    assert hyb_ops.query_ecs_image_local(self.cond_image_system)
    if return_val:
        return self.check_resource('sync', 'ecsImageId', self.ecs_image.ecsImageId, 'query_ecs_image_local')
def del_ecs_image(self, remote=True, system=False):
    """Delete ECS image(s).

    remote=True deletes the cached image on Aliyun and re-syncs; otherwise
    deletes locally only — either every system image (system=True, verified
    and returning early) or just the cached image.
    """
    if remote:
        hyb_ops.del_ecs_image_remote(self.ecs_image.uuid)
        hyb_ops.sync_ecs_image_from_remote(self.datacenter.uuid)
    else:
        if system:
            image_local = hyb_ops.query_ecs_image_local(self.cond_image_system)
            for i in image_local:
                hyb_ops.del_ecs_image_in_local(i.uuid)
            assert not hyb_ops.query_ecs_image_local(self.cond_image_system)
            return  # system images have no single id to re-check below
        else:
            hyb_ops.del_ecs_image_in_local(self.ecs_image.uuid)
    self.check_resource('delete', 'ecsImageId', self.ecs_image.ecsImageId, 'query_ecs_image_local')
def update_ecs_image(self, name=None, description=None):
    """Update the cached ECS image's name and/or description, re-sync, verify.

    Replaces the original per-attribute duplicate update calls and the
    eval()-based verification with a single call and getattr() checks.
    """
    image_attr = {'name': name,
                  'description': description,
                  }
    if any(image_attr.values()):
        hyb_ops.update_ecs_image(self.ecs_image.uuid, **image_attr)
        self.ecs_image = self.sync_ecs_image(return_val=True)
        for k, v in image_attr.items():
            if v:
                assert getattr(self.ecs_image, k) == v
def create_route_entry(self):
    """Create a VPN-gateway route entry on the VPC's vRouter toward a random
    172.27.x.0/24 destination and verify it locally."""
    self.get_vr()
    self.dst_cidr_block = '172.27.%s.0/24' % random.randint(1,254)
    self.route_entry = hyb_ops.create_aliyun_vpc_virtualrouter_entry_remote(self.dst_cidr_block, self.vr.uuid, vrouter_type='vrouter', next_hop_type='VpnGateway', next_hop_uuid=self.vpn_gateway.uuid)
    time.sleep(30)  # allow the route to propagate before checking
    self.check_resource('create', 'destinationCidrBlock', self.dst_cidr_block, 'query_aliyun_route_entry_local')
def del_route_entry(self, remote=True):
    """Delete the cached route entry remotely, re-sync, and verify removal.

    NOTE(review): the local-only branch (remote=False) is intentionally a
    no-op here — confirm whether local deletion is supported for route
    entries.
    """
    if remote:
        hyb_ops.del_aliyun_route_entry_remote(self.route_entry.uuid)
        hyb_ops.sync_route_entry_from_remote(self.vr.uuid, vrouter_type='vrouter')
    else:
        pass
    self.check_resource('delete', 'destinationCidrBlock', self.dst_cidr_block, 'query_aliyun_route_entry_local')
def sync_route_entry(self):
    """Sync route entries for the cached vRouter and assert some exist
    locally."""
    hyb_ops.sync_route_entry_from_remote(self.vr.uuid, vrouter_type='vrouter')
    condition = res_ops.gen_query_conditions('virtualRouterUuid', '=', self.vr.uuid)
    assert hyb_ops.query_aliyun_route_entry_local(condition)
def create_ecs_instance(self, need_vpn_gateway=False, allocate_eip=False, region_id=None, connect=False):
    """Create a test ECS instance, first ensuring datacenter/zone, VPC,
    vSwitch, security group and rules exist.

    need_vpn_gateway selects a VPC with a VPN gateway; region_id pins the
    datacenter region; connect forces creation of a fresh security group;
    allocate_eip additionally requests a public IP on the instance.
    """
    if need_vpn_gateway:
        self.add_datacenter_iz(check_vpn_gateway=True)
        self.get_vpc(has_vpn_gateway=True)
    elif region_id:
        self.add_datacenter_iz(region_id=region_id)
    else:
        self.add_datacenter_iz()
        self.get_vpc()
    self.get_vswitch()
    if connect:
        self.create_sg()
    else:
        self.get_sg()
    self.get_sg_rule()
    # Get ECS Instance Type
    ecs_instance_type = hyb_ops.get_ecs_instance_type_from_remote(self.iz.uuid)
    # Get ECS Image
    hyb_ops.sync_ecs_image_from_remote(self.datacenter.uuid)
    hyb_ops.sync_ecs_image_from_remote(self.datacenter.uuid, image_type='system')
    cond_image_centos = res_ops.gen_query_conditions('platform', '=', 'CentOS')
    cond_image_self = cond_image_centos[:]
    cond_image_system = cond_image_centos[:]
    cond_image_self.extend(res_ops.gen_query_conditions('type', '=', 'self'))
    cond_image_system.extend(res_ops.gen_query_conditions('type', '=', 'system'))
    ecs_image_centos_all = hyb_ops.query_ecs_image_local(cond_image_centos)
    ecs_image_centos_64 = [i for i in ecs_image_centos_all if '64' in i.name]
    ecs_image_self = hyb_ops.query_ecs_image_local(cond_image_self)
    ecs_image_system_all = hyb_ops.query_ecs_image_local(cond_image_system)
    ecs_image_system_64 = [i for i in ecs_image_system_all if '64' in i.name]
    # NOTE(review): only the CentOS 'vhd' system image is actually used below;
    # the ecs_image_self / *_64 candidate lists are computed but unused
    # (see the commented-out alternatives) — confirm they can be removed.
    ecs_image_system_centos = [i for i in ecs_image_system_all if i.platform == 'CentOS' and 'vhd' in i.name]
    image = ecs_image_system_centos[0]
    if not allocate_eip:
        # image = ecs_image_self[0] if ecs_image_self else ecs_image_centos_64[0]
        self.ecs_instance = hyb_ops.create_ecs_instance_from_ecs_image('Password123', image.uuid, self.vswitch.uuid, ecs_bandwidth=1, ecs_security_group_uuid=self.sg.uuid,
                                                                       instance_type=ecs_instance_type[1].typeId, name=TEST_ECS_NAME, ecs_console_password='A1B2c3')
    else:
        # image = ecs_image_system_64[0]
        self.ecs_instance = hyb_ops.create_ecs_instance_from_ecs_image('Password123', image.uuid, self.vswitch.uuid, ecs_bandwidth=1, ecs_security_group_uuid=self.sg.uuid,
                                                                       instance_type=ecs_instance_type[1].typeId, allocate_public_ip='true', name=TEST_ECS_NAME, ecs_console_password='a1B2c3')
    time.sleep(10)  # allow the instance to start provisioning
    self.ecs_create = True  # consumed by tear_down()
def get_ecs_instance(self):
    """Ensure an ECS instance is cached, creating one if necessary."""
    self.add_datacenter_iz(check_ecs=True)
    if not self.ecs_instance:
        self.create_ecs_instance()
def sync_ecs_instance(self, return_val=False):
    """Sync ECS instances from the datacenter; optionally return the synced
    record for the cached instance."""
    hyb_ops.sync_ecs_instance_from_remote(self.datacenter.uuid)
    if return_val:
        return self.check_resource('sync', 'ecsInstanceId', self.ecs_instance.ecsInstanceId, 'query_ecs_instance_local')
def update_ecs_instance(self, name=None, description=None):
    """Update the cached ECS instance's name and/or description, re-sync,
    and verify.

    Replaces the original per-attribute duplicate update calls and the
    eval()-based verification with a single call and getattr() checks.
    """
    ecs_attr = {'name': name,
                'description': description,
                }
    if any(ecs_attr.values()):
        hyb_ops.update_ecs_instance(self.ecs_instance.uuid, **ecs_attr)
        self.ecs_instance = self.sync_ecs_instance(return_val=True)
        for k, v in ecs_attr.items():
            if v:
                assert getattr(self.ecs_instance, k) == v
def stop_ecs(self):
    """Request a stop of the cached ECS instance and poll (up to ~600s) until
    its status reads 'stopped'."""
    hyb_ops.stop_ecs_instance(self.ecs_instance.uuid)
    for _ in xrange(600):
        self.sync_ecs_instance()
        ecs = [e for e in hyb_ops.query_ecs_instance_local() if e.ecsInstanceId == self.ecs_instance.ecsInstanceId][0]
        if ecs.ecsStatus.lower() == "stopped":
            break
        else:
            time.sleep(1)
def wait_ecs_running(self):
    """Poll (up to ~600s) until the cached ECS instance's status reads
    'running'."""
    for _ in xrange(600):
        self.sync_ecs_instance()
        ecs_local = hyb_ops.query_ecs_instance_local()
        ecs_inv = [e for e in ecs_local if e.ecsInstanceId == self.ecs_instance.ecsInstanceId][0]
        if ecs_inv.ecsStatus.lower() == "running":
            break
        else:
            time.sleep(1)
def start_ecs(self):
    """Start the cached ECS instance and wait until it is running."""
    hyb_ops.start_ecs_instance(self.ecs_instance.uuid)
    time.sleep(5)  # brief grace period before polling
    self.wait_ecs_running()
def reboot_ecs(self):
    """Reboot the cached ECS instance and wait until it is running again."""
    hyb_ops.reboot_ecs_instance(self.ecs_instance.uuid)
    time.sleep(5)  # brief grace period before polling
    self.wait_ecs_running()
def del_ecs_instance(self, remote=True):
    """Delete the cached ECS instance remotely (after refreshing its local
    record) or locally only, and verify it is gone."""
    if remote:
        # self.stop_ecs()
        self.ecs_instance = self.sync_ecs_instance(return_val=True)
        hyb_ops.del_ecs_instance(self.ecs_instance.uuid)
        self.sync_ecs_instance()
    else:
        hyb_ops.del_ecs_instance_local(self.ecs_instance.uuid)
    self.check_resource('delete', 'ecsInstanceId', self.ecs_instance.ecsInstanceId, 'query_ecs_instance_local')
def create_ipsec(self, pri_l3_uuid, vip):
    """Reuse an existing ZStack IPsec connection if one exists; otherwise
    create one from the private L3/VIP toward the Aliyun VPN gateway."""
    ipsec_conntion = hyb_ops.query_ipsec_connection()
    if ipsec_conntion:
        self.ipsec = ipsec_conntion[0]
    else:
        self.ipsec = ipsec_ops.create_ipsec_connection('ipsec', pri_l3_uuid, self.vpn_gateway.publicIp, 'ZStack.Hybrid.Test123789', vip.uuid, [self.vswitch.cidrBlock],
                                                       ike_dh_group=2, ike_encryption_algorithm='3des', policy_encryption_algorithm='3des', pfs='dh-group2')
def create_vpn_connection(self, auth_alg_1='sha1', auth_alg_2='sha1'):
    """Create IKE and IPsec configs, then an Aliyun VPN connection between
    the user VPN gateway and the VPC VPN gateway, and verify it locally."""
    vpn_ike_config = hyb_ops.create_vpn_ike_ipsec_config(name='zstack-test-vpn-ike-config', psk='ZStack.Hybrid.Test123789', local_ip=self.vpn_gateway.publicIp, remote_ip=self.user_vpn_gateway.ip, auth_alg=auth_alg_1)
    vpn_ipsec_config = hyb_ops.create_vpn_ipsec_config(name='zstack-test-vpn-ike-config', auth_alg=auth_alg_2)
    self.vpn_connection = hyb_ops.create_vpc_vpn_connection(self.user_vpn_gateway.uuid, self.vpn_gateway.uuid, 'zstack-test-ipsec-vpn-connection', self.vswitch.cidrBlock,
                                                            self.zstack_cidrs, vpn_ike_config.uuid, vpn_ipsec_config.uuid)
    time.sleep(10)  # allow the connection to propagate before checking
    self.check_resource('create', 'connectionId', self.vpn_connection.connectionId, 'query_vpc_vpn_connection_local')
def create_ipsec_vpn_connection(self, check_connectivity=True, check_status=False, use_1_vip=False):
    """End-to-end IPsec VPN test: create a local VM, wire up the Aliyun side
    (VPC, vSwitch, vRouter, user VPN gateway, VPN connection), create the
    ZStack IPsec connection, and optionally verify bidirectional ping
    between the local VM and the ECS instance.
    """
    self.vm = create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    # test_obj_dict.add_vm(self.vm)
    self.vm.check()
    vm_ip = self.vm.vm.vmNics[0].ip
    pri_l3_uuid = self.vm.vm.vmNics[0].l3NetworkUuid
    vr = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid)[0]
    l3_uuid = test_lib.lib_find_vr_pub_nic(vr).l3NetworkUuid
    # Pick a VIP: either the virtual router's own VIP, an existing spare VIP,
    # or a freshly created one.
    vip_for_vr = [vip for vip in res_ops.query_resource(res_ops.VIP) if 'vip-for-vrouter' in vip.name]
    if use_1_vip:
        vip = vip_for_vr[0]
    else:
        vip_not_for_vr = [vip for vip in res_ops.query_resource(res_ops.VIP) if 'vip-for-vrouter' not in vip.name]
        if vip_not_for_vr:
            vip = vip_not_for_vr[0]
        else:
            vip = create_vip('ipsec_vip', l3_uuid).get_vip()
    cond = res_ops.gen_query_conditions('uuid', '=', pri_l3_uuid)
    self.zstack_cidrs = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0].ipRanges[0].networkCidr
    # Route destination is the network address form of the local CIDR.
    self.dst_cidr_block = self.zstack_cidrs.replace('1/', '0/')
    # _vm_ip = zstack_cidrs.replace('1/', '254/')
    # cmd = 'ip a add dev br_eth0_1101 %s' % _vm_ip
    time.sleep(10)
    if check_connectivity:
        self.create_ecs_instance(need_vpn_gateway=True, connect=True)
        self.get_eip()
        self.attach_eip_to_ecs()
    else:
        self.add_datacenter_iz(check_vpn_gateway=True)
        self.get_vpc(has_vpn_gateway=True)
        self.get_vswitch()
    self.get_vr()
    self.get_user_vpn_gateway(vip)
    self.create_vpn_connection()
    if check_status:
        condition = res_ops.gen_query_conditions('connectionId', '=', self.vpn_connection.connectionId)
        vpn_conn = hyb_ops.query_vpc_vpn_connection_local(condition)[0]
        # Fresh connections start without an established IKE SA.
        assert vpn_conn.status == 'ike_sa_not_established'
    self.create_ipsec(pri_l3_uuid, vip)
    if check_connectivity:
        # Add route entry
        self.route_entry = hyb_ops.create_aliyun_vpc_virtualrouter_entry_remote(self.dst_cidr_block, self.vr.uuid, vrouter_type='vrouter', next_hop_type='VpnGateway', next_hop_uuid=self.vpn_gateway.uuid)
        ping_ecs_cmd = "sshpass -p password ssh -o StrictHostKeyChecking=no root@%s 'ping %s -c 5 | grep time='" % (vm_ip, self.ecs_instance.privateIpAddress)
        # ZStack VM ping Ecs
        ping_ecs_cmd_status = commands.getstatusoutput(ping_ecs_cmd)[0]
        assert ping_ecs_cmd_status == 0
        ping_vm_cmd = "sshpass -p Password123 ssh -o StrictHostKeyChecking=no root@%s 'ping %s -c 5 | grep time='" % (self.eip.eipAddress, vm_ip)
        # Ecs ping ZStack VM
        ping_vm_cmd_status = commands.getstatusoutput(ping_vm_cmd)[0]
        assert ping_vm_cmd_status == 0
        test_util.test_pass('Create hybrid IPsec Vpn Connection Test Success')
def sync_vpn_connection(self):
    """Sync VPN connections from the datacenter and refresh the cached
    connection's local record."""
    hyb_ops.sync_vpc_vpn_connection_from_remote(self.datacenter.uuid)
    condition = res_ops.gen_query_conditions('connectionId', '=', self.vpn_connection.connectionId)
    self.vpn_connection = hyb_ops.query_vpc_vpn_connection_local(condition)[0]
def update_vpn_connection(self, name=None, description=None):
    """Update the cached VPN connection's name and/or description, re-sync,
    and verify.

    Replaces the original per-attribute duplicate update calls and the
    eval()-based verification with a single call and getattr() checks.
    """
    vpn_connection_attr = {'name': name,
                           'description': description,
                           }
    if any(vpn_connection_attr.values()):
        hyb_ops.update_vpc_vpn_connection(self.vpn_connection.uuid, **vpn_connection_attr)
        self.sync_vpn_connection()
        for k, v in vpn_connection_attr.items():
            if v:
                assert getattr(self.vpn_connection, k) == v
def del_vpn_connection(self, remote=True):
    """Delete the cached VPN connection remotely (after a refresh, then
    re-sync) or locally only, and verify it is gone."""
    if remote:
        self.sync_vpn_connection()
        hyb_ops.del_vpc_vpn_connection_remote(self.vpn_connection.uuid)
        hyb_ops.sync_vpc_vpn_connection_from_remote(self.datacenter.uuid)
    else:
        hyb_ops.del_vpc_vpn_connection_local(self.vpn_connection.uuid)
    self.check_resource('delete', 'connectionId', self.vpn_connection.connectionId, 'query_vpc_vpn_connection_local')
def get_ecs_vnc_url(self):
    """Fetch the cached ECS instance's VNC console URL and assert it responds
    with HTTP 200."""
    vnc_url = hyb_ops.get_ecs_instance_vnc_url(self.ecs_instance.uuid).vncUrl
    req = urllib2.Request(vnc_url)
    response = urllib2.urlopen(req)
    assert response.code == 200
def tear_down(self):
    """Best-effort cleanup of every remote resource the test created.

    Deletes all test-named ECS instances first, then runs each flagged
    deleter. Improvements over the original: the parallel flag/function
    lists are paired explicitly instead of indexed, and the bare `except:`
    is narrowed to `except Exception:`.
    """
    self.sync_ecs_instance()
    ecs_local = hyb_ops.query_ecs_instance_local()
    ecs_to_clear = [e for e in ecs_local if e.name == TEST_ECS_NAME]
    for ecs in ecs_to_clear:
        self.ecs_instance = ecs
        self.del_ecs_instance()
    cleanups = [(self.sg_create, self.del_sg),
                (self.disk, self.del_aliyun_disk),
                (self.snapshot, self.del_aliyun_snapshot),
                (self.eip_create, self.del_eip),
                (self.user_vpn_gateway, self.del_user_vpn_gateway),
                (self.ecs_image, self.del_ecs_image)]
    for created, delete in cleanups:
        if created:
            time.sleep(120)  # give Aliyun time to settle before deleting
            try:
                delete()
            except Exception:
                # Cleanup is best-effort: keep tearing down the rest.
                pass
def create_vlan_vm(l3_name=None, disk_offering_uuids=None, system_tags=None, session_uuid = None, instance_offering_uuid = None):
    """Create a test VM on the named VLAN L3 network (default from env var
    'l3VlanNetworkName1') using the image named by env var 'imageName_net'."""
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    if not l3_name:
        l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    return create_vm([l3_net_uuid], image_uuid, 'vlan_vm', \
                     disk_offering_uuids, system_tags=system_tags, \
                     instance_offering_uuid = instance_offering_uuid,
                     session_uuid = session_uuid)
# parameter: vmname; l3_net: l3_net_description, or [l3_net_uuid,]; image_uuid:
def create_vm(l3_uuid_list, image_uuid, vm_name = None, \
              disk_offering_uuids = None, default_l3_uuid = None, \
              system_tags = None, instance_offering_uuid = None, session_uuid = None, ps_uuid=None):
    """Create and return a ZstackTestVm from the given L3 networks and image.

    When instance_offering_uuid is omitted, the first 'UserVm' instance
    offering found is used.
    """
    vm_creation_option = test_util.VmOption()
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    if not instance_offering_uuid:
        instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_l3_uuids(l3_uuid_list)
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_name(vm_name)
    vm_creation_option.set_data_disk_uuids(disk_offering_uuids)
    vm_creation_option.set_default_l3_uuid(default_l3_uuid)
    vm_creation_option.set_system_tags(system_tags)
    vm_creation_option.set_session_uuid(session_uuid)
    vm_creation_option.set_ps_uuid(ps_uuid)
    vm = zstack_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    return vm
def create_vr_vm(test_obj_dict, l3_name, session_uuid = None):
    '''
    Return a running virtual router for the named L3 network.

    If no VR exists yet, a temporary VM is created (and immediately
    destroyed) just to force the VR into existence. If the VR is not
    running, cleans up and skips the test.
    '''
    vr_l3_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    vrs = test_lib.lib_find_vr_by_l3_uuid(vr_l3_uuid)
    temp_vm = None
    if not vrs:
        #create temp_vm1 for getting vlan1's vr for test pf_vm portforwarding
        temp_vm = create_vlan_vm(l3_name, session_uuid = session_uuid)
        test_obj_dict.add_vm(temp_vm)
        vr = test_lib.lib_find_vr_by_vm(temp_vm.vm)[0]
        temp_vm.destroy(session_uuid)
        test_obj_dict.rm_vm(temp_vm)
    else:
        vr = vrs[0]
    if not test_lib.lib_is_vm_running(vr):
        test_lib.lib_robot_cleanup(test_obj_dict)
        test_util.test_skip('vr: %s is not running. Will skip test.' % vr.uuid)
    return vr
def create_vip(vip_name=None, l3_uuid=None, session_uuid = None):
    """Create and return a ZstackTestVip, defaulting to name 'test vip' on
    the L3 network named by env var 'l3PublicNetworkName'."""
    if not vip_name:
        vip_name = 'test vip'
    if not l3_uuid:
        l3_name = os.environ.get('l3PublicNetworkName')
        l3_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    vip_creation_option = test_util.VipOption()
    vip_creation_option.set_name(vip_name)
    vip_creation_option.set_l3_uuid(l3_uuid)
    vip_creation_option.set_session_uuid(session_uuid)
    vip = zstack_vip_header.ZstackTestVip()
    vip.set_creation_option(vip_creation_option)
    vip.create()
    return vip
| [
"[email protected]"
] | |
0779a1edd54738654a758385bdb33e9439b926a2 | a833546104577c7edf624f390224289b429ca551 | /data_py/test/studyClass.py | 2e0a03994a505b54eb6dfc9a74b907b688c8bf11 | [] | no_license | xiaof-github/stocklist | 2867f85bf8d9d6a72e7ff15be18df3a0635115c3 | dc43498ec25498bae12cd47893d58e424820b9dd | refs/heads/master | 2021-01-09T06:30:19.580554 | 2017-09-13T15:14:41 | 2017-09-13T15:14:41 | 80,996,398 | 3 | 0 | null | 2017-09-13T14:54:56 | 2017-02-05T13:58:10 | JavaScript | UTF-8 | Python | false | false | 1,334 | py | class Student(object):
count = 0
books = []
def __init__(self, name, age):
    """Store the per-instance name and age."""
    self.name = name
    self.age = age
    pass  # no-op, retained from the original
# Demo script (Python 2): class attributes are shared across all instances,
# while instance attributes belong to a single object.
Student.books.extend(["python", "javascript"])
print "Student book list: %s" % Student.books
# a class can gain new class attributes after the class definition
Student.hobbies = ["reading", "jogging", "swimming"]
Student.test = ["test"]
print "Student test list: %s" % Student.test
# introspection of the class object itself
print dir(Student)
print Student.__name__
print Student.__doc__
print Student.__bases__
print Student.__dict__
print Student.__module__
print Student.__class__
print
wilber = Student("Wilber", 28)
print "%s is %d years old" %(wilber.name, wilber.age)
# class instance can add new attribute
# "gender" is the instance attribute only belongs to wilber
wilber.gender = "male"
print "%s is %s" %(wilber.name, wilber.gender)
# class instance can access class attribute
print dir(wilber)
# mutating the shared class-level list is visible to every instance
wilber.books.append("C#")
print wilber.books
print
will = Student("Will", 27)
print "%s is %d years old" %(will.name, will.age)
# will shares the same class attribute with wilber
# will don't have the "gender" attribute that belongs to wilber
print dir(will)
print will.books
print
Hill = Student("Hill", 26)
print "%s is %d years old" %(Hill.name, Hill.age)
Hill.books.append("java")
print Hill.books
print "will: %s" %will.books
"[email protected]"
] | |
85b2dfa0de69e2abae6ac8a76301db1e6d1b3290 | 8d97668e0666f0b7e7c82b52bae13ece76435612 | /examples/python/bufr_ecc-448.py | a864aeb6e297b4686e900660b7ed80a888dd6d04 | [
"Apache-2.0"
] | permissive | jdkloe/eccodes | 83e5e6df5100d53a4274c706f5cb6fb07ca23743 | 66700b02d8a74722deafa085dab76e99e9b44fca | refs/heads/master | 2021-12-15T01:35:24.387814 | 2021-12-01T15:55:14 | 2021-12-01T15:55:14 | 243,985,448 | 0 | 0 | NOASSERTION | 2020-02-29T14:42:08 | 2020-02-29T14:42:07 | null | UTF-8 | Python | false | false | 1,838 | py | # (C) Copyright 2005- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
#
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
from __future__ import print_function
import sys
import traceback
from eccodes import *
INPUT = "../../data/bufr/syno_1.bufr"
VERBOSE = 1 # verbose error reporting
def example():
    """Iterate the BUFR messages in INPUT, recreating each handle from its
    raw message bytes (ECC-448) and printing every key name.

    Fixes: the input file is now closed even if an exception is raised
    (context manager instead of manual open/close), and the handle created
    by codes_new_from_message is released, plugging a handle leak.
    """
    # open bufr file; `with` guarantees the file is closed on any exit path
    with open(INPUT, "rb") as f:
        cnt = 0
        # loop for the messages in the file
        while 1:
            # get handle for message
            bufr = codes_bufr_new_from_file(f)
            if bufr is None:
                break
            print("message: %s" % cnt)
            # ECC-448: create a new BUFR handle from the message
            # of the original
            the_message = codes_get_message(bufr)
            newbufr = codes_new_from_message(the_message)
            codes_set(newbufr, "unpack", 1)
            # get BUFR key iterator
            iterid = codes_bufr_keys_iterator_new(newbufr)
            # loop over the keys
            while codes_bufr_keys_iterator_next(iterid):
                # print key name
                keyname = codes_bufr_keys_iterator_get_name(iterid)
                print("  %s" % keyname)
            # delete the key iterator
            codes_bufr_keys_iterator_delete(iterid)
            cnt += 1
            # release both handles (the original code leaked newbufr)
            codes_release(newbufr)
            codes_release(bufr)
def main():
    """Run the example; on an ecCodes error print a traceback (VERBOSE) or
    the short message, returning 1. Returns None (exit status 0) on success."""
    try:
        example()
    except CodesInternalError as err:
        if VERBOSE:
            traceback.print_exc(file=sys.stderr)
        else:
            sys.stderr.write(err.msg + "\n")
        return 1
# Run the example as a script, propagating main()'s return as the exit code.
if __name__ == "__main__":
    sys.exit(main())
| [
"[email protected]"
] | |
e42b09c7276cceed3af5975712adab3f66799982 | 6893b439bdc5243058fecf5769bd587fd9f56ec9 | /anchorhub/validation/validate_overwrite.py | 4140b4dee9eb3fb3229ea9d94af2b09806dec4fb | [
"Apache-2.0"
] | permissive | samjabrahams/anchorhub | 15f73a5bf3425c2558647d5a5c6eac95ef93c0ef | 5ade359b08297d4003a5f477389c01de9e634b54 | refs/heads/master | 2021-01-10T04:02:42.770945 | 2016-03-04T19:45:52 | 2016-11-18T19:17:50 | 50,009,409 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,447 | py | """
Functions for validating the overwrite argument in combination with other
arguments
"""
import os.path as path
from anchorhub.exceptions.validationexception import ValidationException
from anchorhub.util.hasattrs import hasattrs
def validate(opts):
    """
    Client facing overwrite validation. Checks to see if the opts arguments
    contains the attributes 'overwrite', 'input', and 'output'.

    :param opts: a namespace containing the attributes 'overwrite', 'input',
        and 'output'
    :raises ValueError: if the value passed in is not a namespace with the
        attributes 'overwrite', 'input', and 'output'
    :raises ValidationException: if opts fails any of the validations
    :return: True if opts passes the validations
    """
    # Guard clause: reject namespaces that lack the required attributes.
    if not hasattrs(opts, 'overwrite', 'input', 'output'):
        raise ValueError("opts object must have attributes 'overwrite', "
                         "'input', and 'output.")
    return _validate(opts)
def _validate(opts):
    """
    Runs validation functions on a namespace containing the attributes
    'overwrite' (boolean), 'input' (string), and 'output' (string).

    :param opts: a namespace containing the attributes 'overwrite', 'input',
        and 'output'
    :raises ValidationException: if opts fails any of the validations
    :return: True if opts passes the validations
    """
    # Currently the only check; add further validations here as needed.
    validate_overwrite_different_input_output(opts)
    return True
def validate_overwrite_different_input_output(opts):
    """
    Make sure that if overwrite is set to False, the input and output folders
    are not set to the same location.

    :param opts: a namespace containing the attributes 'overwrite', 'input',
        and 'output'
    :raises ValidationException: if 'input' and 'output' point to the same
        directory and 'overwrite' is set to False
    :return: True if 'overwrite' is set to True, or 'input'/'output' are
        separate directories
    """
    same_dir = path.abspath(opts.input) == path.abspath(opts.output)
    if same_dir and not opts.overwrite:
        raise ValidationException("Input and output directories are the same, "
                                  "but --overwrite / -X flag is not provided.\n"
                                  "Do you want to overwrite your input files? "
                                  "If so, use the following command:\n"
                                  "\tanchorhub -X " + opts.input)
    return True
| [
"[email protected]"
] | |
713bb962b877fb213095665a6b71f9e0de8414e9 | cb4de1e6e0ceb4a8221b69c65443fb3d2e98978e | /users/forms.py | 78e2c493dca6f991c03aab3f41759756c8de157b | [] | no_license | jm5833/TTP-FS | b7ffc5b5f89910e6859a25dfbd189b9b6d665740 | 3dfeca71acc58bbfdea7e060b488ed2f2dd499e0 | refs/heads/master | 2020-07-01T21:32:04.818288 | 2019-08-17T01:56:59 | 2019-08-17T01:56:59 | 201,306,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,371 | py | from django import forms
from users.models import User
from django.contrib.auth.forms import UserCreationForm
# Custom sign-up form built on Django's UserCreationForm.
class RegistrationForm(UserCreationForm):
    """Registration form adding first/last name and a required email."""
    email = forms.EmailField()

    class Meta:
        model = User
        fields = (
            'first_name',
            'last_name',
            'email',
            'password1',
            'password2'
        )

    def save(self, commit=True):
        """Populate the User from cleaned data, hash the password, and
        persist it only when commit is True."""
        user = super(RegistrationForm, self).save(commit=False)
        user.first_name = self.cleaned_data['first_name']
        user.last_name = self.cleaned_data['last_name']
        user.email = self.cleaned_data['email']
        user.set_password(self.cleaned_data['password1'])
        if commit:
            user.save()
        return user
# Custom user login form (plain Form, not tied to a model).
class LoginForm(forms.Form):
    """Login form collecting an email and a masked password."""
    email = forms.EmailField()
    password = forms.CharField(widget=forms.PasswordInput())

    class Meta:
        fields = ('email', 'password')

    def clean(self):
        """Form-level validation: require both email and password.

        NOTE(review): EmailField/CharField are required by default, so these
        checks likely only fire when field-level validation was bypassed —
        confirm whether they are still needed.
        """
        cleaned_data = super().clean()
        email = cleaned_data.get('email')
        password = cleaned_data.get('password')
        if not email:
            raise forms.ValidationError('Please type in your email')
        elif not password:
            raise forms.ValidationError('Password cannot be blank')
| [
"[email protected]"
] | |
8af1f2b9b43cf26c7d092f16479f3b479eed5d23 | 90f52d0348aa0f82dc1f9013faeb7041c8f04cf8 | /wxPython3.0 Docs and Demos/wxPython/samples/wxPIA_book/Chapter-10/popupmenu.py | 5226849ca7224afab2ef1c1e69a3aae5158a74d5 | [] | no_license | resource-jason-org/python-wxPythonTool | 93a25ad93c768ca8b69ba783543cddf7deaf396b | fab6ec3155e6c1ae08ea30a23310006a32d08c36 | refs/heads/master | 2021-06-15T10:58:35.924543 | 2017-04-14T03:39:27 | 2017-04-14T03:39:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,388 | py | import wx
class MyFrame(wx.Frame):
    """Demo frame showing a right-click context (popup) menu on a panel."""

    def __init__(self):
        wx.Frame.__init__(self, None, -1,
                          "Popup Menu Example")
        self.panel = p = wx.Panel(self)
        # Menu bar with a single Exit entry.
        menu = wx.Menu()
        exit = menu.Append(-1, "Exit")
        self.Bind(wx.EVT_MENU, self.OnExit, exit)
        menuBar = wx.MenuBar()
        menuBar.Append(menu, "Menu")
        self.SetMenuBar(menuBar)
        wx.StaticText(p, -1,
                      "Right-click on the panel to show a popup menu",
                      (25,25))
        # Build the popup menu once; each item routes to the same handler.
        self.popupmenu = wx.Menu()
        for text in "one two three four five".split():
            item = self.popupmenu.Append(-1, text)
            self.Bind(wx.EVT_MENU, self.OnPopupItemSelected, item)
        p.Bind(wx.EVT_CONTEXT_MENU, self.OnShowPopup)

    def OnShowPopup(self, event):
        """Show the popup menu at the clicked position (screen -> client)."""
        pos = event.GetPosition()
        pos = self.panel.ScreenToClient(pos)
        self.panel.PopupMenu(self.popupmenu, pos)

    def OnPopupItemSelected(self, event):
        """Report which popup item was chosen."""
        item = self.popupmenu.FindItemById(event.GetId())
        text = item.GetText()
        wx.MessageBox("You selected item '%s'" % text)

    def OnExit(self, event):
        """Close the frame (ends the app when it is the last window)."""
        self.Close()
# Start the wx application and enter the event loop.
if __name__ == "__main__":
    app = wx.App()
    frame = MyFrame()
    frame.Show()
    app.MainLoop()
| [
"[email protected]"
] | |
61bd905871c3846fa56e4ddc6b4a859a88c8a0fc | f9081d54071a499736ccc00fe11c58ab2ebe9f89 | /subagents.py | ede9ffee62f605615ec02742792d8a1a9efd0461 | [] | no_license | moridinamael/subagents | 6970f332840620a49cf3b85ce5f1ec63c2fabd61 | 57c654a092ba1e5a72212406b6f4a384f3d8b02c | refs/heads/master | 2020-05-21T12:49:26.518696 | 2019-07-09T03:49:30 | 2019-07-09T03:49:30 | 186,050,878 | 5 | 1 | null | 2019-07-09T03:49:31 | 2019-05-10T20:32:42 | Python | UTF-8 | Python | false | false | 22,894 | py | # -*- coding: utf-8 -*-
"""
Created on Fri May 10 14:09:30 2019
@author: Matt
"""
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 04 09:26:27 2019
@author: mfreeman
"""
import random
import math
import matplotlib.pyplot as plt
import copy
class position():
def __init__(self,x,y):
self._x = x
self._y = y
def distance(a,b):
return math.sqrt((a._x-b._x)*(a._x-b._x) + (a._y-b._y)*(a._y-b._y))
class agent():
def __init__(self,location,goals=[],beliefs=[],ply2=False,color="default"):
self._goals = goals
self._beliefs = beliefs
self._location = location
self._prev_locations = [location,location,location,location,location]
self._average_velocity = 0.0
self._suffering = 0.0
for a_goal in self._goals:
if a_goal._pinned == True:
a_goal._preferred_location = self._prev_locations[-2]
self._loc_hist_x = []
self._loc_hist_y = []
self._loc_hist_c = []
self._ply2 = ply2
self._color = color
def random_movement_options(self,count=10):
self._current_options = []
self._current_options_new_pos = []
for ii in range(0,count):
x = random.uniform(-1.0,1.0)
y = math.sqrt(1.0-x*x) * random.choice([-1.0,1.0])
option = position(x,y)
self._current_options.append(option)
self._current_options_new_pos.append(position(option._x+self._location._x,option._y+self._location._y))
def ply2_movement_options(self,count=10):
self._ply2options = {}
for pos in self._current_options_new_pos:
self._ply2options[pos] = []
for jj in range(0,count):
x = random.uniform(-1.0,1.0)
y = math.sqrt(1.0-x*x) * random.choice([-1.0,1.0])
ply2option = position(x+pos._x,y+pos._y)
#ply2option = position(pos._x+pos._x-self._location._x,pos._y+pos._y-self._location._y)
self._ply2options[pos].append(ply2option)
def update_average_velocity(self):
self._average_velocity = distance(self._location,self._prev_locations[0])/5.0
def update_position(self,new_pos):
self._prev_locations.pop(0)
self._prev_locations.append(self._location)
self._location = new_pos
def best_option_by_goal_winner_take_all(self):
overall_strongest_valence = 0.0
#print "New Timestep"
for a_goal in self._goals:
#print " New goal"
current_strongest_valence = -9999999999999999.0
current_strongest_ply2_valence = -9999999999999999.0
for option in self._current_options_new_pos:
velocity_current = distance(option,self._prev_locations[1])/5.0
if self._ply2 == True:
#print " New Option"
for option2ply in self._ply2options[option]:
#print " New 2ply"
distance_ply2 = distance(a_goal._preferred_location,option2ply)
option_ply2_valence = a_goal.compute_total_valence(distance_ply2,velocity_current)
#print " ",distance_ply2, option_ply2_valence, current_strongest_ply2_valence,current_strongest_valence,overall_strongest_valence
if(option_ply2_valence > current_strongest_ply2_valence):
current_strongest_ply2_valence = option_ply2_valence
if(current_strongest_ply2_valence > current_strongest_valence):
current_strongest_valence = current_strongest_ply2_valence
#print "New top current_strongest_valence",current_strongest_valence
best_option = option
else:
distance_current = distance(a_goal._preferred_location,option)
option_valence = a_goal.compute_total_valence(distance_current,velocity_current)
if(option_valence > current_strongest_valence):
current_strongest_valence = option_valence
best_option = option
if(abs(current_strongest_valence) > overall_strongest_valence):
overall_strongest_valence = abs(current_strongest_valence)
best_new_pos = best_option
return best_new_pos,overall_strongest_valence
def best_option_by_goal_summation(self):
current_strongest_valence = -9999999999999999.0
for option in self._current_options_new_pos:
option_summed_valence = 0.0
for a_goal in self._goals:
distance_current = distance(a_goal._preferred_location,option)
velocity_current = distance(option,self._prev_locations[1])/5.0
option_valence = a_goal.compute_total_valence(distance_current,velocity_current) #compute_distance_valence(distance_current)
option_summed_valence += option_valence
if(option_summed_valence > current_strongest_valence):
current_strongest_valence = option_summed_valence
best_option = option
return best_option,current_strongest_valence
def compute_suffering(self):
self._suffering = 0.0
for a_goal in self._goals:
current_distance = distance(a_goal._preferred_location,self._location)
current_velocity = distance(self._location,self._prev_locations[0])/5.0
self._suffering += a_goal.compute_total_valence(current_distance,current_velocity)
return self._suffering
def update_agent_satisfaction(self):
for a_goal in self._goals:
a_goal.update_satisfaction()
distance_current = distance(a_goal._preferred_location,self._location)
velocity_current = distance(self._prev_locations[0],self._location)/5.0
a_goal.detect_distance_satisfaction_acheivement(distance_current)
a_goal.detect_velocity_satisfaction_acheivement(velocity_current)
class goal_like():
def __init__(self,
base_dist_metric,
dist_decay,
base_time_metric,
time_decay,
preferred_location,
satisfaction_threshold,
base_velocity_metric=0.0,
min_velocity=-1.0,
subgoals=[]):
self._base_dist_metric = base_dist_metric
self._dist_decay = dist_decay
self._base_time_metric = base_time_metric
self._time_decay = time_decay
self._pinned = False
self._has_friend = False
if type(preferred_location) == type("pinned"):
if preferred_location == "pinned":
self._pinned = True
elif preferred_location == "friend":
self._has_friend = True
else:
print "INVALID PREFERRED_LOCATION"
else:
self._preferred_location = preferred_location
self._pinned = False
self._base_velocity_metric = base_velocity_metric
self._min_velocity = min_velocity
self._satisfaction_threshold = satisfaction_threshold
self._satisfaction = 0.0
self._subgoals = subgoals
def detect_distance_satisfaction_acheivement(self,current_distance):
if current_distance < self._satisfaction_threshold:
self._satisfaction = 1.0
def set_friend(self,friend):
self._friend = friend
def detect_velocity_satisfaction_acheivement(self,current_velocity):
if self._min_velocity < 0.0:
return 0.0
elif current_velocity > self._min_velocity:
self._satisfaction = 1.0
return 0.0
else:
return 1.0
def compute_total_valence(self,current_distance,current_velocity):
distance_valence = self.compute_distance_valence(current_distance)
velocity_valence = self.compute_velocity_valence(current_velocity)
time_valence_mod = self.compute_time_valence_modifier(current_distance)
return ( distance_valence + velocity_valence ) * time_valence_mod
def compute_distance_valence(self,current_distance):
return self._base_dist_metric / pow(current_distance,self._dist_decay)
def compute_velocity_valence(self,current_velocity):
return self._base_velocity_metric * self.detect_velocity_satisfaction_acheivement(current_velocity)
def compute_time_valence_modifier(self,current_distance):
return self._base_time_metric * ( 1.0 - self._satisfaction )
def update_satisfaction(self):
self._satisfaction = self._satisfaction * self._time_decay
#class belief_like():
class world():
def __init__(self,agents,choice_option="winner_take_all"):
self._agents = agents
self._choice_option = choice_option
def update_world(self,count):
for an_agent in self._agents:
an_agent._loc_hist_x.append(an_agent._location._x)
an_agent._loc_hist_y.append(an_agent._location._y)
an_agent._loc_hist_c.append(count)
an_agent.random_movement_options()
# Must update preferred locations before search for best option.
for a_goal in an_agent._goals:
if a_goal._pinned == True:
a_goal._preferred_location = an_agent._prev_locations[-2]
elif a_goal._has_friend == True:
a_goal._preferred_location = a_goal._friend._location
if(an_agent._ply2 == True):
an_agent.ply2_movement_options()
if self._choice_option == "winner_take_all":
best_new_pos, strongest_valence = an_agent.best_option_by_goal_winner_take_all()
elif self._choice_option == "summation":
best_new_pos, strongest_valence = an_agent.best_option_by_goal_summation()
an_agent.update_position(best_new_pos)
an_agent.update_average_velocity()
an_agent.update_agent_satisfaction()
def run_world(self,timesteps):
for kk in range(0,timesteps):
self.update_world(kk)
def plot_world(self):
plt.figure(figsize=(10,10))
for an_agent in self._agents:
for a_goal in an_agent._goals:
x = a_goal._preferred_location._x
y = a_goal._preferred_location._y
radius = a_goal._satisfaction_threshold
if a_goal._base_dist_metric < 0.0:
color = "r"
size = 50.0*50.0
else:
color = "g"
size = radius*radius*radius*radius*2
plt.scatter(x,y,s=size,c=color)
for an_agent in self._agents:
if(an_agent._color == "default"):
plt.scatter(an_agent._loc_hist_x,an_agent._loc_hist_y,c=an_agent._loc_hist_c,cmap="Accent",s=4.0,linewidths=0.0) #,marker=".",linestyle="")
else:
plt.scatter(an_agent._loc_hist_x,an_agent._loc_hist_y,c=an_agent._color,s=4.0,linewidths=0.0) #,marker=".",linestyle="")
plt.ylim([-10,70])
plt.xlim([-40,40])
plt.axis('off')
lowest_suffering = 99999999999999.0
for jj in range(0,1):
"""
trial_x,trial_y,trial_a,trial_threshold = random.uniform(0.0,1.0) * 100.0, random.uniform(0.0,1.0) * 100.0,random.uniform(-5.0,5.0),random.uniform(0.0,10.0)
goal_t = goal_like(trial_a, 2.0, 1.0,0.999,position(trial_x,trial_y),trial_threshold) # generic
cumulative_suffering = 0.0
"""
"""
Changing repulsive valence from -0.4 to -0.5 makes the difference in whether it will ever reach the upper goalstate.
"""
goal_1 = goal_like(1.000, 2.0, 1.0,0.99,position(-15.0,15.0),5.0) # attractive
goal_2 = goal_like(1.745, 2.0, 1.0,0.99,position(15.0,15.0), 5.0) # attractive
goal_3 = goal_like(-0.4, 1.5, 0.15,0.8, position(0.0,27.0), 0.0) # repulsive
goal_4 = goal_like(1.745, 2.0, 1.0,0.99,position(0.0,60.0), 5.0) # attractive
goal_5 = goal_like(0.000, 2.0, 1.0,0.1,position(0.0,0.0), 0.0, base_velocity_metric=1.0, min_velocity = 0.0) # velocity
agent_1 = agent(location=position(0.0,0.0),goals=[goal_1,goal_2,goal_3,goal_4,goal_5],beliefs=[])
world_0a = world([agent_1])
# Aversive obstacle
goal_1 = goal_like(1.000, 2.0, 1.0,0.99,position(-15.0,15.0),5.0) # attractive
goal_2 = goal_like(1.745, 2.0, 1.0,0.99,position(15.0,15.0), 5.0) # attractive
goal_3 = goal_like(-0.7, 1.5, 0.15,0.8, position(0.0,27.0), 0.0) # repulsive
goal_4 = goal_like(1.745, 2.0, 1.0,0.99,position(0.0,60.0), 5.0) # attractive
goal_5 = goal_like(0.000, 2.0, 1.0,0.1,position(0.0,0.0), 0.0, base_velocity_metric=1.0, min_velocity = 0.0) # velocity
agent_1 = agent(location=position(0.0,0.0),goals=[goal_1,goal_2,goal_3,goal_4,goal_5],beliefs=[])
world_0b = world([agent_1])
# Avoid obstacle
goal_1 = goal_like(1.000, 2.0, 1.0,0.99,position(-15.0,15.0),5.0) # attractive
goal_2 = goal_like(1.745, 2.0, 1.0,0.99,position(15.0,15.0), 5.0) # attractive
goal_3 = goal_like(-0.7, 1.5, 0.15,0.8, position(0.0,27.0), 0.0) # repulsive
goal_4 = goal_like(1.745, 2.0, 1.0,0.99,position(0.0,60.0), 5.0) # attractive
goal_4b = goal_like(1.0, 2.0, 1.0,0.99,position(20.0,40.0), 5.0) # attractive
goal_5 = goal_like(0.000, 2.0, 1.0,0.1,position(0.0,0.0), 0.0, base_velocity_metric=1.0, min_velocity = 0.0) # velocity
agent_1 = agent(location=position(0.0,0.0),goals=[goal_1,goal_2,goal_3,goal_4,goal_4b],beliefs=[])
world_0c = world([agent_1])
# With a cell phone
goal_1 = goal_like(1.000, 2.0, 1.0,0.999,position(-15.0,15.0),5.0) # attractive
goal_2 = goal_like(1.745, 2.0, 1.0,0.999,position(15.0,15.0), 5.0) # attractive
goal_3 = goal_like(-0.4, 1.5, 0.15,0.8, position(0.0,27.0), 0.0) # repulsive
goal_4 = goal_like(1.745, 2.0, 1.0,0.99,position(0.0,60.0), 5.0) # attractive
goal_5 = goal_like(0.000, 2.0, 1.0,0.1,position(0.0,0.0), 0.0, base_velocity_metric=1.0, min_velocity = 0.0) # velocity
goal_cell = goal_like(0.0010, 2.0, 1.0,0.9,"pinned", 0.0) # cell phone
agent_1 = agent(location=position(0.0,0.0),goals=[goal_1,goal_2,goal_3,goal_4,goal_5,goal_cell],beliefs=[])
world_1 = world([agent_1])
# With a high movement preference
goal_1 = goal_like(1.000, 2.0, 1.0,0.999,position(-15.0,15.0),5.0) # attractive
goal_2 = goal_like(1.745, 2.0, 1.0,0.999,position(15.0,15.0), 5.0) # attractive
goal_3 = goal_like(-0.4, 1.5, 0.15,0.8, position(0.0,27.0), 0.0) # repulsive
goal_4 = goal_like(1.745, 2.0, 1.0,0.99,position(0.0,60.0), 5.0) # attractive
goal_5 = goal_like(0.000, 2.0, 1.0,0.1,position(0.0,0.0), 0.0, base_velocity_metric=1.0, min_velocity = 0.8) # velocity
#goal_cell = goal_like(0.10, 2.0, 1.0,0.5,"pinned", 0.0) # cell phone
agent_1 = agent(location=position(0.0,0.0),goals=[goal_1,goal_2,goal_3,goal_4,goal_5],beliefs=[])
world_2a = world([agent_1])
# With a moderate movement preference
goal_1 = goal_like(1.000, 2.0, 1.0,0.999,position(-15.0,15.0),5.0) # attractive
goal_2 = goal_like(1.745, 2.0, 1.0,0.999,position(15.0,15.0), 5.0) # attractive
goal_3 = goal_like(-0.4, 1.5, 0.15,0.8, position(0.0,27.0), 0.0) # repulsive
goal_4 = goal_like(1.745, 2.0, 1.0,0.99,position(0.0,60.0), 5.0) # attractive
goal_5 = goal_like(0.000, 2.0, 1.0,0.1,position(0.0,0.0), 0.0, base_velocity_metric=1.0, min_velocity = 0.3) # velocity
#goal_cell = goal_like(0.10, 2.0, 1.0,0.5,"pinned", 0.0) # cell phone
agent_1 = agent(location=position(0.0,0.0),goals=[goal_1,goal_2,goal_3,goal_4,goal_5],beliefs=[])
world_2b = world([agent_1])
"""
Changing repulsive valence from -0.4 to -0.5 makes the difference in whether it will ever reach the upper goalstate.
"""
goal_1 = goal_like(1.000, 2.0, 1.0,0.999,position(-15.0,15.0),5.0) # attractive
goal_2 = goal_like(1.000, 2.0, 1.0,0.999,position(15.0,15.0), 5.0) # attractive
#goal_3 = goal_like(-0.4, 1.5, 0.15,0.8, position(0.0,27.0), 0.0) # repulsive
goal_4 = goal_like(1.000, 2.0, 1.0,0.999,position(0.0,60.0), 5.0) # attractive
#goal_5 = goal_like(0.000, 2.0, 1.0,0.1,position(0.0,0.0), 0.0, base_velocity_metric=1.0, min_velocity = 0.0) # velocity
goal_6 = goal_like(1.000, 2.0, 1.0,0.999,position(50.0,70.0), 5.0) # attractive
agent_1 = agent(location=position(0.0,0.0),goals=[goal_1,goal_2,goal_4,goal_6],beliefs=[],ply2=False,color='b')
agent_2 = agent(location=position(0.0,0.0),goals=[goal_1,goal_2,goal_4,goal_6],beliefs=[],ply2=True,color='r')
#agent_3 = agent(location=position(0.0,0.0),goals=[goal_1,goal_2,goal_4],beliefs=[],ply2=False,color='g')
world_3 = world([copy.deepcopy(agent_1),copy.deepcopy(agent_2)])
# With higher satisfiability
goal_1 = goal_like(1.000, 2.0, 1.0,0.99,position(-15.0,15.0),5.0) # attractive
goal_2 = goal_like(1.745, 2.0, 1.0,0.99,position(15.0,15.0), 5.0) # attractive
goal_3 = goal_like(-35.0, 3.0, 0.15,0.8, position(0.0,27.0), 0.0) # repulsive
goal_4 = goal_like(1.745, 2.0, 1.0,0.99,position(0.0,60.0), 5.0) # attractive
#goal_5 = goal_like(0.000, 2.0, 1.0,0.1,position(0.0,0.0), 0.0, base_velocity_metric=1.0, min_velocity = 0.3) # velocity
#goal_cell = goal_like(0.10, 2.0, 1.0,0.5,"pinned", 0.0) # cell phone
agent_1 = agent(location=position(0.0,0.0),goals=[goal_1,goal_2,goal_3,goal_4],beliefs=[])
world_4 = world([agent_1])
# Lovers
goal_1 = goal_like(1.000, 2.0, 1.0,0.999,position(-15.0,15.0),5.0) # attractive
goal_2a = goal_like(1.745, 2.0, 1.0,0.999,position(15.0,15.0), 5.0) # attractive
goal_2b = goal_like(1.745, 2.0, 1.0,0.999,position(15.0,15.0), 5.0) # attractive
goal_3a = goal_like(-0.4, 1.5, 0.15,0.8, position(0.0,27.0), 0.0) # repulsive
goal_3b = goal_like(-0.4, 1.5, 0.15,0.8, position(0.0,27.0), 0.0) # repulsive
goal_4 = goal_like(1.745, 2.0, 1.0,0.99,position(0.0,60.0), 5.0) # attractive
goal_friend_a = goal_like(0.1, 2.0, 1.0,0.99,"friend", 0.0) # a friend
goal_friend_b = goal_like(0.1, 2.0, 1.0,0.99,"friend", 0.0) # a friend
agent_a = agent(location=position(0.0,0.0),goals=[goal_1,goal_2a,goal_3a,goal_friend_a],beliefs=[],color='r')
agent_b = agent(location=position(0.0,0.0),goals=[goal_2b,goal_3b,goal_4,goal_friend_b],beliefs=[],color='b')
agent_a._goals[3].set_friend(agent_b)
agent_b._goals[3].set_friend(agent_a)
world_5 = world([agent_a,agent_b])
# Friends
goal_1 = goal_like(1.000, 2.0, 1.0,0.999,position(-15.0,15.0),5.0) # attractive
goal_2a = goal_like(1.745, 2.0, 1.0,0.999,position(15.0,15.0), 5.0) # attractive
goal_2b = goal_like(1.745, 2.0, 1.0,0.999,position(15.0,15.0), 5.0) # attractive
#goal_3a = goal_like(-0.2, 1.5, 0.15,0.8, position(0.0,27.0), 0.0) # repulsive
#goal_3b = goal_like(-0.2, 1.5, 0.15,0.8, position(0.0,27.0), 0.0) # repulsive
goal_4 = goal_like(1.745, 2.0, 1.0,0.999,position(0.0,60.0), 5.0) # attractive
goal_friend_a = goal_like(0.1, 2.0, 1.0,0.99,"friend", 0.5) # a friend
goal_friend_b = goal_like(0.1, 2.0, 1.0,0.99,"friend", 0.5) # a friend
"""
goal_optima = goal_like(-2.7857, 2.0, 1.0,0.999,position(35.248,26.075),1.4493) # generic
"""
agent_a = agent(location=position(0.0,0.0),goals=[goal_1,goal_2a,goal_friend_a],beliefs=[],color='r')
agent_b = agent(location=position(0.0,0.0),goals=[goal_2b,goal_4,goal_friend_b],beliefs=[],color='b')
agent_a._goals[2].set_friend(agent_b)
agent_b._goals[2].set_friend(agent_a)
world_6 = world([agent_a,agent_b])
# New goals are disruptive (contrast with #1)
goal_1 = goal_like(1.000, 2.0, 1.0,0.99,position(-15.0,15.0),5.0) # attractive
goal_2 = goal_like(1.745, 2.0, 1.0,0.99,position(15.0,15.0), 5.0) # attractive
goal_3 = goal_like(-0.4, 1.5, 0.15,0.8, position(0.0,27.0), 0.0) # repulsive
goal_4 = goal_like(1.745, 2.0, 1.0,0.99,position(0.0,60.0), 5.0) # attractive
goal_4b = goal_like(2.745, 2.0, 1.0,0.99,position(30.0,60.0), 5.0) # attractive # New "disruptive" goal
goal_5 = goal_like(0.000, 2.0, 1.0,0.1,position(0.0,0.0), 0.0, base_velocity_metric=1.0, min_velocity = 0.0) # velocity
agent_1 = agent(location=position(0.0,0.0),goals=[goal_1,goal_2,goal_3,goal_4,goal_4b,goal_5],beliefs=[])
world_7 = world([agent_1])
# Summation choice option
world_8 = world([copy.deepcopy(agent_1)],choice_option="summation")
# Beliefs ... concept of meta-motivational expectation decay as you hedonically adapt
# to the pursuit of the goal as being status quo/baseline.
iterations = 10000
print "Baseline"
world_0a.run_world(iterations)
world_0a.plot_world()
plt.show()
print "Aversive obstacle"
world_0b.run_world(iterations)
world_0b.plot_world()
plt.show()
print "Avoid obstacle"
world_0c.run_world(iterations)
world_0c.plot_world()
plt.show()
print "Cell phone"
world_1.run_world(iterations)
world_1.plot_world()
plt.show()
print "High movement preference"
world_2a.run_world(iterations)
world_2a.plot_world()
plt.show()
print "Moderate movement preference"
world_2b.run_world(iterations)
world_2b.plot_world()
plt.show()
plt.xlim(-20,60)
plt.ylim(0,80)
print "2-ply lookahead in decision making"
world_3.run_world(iterations)
world_3.plot_world()
plt.show()
print "With higher goal satisfiability"
world_4.run_world(iterations)
world_4.plot_world()
plt.show()
print "Lovers"
world_5.run_world(iterations)
world_5.plot_world()
plt.show()
print "Friends"
world_6.run_world(iterations)
world_6.plot_world()
plt.show()
print "New goals are disruptive"
world_7.run_world(iterations)
world_7.plot_world()
plt.show()
print "Summation choice option"
world_8.run_world(iterations)
world_8.plot_world()
plt.show()
"""
cumulative_suffering += agent_1.compute_suffering()
"""
"""
print trial_x,trial_y,trial_a,trial_threshold,":",cumulative_suffering
if cumulative_suffering < lowest_suffering:
lowest_suffering = cumulative_suffering
best_case = [trial_x,trial_y,trial_a,trial_threshold,cumulative_suffering]
"""
| [
"[email protected]"
] | |
55cadd97907633b2c24c61b2e9a54c14c2564999 | bcbddf91c1a59438e5508f9d92cb213a39df3448 | /imagegen/nodes/noise_node.py | d3561f86d50ba9e26840adb7fe51def634148d82 | [] | no_license | nfactorial/imagegen | 1323b9a1112a79c39a153df428345155f7feb048 | 7420f8cdcea12fddb92bb8dadfc8e0bb6bc0f165 | refs/heads/master | 2021-01-10T07:09:12.470665 | 2016-03-02T17:25:48 | 2016-03-02T17:25:48 | 52,427,109 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,730 | py | """
Copyright 2016 nfactorial
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import noise
from ..color import Color
from ..parameter import ParameterDefinition
from ..node_registry import register_node
NOISE_INPUT = [
ParameterDefinition('octaves',
param_type='int',
default_value=1),
ParameterDefinition('frequency',
param_type='scalar',
default_value=4.0)
]
def evaluate_noise(eval_info):
"""
:param eval_info: Parameters describing the sample currently being evaluated.
:return: The evaluated value at the supplied sample location.
"""
octaves = eval_info.evaluate('octaves', eval_info.x, eval_info.y)
frequency = eval_info.evaluate('frequency', eval_info.x, eval_info.y)
frequency_x = frequency / eval_info.image_size[0] * octaves
frequency_y = frequency / eval_info.image_size[0] * octaves
pnoise = noise.pnoise2(eval_info.x / frequency_x, eval_info.y / frequency_y, octaves=octaves)
pnoise = 0.5 + pnoise / 2.0
return Color(pnoise, pnoise, pnoise, pnoise)
register_node('noise', evaluate_noise, NOISE_INPUT, output='color',
description='Generates a random 2D noise value.')
| [
"[email protected]"
] | |
fbe0979bb9bfd1111ac0cd12f14a2aecde30e551 | 892266713e500efa5ac04e1b8de812200410c956 | /devset.py | cd8b6e2d344c504aedbc001fde9be6ebc8fc85de | [
"BSD-2-Clause"
] | permissive | martinphellwig/django-g11n | 972eb95128637ec0b21efabad6b40ba02c30356c | 94eb9da7d7027061873cd44356fdf3378cdb3820 | refs/heads/master | 2020-08-29T12:24:04.687019 | 2016-10-10T15:54:32 | 2016-10-10T15:54:32 | 218,030,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,315 | py | #! /usr/bin/env python
"""
Developer Reset.
"""
import os
APP = 'django_g11n'
DIR = os.path.dirname(os.path.abspath(__file__))
def get_last_migration_file():
"Fetch the latest migration file."
_ = os.path.join(DIR, APP, 'migrations')
_ = [os.path.join(_, item) for item in os.listdir(_) if not item.startswith('_')]
_.sort()
if len(_) > 0:
return _[-1]
else:
return None
def modify_migration():
"Modify migration, add pylint disable line."
path = get_last_migration_file()
if path is None:
return
text = '# pylint: disable=invalid-name, missing-docstring, line-too-long\n'
with open(path, 'r+') as file_open:
data = file_open.readlines()
data.insert(1, text)
file_open.seek(0)
file_open.write(''.join(data))
def execute_shell(command, prefix='python manage.py', pipe=None):
"Execute shell python manage.py"
import subprocess
cmd = prefix + ' ' + command
if pipe is not None:
cmd = pipe + ' | ' + cmd
subprocess.call(cmd, shell=True)
def add_superuser(username, password):
"Add superuser"
from django.contrib.auth.models import User
user = User(username=username)
user.set_password(password)
user.is_superuser = True
user.is_staff = True
user.save()
return user
def remove_db():
"remove the db if it exists"
_ = os.path.join(DIR, 'db.sqlite3')
if os.path.exists(_):
os.remove(_)
def remove_last_migration():
"remove last migration file."
_ = get_last_migration_file()
if _ is not None:
os.remove(_)
def add_migrations():
"set up the new migrations and migrate"
execute_shell('makemigrations ' + APP)
execute_shell('makemigrations')
execute_shell('migrate')
modify_migration()
def main():
"Executed when this is the interface module"
remove_db()
remove_last_migration()
add_migrations()
#
# This will run a shell which imports this file as a module, this means
# we can execute things in a Django environment.
execute_shell('shell', pipe='echo "import devset"')
#
execute_shell('runserver')
def as_module():
"Executed when this is imported."
add_superuser('admin', 'admin')
if __name__ == '__main__':
main()
else:
as_module()
| [
"martin@localhost"
] | martin@localhost |
41a58d08aeb1f5f3ee5fbd1e3067dbcc9eefbc43 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-3540.py | a875472b054493c30d3909c9a4a41429a81f0434 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,742 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers
class Vector2(object):
    # Primary backing storage; the only list ever allocated.
    items: [int] = None
    # Secondary fields exist but are never touched by any method.
    items2: [int] = None
    size: int = 0
    size2: int = 0
    def __init__(self:"Vector2"):
        self.items = [0]
    # Current number of slots in the backing list.
    def capacity(self:"Vector2") -> int:
        return len(self.items)
    # Variant of capacity; reports the same primary backing list's slot count.
    def capacity2(self:"Vector2") -> int:
        return len(self.items)
    # Grows the backing list by a single slot and reports the new slot count.
    def increase_capacity(self:"Vector2") -> int:
        self.items = self.items + [0]
        return len(self.items)
    # Variant of increase_capacity; grows the same primary backing list.
    def increase_capacity2(self:"Vector2") -> int:
        self.items = self.items + [0]
        return len(self.items)
    # Stores one value after the last occupied slot, growing first when full.
    def append(self:"Vector2", item: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Variant of append; item2 is accepted but never used.
    def append2(self:"Vector2", item: int, item2: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Copies every value of new_items, in order, onto the end of this vector.
    def append_all(self:"Vector2", new_items: [int]) -> object:
        value:int = 0
        for value in new_items:
            self.append(value)
    # Variant of append_all; new_items2 is accepted but never used.
    def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
        value:int = 0
        value2:int = 0
        for value in new_items:
            self.append(value)
    # Drops the entry at idx by shifting the tail left; negative idx is a no-op.
    def remove_at(self:"Vector2", idx: int) -> object:
        if idx >= 0:
            while idx < self.size - 1:
                self.items[idx] = self.items[idx + 1]
                idx = idx + 1
            self.size = self.size - 1
    # Variant of remove_at; idx2 is accepted but never used.
    def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
        if idx >= 0:
            while idx < self.size - 1:
                self.items[idx] = self.items[idx + 1]
                idx = idx + 1
            self.size = self.size - 1
    # Reads the value stored at position idx.
    def get(self:"Vector2", idx: int) -> int:
        return self.items[idx]
    # Variant of get; idx2 is accepted but never used.
    def get2(self:"Vector2", idx: int, idx2: int) -> int:
        return self.items[idx]
    # Number of values currently stored.
    def length(self:"Vector2") -> int:
        return self.size
    # Variant of length; reports the same primary size.
    def length2(self:"Vector2") -> int:
        return self.size
# A resizable list of integers
class Vector3(object):
    # Primary backing storage; the only list any method ever allocates or reads.
    items: [int] = None
    # items2/items3 and size2/size3 are declared but never assigned or read
    # by any method below — presumably intentional filler in this generated
    # benchmark; TODO confirm against the corpus generator.
    items2: [int] = None
    items3: [int] = None
    # Count of occupied slots in items.
    size: int = 0
    size2: int = 0
    size3: int = 0
    def __init__(self:"Vector3"):
        self.items = [0]
    # Returns current capacity (slot count of the primary backing list)
    def capacity(self:"Vector3") -> int:
        return len(self.items)
    # Returns current capacity; reads the same primary list as capacity()
    def capacity2(self:"Vector3") -> int:
        return len(self.items)
    # Returns current capacity; reads the same primary list as capacity()
    def capacity3(self:"Vector3") -> int:
        return len(self.items)
    # Increases capacity of vector by one element and returns the new capacity
    def increase_capacity(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity by one element; operates on the same primary list
    def increase_capacity2(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity by one element; operates on the same primary list
    def increase_capacity3(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Appends one item to end of vector, growing the backing list first if full
    def append(self:"Vector3", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends item to end of vector; item2 is accepted but never used
    def append2(self:"Vector3", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends item to end of vector; item2 and item3 are accepted but never used
    def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends many items to end of vector, in order
    def append_all(self:"Vector3", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)
    # Appends the items of new_items; new_items2 is accepted but never used
    def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)
    # Appends the items of new_items; new_items2/new_items3 are accepted but never used
    def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        for item in new_items:
            self.append(item)
    # Removes the item at idx by shifting the tail left; negative idx is a no-op
    def remove_at(self:"Vector3", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes the item at idx; idx2 is accepted but never used
    def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes the item at idx; idx2 and idx3 are accepted but never used
    def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Retrieves the item at position idx
    def get(self:"Vector3", idx: int) -> int:
        return self.items[idx]
    # Retrieves the item at position idx; idx2 is accepted but never used
    def get2(self:"Vector3", idx: int, idx2: int) -> int:
        return self.items[idx]
    # Retrieves the item at position idx; idx2 and idx3 are accepted but never used
    def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]
    # Retrieves the current size of the vector
    def length(self:"Vector3") -> int:
        return self.size
    # Retrieves the current size; reads the same primary size field as length()
    def length2(self:"Vector3") -> int:
        return self.size
    # Retrieves the current size; reads the same primary size field as length()
    def length3(self:"Vector3") -> int:
        return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
$Exp
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector5", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
item5:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector5", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector5", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector5", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length5(self:"Vector5") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
doubling_limit5:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity5(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
k5:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
| [
"[email protected]"
] | |
1077e3a359f70ea9f79c9681b0bc59bf455afbb9 | 616f5d438c7662d2fedc5b55ec616e702a57cf1a | /downtime.py | 33c4fec10f960e751be99c4b6253e2a17ac818d0 | [
"MIT"
] | permissive | mozilla-it/security-patching | d07015c77bc768c0ae67e224ef23783dc5f51801 | 23cbbf39c5b5ccc47c6a818f8137abaeb55d49f1 | refs/heads/master | 2021-07-03T16:42:12.875642 | 2020-08-04T19:41:46 | 2020-08-04T19:41:46 | 95,240,563 | 0 | 1 | MIT | 2020-08-04T19:41:48 | 2017-06-23T17:11:59 | Python | UTF-8 | Python | false | false | 3,650 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
'''
downtimes.py
'''
import os
import re
import sys
import socket, ssl
sys.dont_write_bytecode = True
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from ConfigParser import SafeConfigParser
class IRC(object):
def __init__(self, network, port, nick):
_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
_socket.connect((network, port))
self.regex = re.compile('NOTICE ' + nick)
self.socket = ssl.wrap_socket(_socket)
def send(self, string):
self.socket.send(string.encode('utf-8'))
def recv(self):
string = ''
x = 0
while True:
x += 1
string += self.socket.recv(4096).decode('utf-8')
match = self.regex.search(string)
if match != None:
break
return string
def downtime(ns):
data = ''
msg = '{botname}: downtime {hostname} {duration} {reason}'.format(**ns.__dict__)
if ns.verbose:
print('msg =', msg)
irc = IRC(ns.network, ns.port, ns.nick)
irc.send('NICK %s\r\n' % ns.nick)
irc.send('USER %s %s %s :My bot\r\n' % (ns.nick, ns.nick, ns.nick))
response = irc.recv()
if ns.verbose:
print(response)
irc.send('JOIN %s %s\r\n' % (ns.channel, ns.key))
irc.send('PRIVMSG %s %s\r\n' % (ns.channel, msg))
# FIXME: check for: downtime-bot: I'm sorry but I cannot find the host or service
irc.send('QUIT\r\n')
def channel(string):
if string.startswith('#'):
return string
return '#' + string
def load(config):
defaults = {}
parser = SafeConfigParser()
if os.path.exists(config):
parser.read([config])
defaults = dict(parser.items('downtime'))
return defaults
if __name__=='__main__':
parser = ArgumentParser(add_help=False)
parser.add_argument(
'-v', '--verbose',
action='store_true',
help='turn on verbose printing')
parser.add_argument(
'-C', '--config',
metavar='PATH',
default='./downtime.cfg',
help='default="%(default)s"; optional file to setup default values')
ns, rem = parser.parse_known_args()
defaults = load(ns.config)
parser = ArgumentParser(
parents=[parser],
description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.set_defaults(**defaults)
parser.add_argument(
'-B', '--botname',
help='default="%(default)s"; set the botname to be mentioned')
parser.add_argument(
'-N', '--network',
help='default="%(default)s"; set the irc network')
parser.add_argument(
'-p', '--port',
default=6697,
type=int,
help='default="%(default)s"; set the irc port')
parser.add_argument(
'-c', '--channel',
type=channel,
help='default="%(default)s"; set the channel')
parser.add_argument(
'-k', '--key',
help='default="%(default)s"; set the key, if any')
parser.add_argument(
'-n', '--nick',
default='downtime-bot',
help='default="%(default)s"; set the nick to be used')
parser.add_argument(
'-d', '--duration',
default='1h',
help='default="%(default)s"; set the duration of the downtime')
parser.add_argument(
'-r', '--reason',
default='patching',
help='default="%(default)s"; set the reason of the downtime')
parser.add_argument(
'hostname',
help='set the hostname that is being downtimed')
ns = parser.parse_args()
if ns.verbose:
print(ns)
downtime(ns)
| [
"[email protected]"
] | |
fc435a020a77c3ed5c0f0d15795d7431a52254f3 | 6b6cff01c462ae67765315dbe50c9c4aa6711cf8 | /tests/test_page_loader.py | c43db17e27b5a0d2a170b66b508575cafb031008 | [] | no_license | LyuPo7/python-project-lvl3 | cbc49ceb9d1845831aa0edbd873a1cce496517b0 | cf8248b1a11f9b2bcce31365ae3d20487ade403c | refs/heads/main | 2023-04-16T23:10:58.859114 | 2021-05-04T12:43:04 | 2021-05-04T12:43:04 | 304,558,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,644 | py | # -*- coding:utf-8 -*-
"""Tests for page-loader."""
import os
import pytest
import requests
import tempfile
import requests_mock
from page_loader import page
from page_loader import path
from bs4 import BeautifulSoup as bs
LINK = 'https://pythonjobs.github.io'
HTML = './tests/fixtures/source/pythonjobs-github-io.html'
DIR_TMP = tempfile.mkdtemp()
HTML_EXPECTED = './tests/fixtures/expected/pythonjobs-github-io.html'
DIR_EXPECTED = './tests/fixtures/expected/pythonjobs-github-io_files/'
def extract(link):
"""Extract data from file.
Args:
link(str): file,
Returns:
: string with content of the file
"""
with open(link) as file:
return file.read()
def download(link, html, folder):
"""Run download function with mock."""
with requests_mock.mock() as m:
m.get(link, text=extract(html))
page.download(link, folder)
def test_content():
"""Compare expected and source html content."""
# Expected file content
file_exp = open(HTML_EXPECTED, 'rb')
content_exp = bs(file_exp.read(), 'html.parser')
# Downloaded file content
download(LINK, HTML, DIR_TMP)
html_down = path.create(LINK)[0]
file_down = open(os.path.join(DIR_TMP, html_down))
content_down = bs(file_down, 'html.parser')
assert content_exp.decode() == content_down.decode()
def test_srcs():
"""Compare expected and downloaded srcs lists."""
# Expected srcs list
files_exp = os.listdir(DIR_EXPECTED)
# Downloaded srcs list
dir_down = path.create(LINK)[1]
files_down = os.listdir(os.path.join(DIR_TMP, dir_down))
assert files_exp == files_down | [
"[email protected]"
] | |
748b1a4c649433f18bc779c59fa3d4da540bf330 | bd185738ea6a74d1e76d9fc9d8cbc59f94990842 | /onadata/libs/pagination.py | f3aaf30a3bad15075443aa054f66f133a9d41638 | [
"BSD-2-Clause"
] | permissive | aondiaye/myhelpline | c4ad9e812b3a13c6c3c8bc65028a3d3567fd6a98 | d72120ee31b6713cbaec79f299f5ee8bcb7ea429 | refs/heads/master | 2020-12-22T05:32:59.576519 | 2019-10-29T08:52:55 | 2019-10-29T08:52:55 | 236,683,448 | 1 | 0 | NOASSERTION | 2020-01-28T07:50:18 | 2020-01-28T07:50:17 | null | UTF-8 | Python | false | false | 206 | py | from rest_framework.pagination import PageNumberPagination
class StandardPageNumberPagination(PageNumberPagination):
page_size = 1000
page_size_query_param = 'page_size'
max_page_size = 10000
| [
"[email protected]"
] | |
fbcc54fea5b182b3e2383026e517dcaa50974606 | f20516958c39123f204e2bc442c91df7df1cc34a | /amqpstorm/exchange.py | 865a03bd8e75475a400c5bdf1d4068945cb5fa0b | [
"BSD-3-Clause"
] | permissive | bradparks/ReadableWebProxy | 3c2732cff64007afa8318b5b159616a529068322 | 81fbce3083471126942d2e2a298dba9eaf1092b1 | refs/heads/master | 2020-05-29T11:48:40.189530 | 2016-08-25T15:17:14 | 2016-08-25T15:17:14 | 66,568,996 | 0 | 0 | null | 2016-08-25T15:13:39 | 2016-08-25T15:13:39 | null | UTF-8 | Python | false | false | 5,689 | py | """AMQP-Storm Channel.Exchange."""
import logging
from pamqp.specification import Exchange as pamqp_exchange
from amqpstorm import compatibility
from amqpstorm.base import Handler
from amqpstorm.exception import AMQPInvalidArgument
LOGGER = logging.getLogger(__name__)
class Exchange(Handler):
"""AMQP Channel.exchange"""
__slots__ = []
def declare(self, exchange='', exchange_type='direct', passive=False,
durable=False, auto_delete=False, arguments=None):
"""Declare an Exchange.
:param str exchange:
:param str exchange_type:
:param bool passive:
:param bool durable:
:param bool auto_delete:
:param dict arguments:
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
"""
if not compatibility.is_string(exchange):
raise AMQPInvalidArgument('exchange should be a string')
elif not compatibility.is_string(exchange_type):
raise AMQPInvalidArgument('exchange_type should be a string')
elif not isinstance(passive, bool):
raise AMQPInvalidArgument('passive should be a boolean')
elif not isinstance(durable, bool):
raise AMQPInvalidArgument('durable should be a boolean')
elif not isinstance(auto_delete, bool):
raise AMQPInvalidArgument('auto_delete should be a boolean')
elif arguments is not None and not isinstance(arguments, dict):
raise AMQPInvalidArgument('arguments should be a dict or None')
declare_frame = pamqp_exchange.Declare(exchange=exchange,
exchange_type=exchange_type,
passive=passive,
durable=durable,
auto_delete=auto_delete,
arguments=arguments)
return self._channel.rpc_request(declare_frame)
def delete(self, exchange='', if_unused=False):
"""Delete an Exchange.
:param str exchange:
:param bool if_unused:
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
"""
if not compatibility.is_string(exchange):
raise AMQPInvalidArgument('exchange should be a string')
delete_frame = pamqp_exchange.Delete(exchange=exchange,
if_unused=if_unused)
return self._channel.rpc_request(delete_frame)
def bind(self, destination='', source='', routing_key='',
arguments=None):
"""Bind an Exchange.
:param str destination:
:param str source:
:param str routing_key:
:param dict arguments:
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
"""
if not compatibility.is_string(destination):
raise AMQPInvalidArgument('destination should be a string')
elif not compatibility.is_string(source):
raise AMQPInvalidArgument('source should be a string')
elif not compatibility.is_string(routing_key):
raise AMQPInvalidArgument('routing_key should be a string')
elif arguments is not None and not isinstance(arguments, dict):
raise AMQPInvalidArgument('arguments should be a dict or None')
bind_frame = pamqp_exchange.Bind(destination=destination,
source=source,
routing_key=routing_key,
arguments=arguments)
return self._channel.rpc_request(bind_frame)
def unbind(self, destination='', source='', routing_key='',
arguments=None):
"""Unbind an Exchange.
:param str destination:
:param str source:
:param str routing_key:
:param dict arguments:
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
"""
if not compatibility.is_string(destination):
raise AMQPInvalidArgument('destination should be a string')
elif not compatibility.is_string(source):
raise AMQPInvalidArgument('source should be a string')
elif not compatibility.is_string(routing_key):
raise AMQPInvalidArgument('routing_key should be a string')
elif arguments is not None and not isinstance(arguments, dict):
raise AMQPInvalidArgument('arguments should be a dict or None')
unbind_frame = pamqp_exchange.Unbind(destination=destination,
source=source,
routing_key=routing_key,
arguments=arguments)
return self._channel.rpc_request(unbind_frame)
| [
"[email protected]"
] | |
956a8c2e6f77f02477e08eab6172881f93d5b37c | 5ae3fb122408210fdd99aa02f3fb187059d8e4bf | /django_realtime/wsgi.py | 64a9d66a0a48710b2dcf33ec2a4708641545ec5c | [] | no_license | Tee-py/django_realtime | 1e129a738567eced96ac92b95e128b6f0bb9426b | 65dd68aa1215a2dcf08430bfa54ac26e4f71d528 | refs/heads/master | 2023-01-11T10:37:46.689473 | 2020-11-05T03:26:06 | 2020-11-05T03:26:06 | 310,178,740 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | """
WSGI config for django_realtime project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Point Django at the project's settings module before the app is constructed
# (setdefault keeps any value already present in the environment).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_realtime.settings')

# Module-level WSGI callable that servers (gunicorn, uWSGI, mod_wsgi) import.
application = get_wsgi_application()
| [
"[email protected]"
] | |
e3e7ebee71f69fdbdd0c48659e3990cb42319055 | 513913bb77fc7f8f71e67de53d99bc8b33deb04e | /myDict.py | 4c71faa73b213c7bd4cbe1c7b0dd843304bd3dfe | [] | no_license | jmhughes018/camp-game | 3314e883f99a59269ea57b4fe9180f237f874104 | d72bc021ff89474fb2ee209f4ab37772d927126c | refs/heads/master | 2020-04-10T12:20:27.055457 | 2016-09-15T17:24:23 | 2016-09-15T17:24:23 | 68,007,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,782 | py | myPhrases = ['CACTUS PACT', 'SIMPLE DIMPLE', 'RIP VAN WRINKLE', 'BLUE FISH RED FISH', 'WILDLY MILD', 'TURBO BLURB', 'PORK AND STORK', 'ELMOS ELBOW', 'GIMPY BLIMP', 'BANG BANG SHRIMP', 'MOCHA CHA CHA', 'ORANGE MOCHA FRAP', 'BUMMER SUMMER', 'WINTER LINT', 'FALL WATERFALL', 'DING DONG STAY', 'SPRING DYNASTY', 'EPSOM SALT', 'LALA HOLLA', 'HOLY MOLEY', 'ROTTEN COTTON', 'ICY ICE CREAM', 'BOMB DOT COM', 'CHEM 6A', 'UNDERWATER OVERSEAS', 'HEDGE EDGE', 'HOME PHONE TONE', 'BEN HURRY', 'CONCRETE REPEAT', 'THREE FOUR CHANT', 'YOU TWO WHO', 'ODE TO CHINSTRAPS', 'INCOMPLETE RETWEET', 'SOREN OR LATER', 'COMPANY CAR TALK', 'ONLY HOPEFULLY', 'UNDER THE FLOORBOARD', 'POPARAZZI YAHTZEE', 'NO SENSE INNOCENCE', 'PLAY FOR KEEPS', 'EROSION MOTION', 'GONE FISHIN', 'TWENTY FOUR', 'STARS IN OUR FAULT', 'YUPPIE PUPPIE', 'SUNSHINE STATE', 'EASY PEASY', 'BLACK EYED BLUES', 'FATAL LADLE', 'CRAZY DAISY', 'CAVITY GRAVITY', 'AMERICAN DRUM', 'SECOND HAND SMOKE', 'STAY WOKE', 'CIRCLE RECALL', 'AMATEUR HOUR', 'MIDAS TOUCH', 'HEAD OVER HEELS', 'APRIL TWELVE', 'YELP FOR YETIS', 'RED EYE FLIGHT', 'FREE FOR REAL', 'HELLO SANDY', 'RADIATION EXCLAMATION', 'RESTFUL TEST', 'BLINDING SIDING', 'NEWSIES FLEW THESE', 'SOUVENIR SUITCASE', 'SALTWATER TAFFY', 'LIBERTY DISCO', 'EDGE OF EARTH', 'DELUXE HUGS', 'UNCOUTH HOUTHI', 'SQUARE AND FAIR', 'COOL HAND LUKE', 'NEVER OR NOW', 'TUNA MELT', 'THEN AND NOW', 'ESCARGO CARGO', 'FRIEND OF A FRIEND', 'HE SAID SHE SAID', 'TIBER TIGER', 'FEMMES THAMES', 'PTERODACTYL', 'LEXUS LET US', 'FREE FALL FREE FOR ALL', 'ELEMENTARY 
WATSON', 'GIANT PEACH', 'CHARLIE AND CHOCOLATE', 'CHAMPION DANNY', 'TRIBBLE DRIBBLE', 'WALKING MAN WALKS', 'SEND A FRIEND', 'FATTENED CALF', 'SWELL LEWIS', 'DR PEPPER', 'COLONEL SANDERS', 'KALAMATA OLIVE', 'METRO FRITO', 'REMEMBER THE EMBERS']
| [
"[email protected]"
] | |
0709557c1f679fa1a41d7157bfe2c991f6adadfc | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/NTWS-AP-IF-MIB.py | d9da09616db8ef8ddc0d2db88e651ab9fd3c63d5 | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 5,408 | py | #
# PySNMP MIB module NTWS-AP-IF-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/NTWS-AP-IF-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:16:00 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueRangeConstraint, ConstraintsIntersection, SingleValueConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ValueSizeConstraint")
IANAifType, = mibBuilder.importSymbols("IANAifType-MIB", "IANAifType")
NtwsApSerialNum, = mibBuilder.importSymbols("NTWS-AP-TC", "NtwsApSerialNum")
ntwsMibs, = mibBuilder.importSymbols("NTWS-ROOT-MIB", "ntwsMibs")
ObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance")
Counter64, IpAddress, iso, Bits, Integer32, TimeTicks, Counter32, ObjectIdentity, ModuleIdentity, MibIdentifier, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "IpAddress", "iso", "Bits", "Integer32", "TimeTicks", "Counter32", "ObjectIdentity", "ModuleIdentity", "MibIdentifier", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "NotificationType")
MacAddress, TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "MacAddress", "TextualConvention", "DisplayString")
ntwsApIfMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16))
ntwsApIfMib.setRevisions(('2008-11-20 00:01',))
if mibBuilder.loadTexts: ntwsApIfMib.setLastUpdated('200811200001Z')
if mibBuilder.loadTexts: ntwsApIfMib.setOrganization('Nortel Networks')
class NtwsApInterfaceIndex(TextualConvention, Unsigned32):
    # SMI textual convention for an AP interface index: an unsigned integer
    # displayed in decimal ('d') and restricted to the range 1..1024.
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(1, 1024)
ntwsApIfMibObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16, 1))
ntwsApIfTable = MibTable((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16, 1, 1), )
if mibBuilder.loadTexts: ntwsApIfTable.setStatus('current')
ntwsApIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16, 1, 1, 1), ).setIndexNames((0, "NTWS-AP-IF-MIB", "ntwsApIfApSerialNum"), (0, "NTWS-AP-IF-MIB", "ntwsApIfIndex"))
if mibBuilder.loadTexts: ntwsApIfEntry.setStatus('current')
ntwsApIfApSerialNum = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16, 1, 1, 1, 1), NtwsApSerialNum())
if mibBuilder.loadTexts: ntwsApIfApSerialNum.setStatus('current')
ntwsApIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16, 1, 1, 1, 2), NtwsApInterfaceIndex())
if mibBuilder.loadTexts: ntwsApIfIndex.setStatus('current')
ntwsApIfName = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16, 1, 1, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsApIfName.setStatus('current')
ntwsApIfType = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16, 1, 1, 1, 4), IANAifType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsApIfType.setStatus('current')
ntwsApIfMtu = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16, 1, 1, 1, 5), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsApIfMtu.setStatus('current')
ntwsApIfHighSpeed = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16, 1, 1, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsApIfHighSpeed.setStatus('current')
ntwsApIfMac = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16, 1, 1, 1, 7), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsApIfMac.setStatus('current')
ntwsApIfConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16, 2))
ntwsApIfCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16, 2, 1))
ntwsApIfGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16, 2, 2))
ntwsApIfCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16, 2, 1, 1)).setObjects(("NTWS-AP-IF-MIB", "ntwsApIfBasicGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ntwsApIfCompliance = ntwsApIfCompliance.setStatus('current')
ntwsApIfBasicGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16, 2, 2, 1)).setObjects(("NTWS-AP-IF-MIB", "ntwsApIfName"), ("NTWS-AP-IF-MIB", "ntwsApIfType"), ("NTWS-AP-IF-MIB", "ntwsApIfMtu"), ("NTWS-AP-IF-MIB", "ntwsApIfHighSpeed"), ("NTWS-AP-IF-MIB", "ntwsApIfMac"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ntwsApIfBasicGroup = ntwsApIfBasicGroup.setStatus('current')
mibBuilder.exportSymbols("NTWS-AP-IF-MIB", ntwsApIfApSerialNum=ntwsApIfApSerialNum, ntwsApIfConformance=ntwsApIfConformance, ntwsApIfCompliance=ntwsApIfCompliance, PYSNMP_MODULE_ID=ntwsApIfMib, ntwsApIfName=ntwsApIfName, ntwsApIfMib=ntwsApIfMib, ntwsApIfHighSpeed=ntwsApIfHighSpeed, NtwsApInterfaceIndex=NtwsApInterfaceIndex, ntwsApIfBasicGroup=ntwsApIfBasicGroup, ntwsApIfEntry=ntwsApIfEntry, ntwsApIfMac=ntwsApIfMac, ntwsApIfIndex=ntwsApIfIndex, ntwsApIfMtu=ntwsApIfMtu, ntwsApIfType=ntwsApIfType, ntwsApIfTable=ntwsApIfTable, ntwsApIfCompliances=ntwsApIfCompliances, ntwsApIfMibObjects=ntwsApIfMibObjects, ntwsApIfGroups=ntwsApIfGroups)
| [
"[email protected]"
] | |
18970aa0b8d05531fef6be6422178c963d0f2565 | d7fe48137c22a1a75fc671b3a6d27e16a6006318 | /tools/updateeventlist.py | 935c56ae2d3e08a2ef4c82cee4b33a477edc18b1 | [] | no_license | kcsfelty1337/demoinfo-csgo-python | ea180e0925b970ee966b443ea29283aba67c79dd | 9803b698142ede15402938044538f4d22981f15f | refs/heads/master | 2021-05-28T10:24:16.878136 | 2014-07-17T00:31:58 | 2014-07-17T00:31:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,631 | py | '''
Creates a formatted list of game events
Created on Jul 12, 2014
@author: Chris
'''
from demoinfocsgo.demodump import DemoDump
from demoinfocsgo.proto.netmessages_public_pb2 import svc_GameEventList, CSVCMsg_GameEventList
import sys
import json
import collections
# Accumulates game-event descriptors keyed by event name as the demo parses.
events = {}

# Maps descriptor key-type codes to readable type names; the codes are stored
# off by one in the protobuf, hence the `key.type + 1` lookup in the handler.
_GAMEEVENT_TYPES = {2:"string",
                    3:"float",
                    4:"long",
                    5:"short",
                    6:"byte",
                    7:"bool",
                    8:"uint64",
                    9:"wstring"}
def on_list_received(msg, data):
    """Decode a GameEventList message and record every descriptor.

    Fills the module-level ``events`` dict, keyed by event name, with the
    event id, name and a mapping of key name -> readable type.
    """
    event_list = CSVCMsg_GameEventList()
    event_list.ParseFromString(data)
    for desc in event_list.descriptors:
        params = {key.name: _GAMEEVENT_TYPES[key.type + 1]
                  for key in desc.keys}
        events[desc.name] = {
            "ID": desc.eventid,
            "name": desc.name,
            "params": params
        }
if __name__ == '__main__':
demo = DemoDump()
filename = sys.argv[1]
if len(sys.argv) <= 1:
print "updateeventlist.py demofile.dem"
sys.exit()
if demo.open(filename):
print "Beginning parsing"
demo.register_on_netmsg(svc_GameEventList, on_list_received)
demo.dump()
ordered = collections.OrderedDict(sorted(events.items()))
json_data = json.dumps(ordered, indent=4)
print json_data
f = open("../data/game_events.txt", "w")
f.write(json_data)
print "Saved to file data/game_events.txt" | [
"[email protected]"
] | |
80ed617f49dd786e0529d9c3caf25960f00a657b | 9d2a4be0db964ca2061aeae5cc469c19fcbc4c45 | /Language-Day-EL.py | 5c85fed474e17c85f9361857c4fb528a0060b580 | [
"Apache-2.0"
] | permissive | athenian-ct-projects/Language-Day-EL | 464f0242d70f3957aae617a7d6da22d67371d7b5 | 4f8c135e6574335891610b90dac87f8a75f60073 | refs/heads/master | 2021-03-31T15:53:01.021244 | 2020-03-18T02:30:42 | 2020-03-18T02:30:42 | 248,117,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,919 | py | # DO NOT RUN THIS CELL WHILE PLAYING GAME (IT RESETS PROGRESS)
# This sets list to nothing at start of game
fs=[]
# Vowel function:
# Checks if the input (that is a noun) starts with a vowel and inserts a or an
# depending on what it starts with. It also adds the input to the end of the
# list (fs).
def vowel():
    # Prefix the current noun (global ``x``) with the right English
    # indefinite article and append the phrase to the story list ``fs``.
    article = "an " if x[0] in "aeiouAEIOU" else "a "
    fs.append(article + x)
# Spanish el/la function
# Checks if the input (that is a noun) ends with a, and inserts la or el
# depending on what the noun ends with. It also adds the input to the end of the list (fs).
def span():
    # Spanish definite article: "la" for nouns ending in -a, otherwise "el";
    # appends the article + noun (global ``x``) to the story list ``fs``.
    fs.append(("la " if x.endswith("a") else "el ") + x)
# Spanish los/las function
# Checks if the input (that is a plural noun) ends with as, and inserts las or
# los depending on what the noun ends with. It also adds the input to the end of the list (fs).
def spanp():
    # Plural definite article: "las" for nouns ending in -as, otherwise
    # "los"; appends the result to the story list ``fs``.
    fs.append(("las " if x.endswith("as") else "los ") + x)
fs.append(vs)
# Spanish un/una function
# Checks if the input (that is a noun) ends with an a, and inserts una or un
# depending on what the noun ends with. It also adds the input to the end of the list (fs).
def span1():
    # Spanish indefinite article: "una" for nouns ending in -a, otherwise
    # "un"; appends the result to the story list ``fs``.
    fs.append(("una " if x.endswith("a") else "un ") + x)
# Intro/Instructions code:
# Code prints the instructions
# Input, start, checks if the player has run the first four cells.
start=input("STOP! Before you start, run the previous four cells before starting."
"If you have type ok to start instructions, type help if you haven't: ")
if start==("ok"):
print("Instrucciones para Mad Libs: ")
print("Escribe en el blanco con el parte del discurso: ")
print("Ex. Escribe un sustantivo: 'banana'")
print("En elf fín, va a imprimir tu cuenta: ")
print("Ex. Snow White estaba envenenada de una banana")
print("At the end your story will printed with your inputs. Also, do not run"
" the very first cell while playing. ")
elif start==("help"):
print("Run the past four cells (10-13). There will be no inputs of prints from "
"running these cells. Once you have, restart this cell.")
# example: Try one!
# Prints an example: prints instructions, then prints an input asking for a noun
print("¡Ahora prueba uno!")
y=input("Escribe un sustantivo: ")
# SKIP THIS CELL FOR NOW
# Pick a language code: (only have one language done)
# Function:
# Input that asks for a language, if the input is english then it will start
# in English, if not it will start in Spanish. If the input is neither then
# it will ask you to try again.
def langpick():
lang=input("Pick a language: English or Spanish ")
if (lang=="English" or lang=="english"):
print("Ok let's start our story in English.")
elif (lang=="Spanish" or lang=="spanish"):
print("Ok let's start our story in Spanish.")
else:
print("Try again.")
langpick()
return(lang)
language=langpick()
# The following cells (8-19), asks for inputs for the game. Each one asks for
# a noun, plural noun, verb, adjective, or emotion. Some will be specific to
# feminine or masculine.
# 0
x=input("Excribe un nombre: ")
fs.append(x)
#1
x=input("Excribe un sustantivo: ")
span1()
#2
x=input("Escribe un numero: ")
fs.append(x)
#3
x=input("Escribe una adjetiva femenina: ")
fs.append(x)
#4
x=input("Escribe dos adjectivas femeninas (ex. linda y bonita): ")
fs.append(x)
#5
x=input("Escribe un sunstantivo plural: ")
spanp()
#6
x=input("Escribe un numero: ")
fs.append(x)
#7
x=input("Escribe una adjetiva femenina: ")
fs.append(x)
#8
x=input("Escribe un sustantivo: ")
fs.append(x)
#9
x=input("Escribe un adjetivo masculino: ")
fs.append(x)
# This cell has two inputs on the end of the list, fs, because it is used twice
# in the story with different preceeding words. (The first one only adds the
# input. The second one uses the spanish plural function so los or las will
# preceed the word.)
#10
x=input("Escribe un sustantivo plural: ")
fs.append(x)
#11
spanp()
#12
x=input("Escribe un sustantivo: ")
span1()
#13
fs.append(x)
#14
x=input("Escribe un sustantivo: ")
fs.append(x)
#15
x=input("Escribe una adjetiva feminina: ")
fs.append(x)
# This cell checks if the input is a verb. It checks if the input ends with
# -ar, -er, or -ir. If it doesn't it will ask you to try it again.
#16
x=input("Escribe un infinitivo: ")
while not x.endswith("ir") and x.endswith("ar") and x.endswith("er"):
x=input("Trata otra vez. Escribe un infinitivo: ")
fs.append(x)
#17
x=input("Escribe una emoción masculino: ")
fs.append(x)
#18
x=input("Escribe un sustantivo plural: ")
spanp()
#19
x=input("Escribe un sustantivo: ")
span1()
# Prints out your inputs from the previous cells.
print("Here are your inputs: ")
for y in fs:
print(y)
# The next cells (30-39), print out the finished story using the inputs. It uses
# each input in the list, fs, by calling fs[number input is in the list].
print("Había una vez, había una chica se llama "+fs[0]+". Su madre se murió"
" cuando la chica era muy pequeña. Ella sólo tenía su padre y "+fs[1]+
" para compañía. Ahora ella tenía "+fs[2]+" hermanastras. Ellas eran"
" maliciosas y eran las hijas de la nueva esposa de su padre."
"La "+fs[3]+" madrastra de la chica no le gustaba a ella. "
+fs[0]+" se convirtió una servidora para las hermanastras."
" Ellas empezaban a llamarla Cenicienta porque ella sentaba en cenizas. "
+fs[0]+" era "+fs[4]+" y simpática, entonces todos les gustaban a ella "
"excepto su hermanastras.")
print("Cuando Cenicienta tenía 17 años, el rey tenía un baile donde "
+fs[5]+" y las chicas en la ciudad estaban invitadas. Cenicienta"
" empezaron a ella a vestirlas, pero ellas no pudieron permitir a ella si ir al"
" baile. Cuando las hermanastras salieron, Cenicienta lloró por "
+fs[6]+" horas.")
print("Pero una misteriosa y "+fs[7]+" voz llamó a Cenicienta. ¡Era su hada madrina! "
"Ella dijo a Cenicienta, 'No llores no más " +fs[0]+ ". Vas a ir al baile "
"porque eres " +fs[4]+ ". Trae una calabaza a mi.' Ella la trató y su "
"hada madrina la tocó con su "+fs[8]+".")
print("La calabaza transformó en un "+fs[9]+" coche. La hada madrina pidió "
+fs[10]+" y Cenicienta obedeció. También "
+fs[11]+" transformaron en lacayos. Ahora la hada madrina tocó la ropa "
"de la chica y transformó en "+fs[12]+".")
print("Cenicienta tenía zapatos de "+fs[14]+" y se miró muy linda. “Ahora puedes "
"ir al baile,' la hada madrina dijo, 'pero necesitas salir antes de "
"medianoche. A la medianoche, tu "+fs[12]+" va a transformó en harapos."
" Cenicienta dijo gracias y salió. Cuando llegó al castillo, todos"
" miraban a ella y el príncipe le pedí que bailar consigo.")
print("Cuando sus hermanastras llegaron a la casa donde Cenicienta estaba, "
"ellas hablaban de una princesa elegante y "+fs[15]+". El príncipe quería hablar "
"con la princesa otra vez y tenía otro baile. Las hermanastras de Cenicienta "
"salieron para el baile y su hada madrina apareció otra vez con el coche, "
"lacayos, "+fs[13]+", y zapatos.")
print("Otra vez ella bailaba con el príncipe y salió antes de la medianoche. "
"Otra vez el príncipe quería bailar y "+fs[16]+" con la princesa, entonces tenía "
"otro baile.")
print("Este tiempo, el príncipe no la dejaría, pero al doce menos cinco, "
"ella corrió y dejó un zapato. El príncipe fue "+fs[17]+" y sorprendido.")
print("Él buscó ella todo el próximo día y quería casar ella. Todas "
+fs[18]+" y las mujeres trataban de llevar el zapato.")
print("Cuando Cenicienta llevó el zapato, sus hermanastras se rían a ella, "
"pero lo cupe Cenicienta. Ella sacó el otro zapato y "+fs[19]+". Ellos "
"casaron y vivieron felices para siempre.")
print("Thanks for playing!")
| [
"[email protected]"
] | |
37c1c3091247a88ff307abacfcd63fbc7b304bb5 | 8cce087dfd5c623c2f763f073c1f390a21838f0e | /projects/versioneer/test.py | 32fd79d45d8ce3d9b488d3feae6bca952e983ac1 | [
"Unlicense"
] | permissive | quinn-dougherty/python-on-nix | b2ae42761bccf7b3766999b27a4674310e276fd8 | 910d3f6554acd4a4ef0425ebccd31104dccb283c | refs/heads/main | 2023-08-23T11:57:55.988175 | 2021-09-24T05:55:00 | 2021-09-24T05:55:00 | 414,799,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18 | py | import versioneer
| [
"[email protected]"
] | |
7db90b76ad8b3755f314e61da0f7b4ddf29bd341 | ce196aba0adde47ea2767eae1d7983a1ef548bb8 | /lambda单行表达式_0.py | dd751c9cc43d2361811c60ac8ee87e8da1b77fb7 | [] | no_license | xiang-daode/Python3_codes | 5d2639ffd5d65065b98d029e79b8f3608a37cf0b | 06c64f85ce2c299aef7f9311e9473e0203a05b09 | refs/heads/main | 2023-08-30T14:59:55.123128 | 2021-11-03T05:12:24 | 2021-11-03T05:12:24 | 333,632,892 | 0 | 2 | null | null | null | null | GB18030 | Python | false | false | 500 | py | #!/usr/bin/python
# -*- coding: cp936 -*-
b= [x for x in range(2,100) if not[y for y in range(2,int(x**0.5)) if not x%y]]
print("100以内的全部质数是:",b)
c= [y for y in range(2,36)]
print('2--35全部输出',c)
b= [x for x in range(2,24) if True]
print('2--23全部输出',b)
d= [x for x in range(2,24) if False]
print('无返回: ',d)
d= [x for x in range(1,25) if x%2]
print('奇数有:',d)
d= [x for x in range(1,25) if not x%5]
print('5的倍数有:',d)
| [
"[email protected]"
] | |
f28e72b2529f3e6f641908d677f59d1829fa37be | 5cdc56d480380ba519be89a6f25bc7a2b306ce7c | /watson-framework-archive/imcrypt/app.py | 77f6431232a874ce0a4b892469340710fd2b8299 | [
"BSD-2-Clause"
] | permissive | sigmundv/encrypt-image | c94d05d294565a0f5c2248022eea54a73f7c96f6 | b37bc93b56be1cc7934c44705df3ba52906fe18e | refs/heads/master | 2020-12-02T23:57:40.171386 | 2017-07-22T15:20:04 | 2017-07-22T15:20:04 | 95,965,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | # -*- coding: utf-8 -*-
from watson.framework import applications
from imcrypt.config import base as config

# HTTP application entry point, configured from imcrypt.config.base.
application = applications.Http(config)
| [
"[email protected]"
] | |
9c9da1e97169f478d6774822d571de995bc96c6f | baf1dddb1e1dd4ff878438726c4f78736895e994 | /reddwarf/tests/api/mgmt/instances_actions.py | 08e895727dc550af5aefd92c704584dbb2ecc15a | [] | no_license | SlickNik/reddwarf | a8bd0a5e9431ff9bcba02e9134ab42f9ac1ab4c6 | 267e29477909f237ccf06e4c86e63be7ba54bd07 | refs/heads/master | 2020-04-06T04:52:30.684626 | 2013-04-30T20:25:13 | 2013-04-30T20:26:00 | 8,933,765 | 0 | 0 | null | 2015-11-10T14:36:18 | 2013-03-21T16:56:17 | Python | UTF-8 | Python | false | false | 4,700 | py | # Copyright 2013 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mox
from reddwarf.common.context import ReddwarfContext
from reddwarf.instance.tasks import InstanceTasks
from reddwarf.instance import models as imodels
from reddwarf.instance.models import DBInstance
from reddwarf.extensions.mgmt.instances.models import MgmtInstance
from novaclient.v1_1.servers import Server
from proboscis import test
from proboscis import before_class
from proboscis import after_class
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_raises
from reddwarf.common import exception
from reddwarf.extensions.mgmt.instances.service import MgmtInstanceController
GROUP = "dbaas.api.mgmt.action.reset-task-status"
class MgmtInstanceBase(object):
    """Shared fixture: a fake DB instance plus mox mocks for mgmt API tests."""

    def setUp(self):
        self.mock = mox.Mox()
        self._create_instance()
        self.controller = MgmtInstanceController()

    def tearDown(self):
        # Remove the DB record created in _create_instance.
        self.db_info.delete()

    def _create_instance(self):
        # The mgmt API requires an admin context.
        self.context = ReddwarfContext(is_admin=True)
        self.tenant_id = 999
        self.db_info = DBInstance.create(
            name="instance",
            flavor_id=1,
            tenant_id=self.tenant_id,
            volume_size=None,
            task_status=InstanceTasks.NONE)
        # Nova server is mocked; the Instance wrapper ties context, DB row
        # and server together with a fixed ACTIVE service status.
        self.server = self.mock.CreateMock(Server)
        self.instance = imodels.Instance(self.context,
                                         self.db_info,
                                         self.server,
                                         service_status="ACTIVE")

    def _make_request(self, path='/', context=None, **kwargs):
        # Build a bare webob request carrying the reddwarf context in the
        # WSGI environ, the way the middleware would.
        from webob import Request
        path = '/'
        print "path:", path
        return Request.blank(path=path, environ={'reddwarf.context': context},
                             **kwargs)

    def _reload_db_info(self):
        # Re-read the instance row so assertions see persisted state.
        self.db_info = DBInstance.find_by(id=self.db_info.id, deleted=False)
@test(groups=[GROUP])
class RestartTaskStatusTests(MgmtInstanceBase):
    """Exercises the mgmt 'reset-task-status' instance action."""

    @before_class
    def setUp(self):
        super(RestartTaskStatusTests, self).setUp()

    @after_class
    def tearDown(self):
        super(RestartTaskStatusTests, self).tearDown()

    def _change_task_status_to(self, new_task_status):
        # Persist an arbitrary task status on the instance row.
        self.db_info.task_status = new_task_status
        self.db_info.save()

    def _make_request(self, path='/', context=None, **kwargs):
        # Extend the base request with the POST body for the action call.
        req = super(RestartTaskStatusTests, self)._make_request(path, context,
                                                                **kwargs)
        req.method = 'POST'
        body = {'reset-task-status': {}}
        return req, body

    def reset_task_status(self):
        # Stub MgmtInstance.load so the controller receives our fake
        # instance, then drive the action and verify the mock expectations.
        self.mock.StubOutWithMock(MgmtInstance, 'load')
        MgmtInstance.load(context=self.context,
                          id=self.db_info.id).AndReturn(self.instance)
        self.mock.ReplayAll()

        req, body = self._make_request(context=self.context)
        self.controller = MgmtInstanceController()
        resp = self.controller.action(req, body, self.tenant_id,
                                      self.db_info.id)

        self.mock.UnsetStubs()
        self.mock.VerifyAll()
        return resp

    @test
    def mgmt_restart_task_requires_admin_account(self):
        # A non-admin context must be rejected with Forbidden.
        context = ReddwarfContext(is_admin=False)
        req, body = self._make_request(context=context)
        self.controller = MgmtInstanceController()
        assert_raises(exception.Forbidden, self.controller.action,
                      req, body, self.tenant_id, self.db_info.id)

    @test
    def mgmt_restart_task_returns_json(self):
        # The action has no payload; JSON serialization yields None.
        resp = self.reset_task_status()
        out = resp.data("application/json")
        assert_equal(out, None)

    @test
    def mgmt_restart_task_returns_xml(self):
        resp = self.reset_task_status()
        out = resp.data("application/xml")
        assert_equal(out, None)

    @test
    def mgmt_restart_task_changes_status_to_none(self):
        # BUILDING -> action -> NONE, verified against the reloaded DB row.
        self._change_task_status_to(InstanceTasks.BUILDING)
        self.reset_task_status()
        self._reload_db_info()
        assert_equal(self.db_info.task_status, InstanceTasks.NONE)
| [
"[email protected]"
] | |
b70b5c7cf84e0b280ac098442bbcf016824aff58 | 358720a705bb79fd22d8d6d7a54219ea875272e9 | /src/searchml.py | 78750da7afbc7222401aeadaaee856d5983e02bf | [
"MIT"
] | permissive | gbrian/naive-machine-learning | 4aabba3af76c190ad4254f7281e5d1f9c80daea8 | 2f31be1111f3cb94e8fa8df00837000fdf30dac1 | refs/heads/master | 2021-01-09T20:19:27.145016 | 2016-07-29T08:08:39 | 2016-07-29T08:08:39 | 64,289,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,683 | py | # http://chrisstrelioff.ws/sandbox/2015/06/08/decision_trees_in_python_with_scikit_learn_and_pandas.html
from __future__ import print_function
import pickle
import os
import subprocess
from optparse import OptionParser
import pandas as pd
from sklearn.tree import DecisionTreeClassifier, export_graphviz
# Parse args
def getArgs(argv=None):
    """Build the CLI option parser and parse *argv*.

    :param argv: optional explicit argument list; when None (the default)
        optparse falls back to sys.argv[1:], preserving the original
        behaviour while making the parser unit-testable.
    :returns: the (options, args) pair from OptionParser.parse_args.
    """
    parser = OptionParser()
    parser.add_option("-f", "--file", dest="filename",
                      help="data file", metavar="FILE")
    parser.add_option("-s", "--treefile", dest="treefile",
                      help="decision tree file", metavar="FILE")
    parser.add_option("-t", "--target",
                      dest="target", default=-1,
                      help="target column, by default last one in the dataset")
    parser.add_option("-r", "--nrows",
                      dest="nrows", default=-1,
                      help="number of rows to read")
    parser.add_option("-d", "--data",
                      dest="data", help="data to test")
    parser.add_option("", "--testFile",
                      dest="testFile", help="data file to test")
    parser.add_option("-v", "--visualize",
                      dest="visualize", help="Create visualizaton")
    parser.add_option("-b", "--boolean",
                      dest="boolean", help="Transform target to boolean")
    return parser.parse_args(argv)
(options, args) = getArgs()
print(str(options))
print(str(args))
def get_data(dataFile):
    """Read *dataFile* as CSV (first column = index), honouring the global
    ``options.nrows`` row limit (-1 means read everything)."""
    row_limit = int(options.nrows) if int(options.nrows) != -1 else None
    print("File found, reading %s rows" % row_limit)
    return pd.read_csv(dataFile, index_col=0, nrows=row_limit)
def visualize_tree(treefile, tree, feature_names):
    """Write <treefile>.dot for *tree* and render it to <treefile>.pdf.

    Requires the graphviz ``dot`` executable on PATH; exits with a message
    when it cannot be run.
    """
    dot_path = treefile + ".dot"
    pdf_path = treefile + ".pdf"
    with open(dot_path, 'w') as sink:
        export_graphviz(tree, out_file=sink, feature_names=feature_names)
    try:
        subprocess.check_call(["dot", "-Tpdf", dot_path, "-o", pdf_path])
    except:
        exit("Could not run dot, ie graphviz, to "
             "produce visualization")
def saveTree(treefile, tree):
    """Pickle *tree* to *treefile* (binary mode)."""
    with open(treefile, 'wb') as sink:
        pickle.dump(tree, sink)
def loadTree(treefile):
    """Return the pickled tree stored at *treefile*, or None if it is absent."""
    if not os.path.exists(treefile):
        return None
    # Context manager closes the handle promptly; the original passed an
    # open() result straight into pickle.load and leaked the file object.
    with open(treefile, "rb") as source:
        return pickle.load(source)
def buildTree(options, treefile, dataFile = None):
    """Return a fitted DecisionTreeClassifier, loading a cached pickle from
    *treefile* when available, otherwise fitting from *dataFile* (a single
    CSV or a directory of CSVs) and persisting + visualizing the result."""
    dt = loadTree(treefile)
    if dt is not None:
        return dt
    if dataFile is None:
        raise ValueError("No data file specified")
    dt = DecisionTreeClassifier(min_samples_split=20, random_state=99)
    files = []
    featureFrames = []
    targetFrames = []
    # Accept either one CSV file or a directory containing many.
    if os.path.isdir(dataFile):
        files = getFiles(dataFile, ".csv")
    else:
        files.append(dataFile)
    for _file in files:
        print("Loading data %s" % _file)
        (featureValues, targetValues, features, df) = loadData(_file, options)
        featureFrames.append(featureValues)
        targetFrames.append(targetValues)
    # Fit on the concatenation of every loaded file, then cache to disk.
    dt.fit(pd.concat(featureFrames), pd.concat(targetFrames))
    saveTree(treefile, dt)
    print("Building graph")
    # NOTE(review): 'features' is whatever loadData returned for the LAST
    # file in the loop -- assumes all CSVs share identical columns; confirm.
    visualize_tree(treefile, dt, features)
    return dt
def loadData(dataFile, options):
    """Load *dataFile* and split it into features and target.

    :param dataFile: CSV path handed to get_data.
    :param options: parsed CLI options; uses ``target`` (column index as a
        string, -1 = last column) and ``boolean`` (collapse target to 0/1).
    :returns: (featureValues, targetValues, features, df)
    """
    df = get_data(dataFile)
    # options.target arrives as a *string* from the command line; coerce to
    # int so it can be used for positional column indexing (the original
    # used it unconverted, which broke df.columns[targetColumn] whenever
    # -t was actually supplied).
    targetColumn = (int(options.target) if int(options.target) != -1
                    else len(df.columns) - 1)
    features = list(df.columns[:targetColumn])
    featureValues = df[features]
    targetValues = df[df.columns[targetColumn]]
    if options.boolean is not None:
        print("Converting target to boolean")
        # Collapse the target to 0/1: any non-zero value becomes 1.
        targetValues = targetValues.map(lambda x: 0 if x == 0 else 1)
    return (featureValues, targetValues, features, df)
def testData(options, dt):
    # Print predictions for the sample(s) supplied inline via -d/--data.
    # SECURITY: eval() executes arbitrary Python taken from the command
    # line; ast.literal_eval would be the safe replacement for plain
    # data literals.
    print(list(dt.predict(eval(options.data))))
def getFiles(dir, extension):
    """Return paths of non-directory entries in *dir* ending with *extension*.

    Uses os.path.join so paths are correct on every platform; the original
    hard-coded a Windows "\\" separator.
    """
    return [os.path.join(dir, name)
            for name in os.listdir(dir)
            if name.endswith(extension)
            and not os.path.isdir(os.path.join(dir, name))]
def testDataFile(options):
    """Score options.testFile with every saved tree (or the single tree at
    options.treefile) and write the input plus one prediction column per
    tree to <testFile>.prediction.csv."""
    dataFile = options.testFile
    (featureValues, targetValues, features, df) = loadData(dataFile, options)
    trees = []
    # options.treefile may be one pickled tree or a directory of .dt files.
    if os.path.isdir(options.treefile):
        for _file in getFiles(options.treefile, ".dt"):
            trees.append((buildTree(options, _file), os.path.basename(_file)))
    else:
        trees.append((buildTree(options, options.treefile),
                      os.path.basename(options.treefile)))
    for tree in trees:
        print("Loading prediction tree %s" % tree[1])
        prediction = list(tree[0].predict(featureValues))
        # New column per tree, named after the tree's file name.
        df[tree[1]] = pd.Series(prediction, index=df.index)
    _file = dataFile + '.prediction.csv'
    print("Saving prediction %s " % _file)
    df.to_csv(_file)
# Dispatch on the parsed command line: inline sample test (-d), file-based
# prediction (--testFile), or (re)building a tree from -f together with -s.
if options.data <> None:
    print("Testing %s" % options.data)
    # NOTE(review): testData is defined as testData(options, dt) but is
    # called here with a single argument -- this path raises TypeError as
    # written; confirm which tree was meant to be passed.
    testData(options)
elif options.testFile <> None:
    print("Create prediction %s" % options.testFile)
    testDataFile(options)
elif options.filename is not None and options.treefile is not None:
    print("Building tree %s" % options.treefile)
    buildTree(options, options.treefile, options.filename)
| [
"Ramones77"
] | Ramones77 |
8949fd27f20bac7ca8fa3e52863191da0940371b | 98fbbd0518b5618746e28d4e86d1ea5d1107759f | /home/migrations/0011_auto_20190321_1456.py | 667175809b0645abd674bdfb8877ce774ca8e5c1 | [] | no_license | fabriciolc/tupperTools | 2670f3baa203cb20cf4af13b0f7ad4603a1b6562 | 93bc4fbe9cdab6ae113304b99d5decfc23315d16 | refs/heads/master | 2020-05-01T01:02:20.893023 | 2019-04-30T17:05:08 | 2019-04-30T17:05:08 | 177,185,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | # Generated by Django 2.1.7 on 2019-03-21 14:56
from django.db import migrations
class Migration(migrations.Migration):
    # Drops the Ocorrencia model: removes its remaining fields first, then
    # deletes the model itself.

    dependencies = [
        ('home', '0010_auto_20190321_1419'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='ocorrencia',
            name='codigo_consultora',
        ),
        migrations.RemoveField(
            model_name='ocorrencia',
            name='status',
        ),
        migrations.DeleteModel(
            name='Ocorrencia',
        ),
    ]
| [
"[email protected]"
] | |
1f970ca9cf4bace4e59a2748dfe36151bf58600f | 330db133f16c294b8d0f4bb7213bbafc18a86d55 | /tests/test_find_command.py | 6c56992ff984d16d04e47bcf0e1cbebdf3168e13 | [
"MIT"
] | permissive | bgschiller/clickthrough | 612cedf070f1599ef140c720ee18e62a760a8dd9 | ea68a1412cc54ba6ec40d0a760c6a93c8cf57255 | refs/heads/master | 2020-05-18T01:53:44.135760 | 2014-07-13T22:32:01 | 2014-07-13T22:32:01 | 21,776,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | import clickthrough
import os
import shutil
from nose.tools import with_setup, assert_raises
def test_find_command():
    # Both a bare module path and a module.attribute path should resolve.
    clickthrough.get_command('hello')
    clickthrough.get_command('hello.hello')
def test_find_on_bad_command():
    # Unknown module or attribute names must raise.
    assert_raises(Exception, #should be more specific...
                  clickthrough.get_command,
                  'nonexistent')
    assert_raises(Exception,
                  clickthrough.get_command,
                  'hello.nonexistent')
def create_deep_module():
    # Build a nested package on disk (deeper_module/hello.py) so that
    # dotted command lookups can be exercised.
    os.mkdir('deeper_module')
    open('deeper_module/__init__.py','a').close()
    shutil.copy('hello.py','deeper_module/hello.py')
def destroy_deep_module():
shutil.rmtree('deeper_module')
@with_setup(create_deep_module, destroy_deep_module)
def test_deeper_find_command():
clickthrough.get_command('deeper_module.hello')
clickthrough.get_command('deeper_module.hello.hello')
| [
"[email protected]"
] | |
8e9785baac8ff556a581e1e9f2f3a55fba065370 | 771de2d30593f78ec171e11adcf6c5c2e27650a4 | /model/albert/cnn.py | 07eeaaf3bd9d0a3d8901e72f4ca22d0887a4ac98 | [] | no_license | hsuehkuan-lu/sentiment-classification | e32af916d801b221bb0645e2884dcd0257ee6582 | 0b206e2d9a7d1212b707b8157a963c42122cb151 | refs/heads/main | 2023-06-13T23:42:06.461165 | 2021-07-11T07:12:37 | 2021-07-11T07:12:37 | 376,563,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,561 | py | import torch
import torch.nn as nn
from model.base import ModelBase
from transformers import AlbertModel
class Model(ModelBase):
def __init__(self, bert_hidden_size, hidden_size, kernel_size, dropout, pretrained_model, *args, **kwargs):
super(Model, self).__init__()
# [B x L] -> [B x L x D], [B x D]
self.bert = AlbertModel.from_pretrained(pretrained_model)
self.dropout = nn.Dropout(dropout)
self.conv = nn.Sequential(
nn.Conv1d(bert_hidden_size, hidden_size, kernel_size),
nn.BatchNorm1d(hidden_size),
nn.ReLU()
)
self.out = nn.Linear(hidden_size, 1)
self.init_weights()
def forward(self, tokens, masks=None):
# BERT
# [B x L x D], [B, D] (pooled_outputs)
x = self.bert(tokens, attention_mask=masks)
x = self.dropout(x.last_hidden_state)
x = self.conv(x.permute(0, 2, 1))
x = torch.max(x, 2)[0]
return nn.Sigmoid()(self.out(x))
def load_model(self, model_path):
self.load_state_dict(torch.load(model_path))
self.eval()
def init_weights(self):
def init_conv(m):
if type(m) == nn.Conv1d:
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0)
initializer_range = 0.02
self.conv.apply(init_conv)
nn.init.normal_(self.out.weight, std=initializer_range)
nn.init.constant_(self.out.bias, 0)
def save_model(self, model_path):
torch.save(self.state_dict(), model_path)
| [
"[email protected]"
] | |
234d3f7663e86886eea126cca3488d474d5b0a96 | cbf4e6e92f4cbb4b56002de0c7cdf369a7e5a2d0 | /vote/urls.py | b13e5e8247a898de89519bf0b98c5e44dc67987b | [] | no_license | sasha361322/vote_site | b1c827284a1bf214e1a35533fcadfee11d4c44e1 | 2e641a96e0efb1b82bbcb49c87bbb4f58ab3fa38 | refs/heads/master | 2020-04-05T23:16:54.352591 | 2016-01-12T19:15:33 | 2016-01-12T19:15:33 | 49,219,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | from django.conf.urls import patterns, include, url
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^vote/get/(?P<vote_id>\d+)/$', 'vote.views.vote'),
url(r'^vote/addanswer/(?P<vote_id>\d+)/(?P<answer_id>\d+)/$', 'vote.views.addanswer'),
url(r'^vote/addvote/$', 'vote.views.addvote'),
url(r'^vote/addanswers/(?P<vote_id>\d+)/$', 'vote.views.addanswers'),
url(r'^page/(\d+)/$', 'vote.views.votes'),
url(r'^', 'vote.views.votes'),
] | [
"[email protected]"
] | |
b249358dff9c245fe79b7fef88d3504beacff288 | 7783f5498bc0d5a4bc141e54f30a76c74d19ee07 | /web/webserver.py | c4af9a5ec7b2a21606c38a38640bc7c43807d51e | [] | no_license | danhamilt1/muons | 14d9205c973f6e228c012f7cb6de7abf55ba60db | 4e372572d2e821175f898edb15e437dec8aa0f32 | refs/heads/master | 2020-12-26T02:39:41.168946 | 2014-11-05T20:07:29 | 2014-11-05T20:07:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,257 | py | # from XRootD import client
from flask import Flask, render_template, url_for, send_from_directory
from datetime import datetime
# import pygal
# from pygal.style import LightSolarizedStyle
# from detector import Event
import random
app = Flask(__name__)
@app.route("/")
def home():
now = datetime.now()
timeString = now.strftime("%Y-%m-%d %H:%M")
# with client.File() as f:
# f.open('root://localhost//tmp/event.txt')
# events = list()
#
# for line in f.readlines():
# event = Event()
# event.__dict__.update(json.loads(line))
# events.append(event)
#
# energies = list()
# for event in events:
# energies.append(event.data[0] + random.random())
#
# bar_chart = pygal.Bar(style=LightSolarizedStyle)
# bar_chart.add('Energies', energies)
# chart = bar_chart.render(is_unicode=True)
data = list()
with open('test/test.txt', 'r') as f:
bits = f.readline().split()
previous = datetime.fromtimestamp(int(bits[0]) / 1e9)
energy = ((int(bits[2]) + int(bits[4])) / 2) * 0.2
data.append([0, energy])
for line in f:
bits = line.split()
current = datetime.fromtimestamp(int(bits[0]) / 1e9)
delta = current - previous
delay = (delta.seconds * 1000.0) + (delta.microseconds / 1000.0)
previous = current
energy = ((int(bits[2]) + int(bits[4])) / 2) * 0.2
data.append([delay, energy])
# Make a copy and shuffle it
data2 = data[:]
random.shuffle(data2)
templateData = {
'title': 'Muon detector web interface',
'time': timeString,
# 'events': events,
# 'chart': chart,
'node_id': 10,
'data': data,
'data2': data2
}
return render_template('main.html', **templateData)
@app.route("/ol3")
def ol3_test():
templateData = {
'title': 'ol3 test'
}
return render_template('map.html', **templateData)
@app.after_request
def after_request(response):
response.headers.add('Access-Control-Allow-Origin', '*')
return response
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8080, debug=True)
| [
"[email protected]"
] | |
f186738e39d4f69aeca3c6b07920c064c8ae49d8 | 9b83834db424c4ae525ae161ccf40dd8ac2c17ba | /tools/pdf-req-parser/autosar_pdf_parser/__init__.py | a9ffe51834f557d8e38b9ab5c35d3d72c5fec889 | [
"MIT"
] | permissive | mmyszke/AUTOSAR-Adaptive | cc2e5ba6462a2de78f87a49dccefd73be4c136b2 | c45550b9e1e450de34b2ea756443d296bdbb9928 | refs/heads/master | 2022-11-30T00:58:46.007731 | 2020-08-07T06:59:46 | 2020-08-07T06:59:46 | 267,556,525 | 0 | 0 | MIT | 2020-05-29T12:13:07 | 2020-05-28T10:08:46 | null | UTF-8 | Python | false | false | 2,883 | py | import os
import simplejson
from PyPDF2 import PdfFileReader
class SimpleRequirementsParser(object):
def __init__(self, path, verbose=False):
self.input_directory = path
self.input_list_file = '{}/input.json'.format(path)
self.input_list = SimpleRequirementsParser.get_input_list(self.input_list_file)
self.verbose = verbose
self.requirements = {}
self.parse()
@staticmethod
def is_input_path_correct(path):
input_list_file = '{}/input.json'.format(path)
return os.path.isfile(input_list_file)
@staticmethod
def get_input_list(path):
with open(path, 'rb') as file:
return simplejson.load(file)
@staticmethod
def find_all_entries_between(text, p, q, required_char=None):
entries = []
entry = ''
found = False
for ch in text:
if ch == p:
found = True
entry += ch
elif ch == q:
entry += ch
if not required_char:
entries.append(entry.replace('{}{}'.format(p, p), p).replace('{}{}'.format(q, q), q))
else:
if required_char in entry:
entries.append(entry.replace('{}{}'.format(p, p), p).replace('{}{}'.format(q, q), q))
entry = ''
found = False
if found:
entry += ch
return entries
def log(self, text):
if self.verbose:
print(text)
def parse(self):
for input_entry in self.input_list:
file_path = '{}/{}'.format(self.input_directory, input_entry['filename'])
self.log('Processing new file: {}'.format(file_path))
with open(file_path, 'rb') as f:
self.requirements[input_entry['filename']] = {} # new key in requirements list
pdf = PdfFileReader(f)
entries = []
for page_num in range(input_entry['first_page'] - 1, input_entry['last_page']):
self.log('\tPage {}/{}'.format(page_num + 1, input_entry['last_page']))
page = pdf.getPage(page_num)
text = page.extractText().replace('\n', '')
entries.extend(SimpleRequirementsParser.find_all_entries_between(text, '[', ']', required_char='_'))
# only for SWS files (temporary)
# todo: implement other types (_RS_, _TPS_)
current_rs = None
for entry in entries:
if 'RS' in entry:
current_rs = entry
self.requirements[input_entry['filename']][current_rs] = {}
elif 'SWS' in entry:
self.requirements[input_entry['filename']][current_rs][entry] = {'more_data': 'soon'}
| [
"[email protected]"
] | |
aa5a6d6bffb3faf7cd5237c368d24247a9cfb507 | 0c4b71fcf867fbd32148f84f228fd510cc4e46d9 | /lastmile/migrations/0016_actor_agreement.py | e62c961eff152a77250d9ac1484ef3150b292af7 | [] | no_license | acounsel/lastmile | 0f4a88c767752b0fd160d536024895250b5cb633 | e54ec5357d03cf083ea3bbff0a8f1ccd5ccd832b | refs/heads/master | 2021-08-26T00:39:58.089921 | 2021-08-13T18:25:58 | 2021-08-13T18:25:58 | 242,027,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | # Generated by Django 3.0.5 on 2020-05-13 03:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lastmile', '0015_agreement_slug'),
]
operations = [
migrations.AddField(
model_name='actor',
name='agreement',
field=models.ManyToManyField(blank=True, to='lastmile.Agreement'),
),
]
| [
"[email protected]"
] | |
70b79e0772e5b3d392cca2ec29ce149af520b38c | 583e9099d4905a554bbd8a59e1fce6ebe16f34a7 | /coords.py | 615f56a2e8089d22d9718f5d7396f88a1cd019b0 | [] | no_license | App24/Minecraft-Coordinate-Convertor | f74c2e2bb1f9cd93012bf077dd5805601ef0cd18 | 343f6e5f6d08fb8efa76225ec74e58c17194e355 | refs/heads/master | 2022-12-23T05:16:29.589715 | 2020-09-30T05:29:19 | 2020-09-30T05:29:19 | 299,817,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,009 | py | import tkinter #Tkinter is a module that comes with python
from tkinter.font import * #Prevents doing tkinter.font.Font
def popup(title,_text,_fg="black"):
toplevel = tkinter.Toplevel()
#Setting Icon
icon=tkinter.PhotoImage(file="icon.gif")
toplevel.tk.call("wm","iconphoto", toplevel._w, icon)
#Setting Size and prevent the window being resized
toplevel.title(title)
toplevel.resizable(width=False, height=False)
helv24 = Font(family='Helvetica', size=20, weight='bold')
label1=tkinter.Label(toplevel, text=_text,font=helv24, anchor="center",fg=_fg)
ok=tkinter.Button(toplevel, text="Ok", command=toplevel.destroy)
label1.pack()
ok.pack()
def convert(_x, _z, _dim, _name):
if _name=="":
popup("Error","You must input a name!","red")
return
if _x == "":
_x="0"
if _z == "":
_z="0"
#Let 'x' and 'z' be used for overworld coords and 'tempX' and 'tempZ' be for nether coords
x=float(_x)
z=float(_z)
tempX=x
tempZ=z
dim=int(_dim)
if dim==0:
tempX=x/8
tempZ=z/8
elif dim==1:
x=tempX*8
z=tempZ*8
popup("Completion","Done!")
lines="Overworld:\nX: "+str(x)+"\nZ: "+str(z)+"\nNether:\nX: "+str(tempX)+"\nZ: "+str(tempZ)
"""Stitches all of the variabled together with pretty text
'\n' means new line"""
with open(_name+" coords.txt", "w") as f:
"""Opens the file that contains the variable '_name' plus ' coords.txt' in writting mode
this mode allows for the creation of the file if there is non and rewrites over it
Look up the different modes for a better understanding"""
f.writelines(lines)
def validate(char, entry_value):
chars=entry_value[:-1] #Selects all the text in the textbox except the last character
if (not char.isdigit() and not "." in char and not "-" in char) or ("." in chars and "." in char) or ("-" in chars and "-" in char):
"""if entered char is not a digit or is not "." or is not "-" then return false.
If the entered char is "." and there is already a "." in text, return false.
If the entered char is "-" and there is already a "-" in text, return false"""
return False
else:
return True
def converter():
root = tkinter.Tk()
#Setting Icon
icon=tkinter.PhotoImage(file="icon.gif")
root.tk.call("wm","iconphoto", root._w, icon)
#Setting Title
root.title("Minecraft Coordinates Convertor")
#Setting Size and prevent the window being resized
root.geometry('{}x{}'.format(535, 250))
root.resizable(width=False, height=False)
#Used to check the inputs for 'entry_x' and 'entry_z'
vcmd = (root.register(validate), '%S', '%P')
#Setting font up
helv24 = Font(family='Helvetica', size=24, weight='bold')
label_name = tkinter.Label(root, text="Name:", font=helv24)
label_x = tkinter.Label(root, text="X coord:", font=helv24)
label_z = tkinter.Label(root, text="Z coord:", font=helv24)
entry_name = tkinter.Entry(root, font=helv24)
entry_x = tkinter.Entry(root, validate = 'key', validatecommand = vcmd, font=helv24)
entry_z = tkinter.Entry(root, validate = 'key', validatecommand = vcmd, font=helv24)
var=tkinter.IntVar()
dim = tkinter.Checkbutton(root, text="Nether",variable=var, font=helv24)
convertB = tkinter.Button(root, text="Convert", command=lambda:convert(entry_x.get(),entry_z.get(),var.get(),entry_name.get()), font=helv24)
#In this case lambda is used to allow parsing of arguments
#Placing the different widgets into their place
label_name.grid(row=0, padx=(42, 10))
label_x.grid(row=1, padx=(10, 10))
label_z.grid(row=2, padx=(10, 10))
entry_name.grid(row=0,column=1, padx=(10,10))
entry_x.grid(row=1,column=1, padx=(10,10))
entry_z.grid(row=2,column=1, padx=(10, 10))
dim.grid(columnspan=3)
convertB.grid(columnspan=4)
root.mainloop()
def viewer():
pass
converter() | [
"[email protected]"
] | |
fc852f88b4e0c78bbfc8a6e3ef6906da90cd2fdc | 5e152949f9df4a2821cf0b5973af0b525dd895fc | /train.py | 480b50f3426accd2e1d4a00def40b477d64518f8 | [] | no_license | Chunlinx/WSAN4SNLI | 04ab4392219c02fe2e19923064ac154e7fc4ff3e | f331e587902934a5ec578a2df2dc0d8033e9a60f | refs/heads/master | 2020-03-28T03:53:13.354186 | 2018-08-30T09:06:32 | 2018-08-30T09:06:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,230 | py | # coding: utf-8
# author: huang ting
# created time: 2018-04-28-23:14
import tensorflow as tf
import pickle as pkl
import numpy as np
import models
import time
import os
import math
import sys
import utils
from scipy.stats import spearmanr, pearsonr
np.random.seed(1234567)
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
def get_mask(inputs):
shape = inputs.shape
mask = np.zeros(shape, dtype=np.float32)
for i in range(shape[0]):
for j in range(shape[1]):
mask[i][j] = 1.0 if inputs[i,j] > 0 else 0.0
return mask
def get_word_emb(word2idx):
idx2word = {}
for word, idx in word2idx.items():
idx2word[idx] = word
emb_path = "/home/ht/glove.6B/glove.6B.300d.txt"
word2embedding = {}
f = open(emb_path, 'rb')
for line in f:
values = line.split()
word = values[0]
word = word.decode()
emb = np.asarray(values[1:], dtype='float32')
word2embedding[word] = emb
# print(type(word))
f.close()
print(len(word2embedding))
hit_count = 0 # 统计命中数
zero_embedding = np.zeros([300, ], dtype=np.float32)
embedding_matrix = np.random.uniform(-0.05, 0.05, size=[len(idx2word), 300]).astype(np.float32)
for i in range(len(word2idx)):
if i == 0:
emb = zero_embedding
embedding_matrix[i] = emb
else:
word = idx2word[i]
emb = word2embedding.get(word)
if emb is not None:
hit_count += 1
embedding_matrix[i] = emb
print("hit rate is {}".format(hit_count*1.0/len(word2idx)))
print(embedding_matrix.shape)
print(embedding_matrix[:10])
f = open("../SICK/embedding_matrix.pkl", "wb")
pkl.dump(embedding_matrix, f)
f.close()
def get_pretrained_embedding(file_name, voc_size, emb_dim=300):
f = open(file_name, 'rb')
embs = pkl.load(f)
f.close()
assert embs.shape[0] == voc_size
assert embs.shape[1] == emb_dim
return embs
def macro_recall_rate(predictions, labels, nb_class=3):
# 计算 macro-recall,每一类计算召回率,然后取平均;
# 同时返回每一类的召回率和平均召回率。
nb_per_class = np.zeros(nb_class, dtype=float)
nb_right_per_class = np.zeros(nb_class, dtype=float)
for p, l in zip(predictions, labels):
nb_per_class[l] += 1
if p == l:
nb_right_per_class[l] += 1
recall = nb_right_per_class / nb_per_class
macro_recall = recall.mean()
return recall, macro_recall
def vector2score(vectors):
# print(len(vectors))
# 把5-d的向量转为1-5之间的一个分数
base = np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=float)
scores = []
for vector in vectors:
score = (base * vector).sum()
scores.append(score)
scores = np.array(scores, dtype=float)
return scores
def my_spearman(scores1, scores2):
sp = spearmanr(scores1, scores2)
return sp[0]
def my_pearson(scores1, scores2):
pe = pearsonr(scores1, scores2)
return pe[0]
def mse(scores1, scores2):
return np.square(scores1 - scores2).mean()
def train_epoch(session, model, batch_size, train_data, keep_prob, word_keep_prob):
# train_data是一个dict包含了"sent1"/"sent2"/"label"
# fetches = [model.loss, model.correct_num, model.train_op]
fetches = [model.loss, model.prob, model.train_op]
x1 = train_data["sent1"]
x2 = train_data["sent2"]
# y = train_data["label"]
score = train_data["score"]
# shuffle data set
nb_samples = x1.shape[0]
idx_list = list(range(nb_samples))
np.random.shuffle(idx_list)
x1 = x1[idx_list, :]
x2 = x2[idx_list, :]
# y = y[idx_list]
score = score[idx_list, :]
nb_batches = int(nb_samples * 1.0 / batch_size)
nb_left = nb_samples - nb_batches * batch_size
st_time = time.time()
nb_right_sum = 0
loss_sum = 0.0
probs = []
for j in range(nb_batches):
batch_x1 = x1[j*batch_size: j*batch_size+batch_size][:]
batch_x2 = x2[j*batch_size: j*batch_size+batch_size][:]
batch_score = score[j*batch_size: j*batch_size+batch_size][:]
# batch_y = y[j*batch_size: j*batch_size+batch_size]
batch_mask1 = get_mask(batch_x1)
batch_mask2 = get_mask(batch_x2)
feed_dict = {model.keep_prob: keep_prob, model.word_keep_prob: word_keep_prob, model.score_vec: batch_score,
# model.y: batch_y,
model.x1: batch_x1, model.x2: batch_x2,
model.x_mask1: batch_mask1, model.x_mask2: batch_mask2}
# loss, nb_right, _ = session.run(fetches, feed_dict)
loss, prob, _ = session.run(fetches, feed_dict)
loss_sum += loss * batch_size
probs.append(prob)
# nb_right_sum += nb_right
# 如果训练集的样本数无法刚好被batch_size整除
if nb_left > 0:
feed_dict = {model.keep_prob: keep_prob, model.word_keep_prob: word_keep_prob, model.score_vec: score[-nb_left:][:],
# model.y: y[-nb_left:],
model.x1: x1[-nb_left:][:], model.x2: x2[-nb_left:][:],
model.x_mask1: get_mask(x1[-nb_left:][:]), model.x_mask2: get_mask(x2[-nb_left:][:])}
# loss, nb_right, _ = session.run(fetches, feed_dict)
loss, prob, _ = session.run(fetches, feed_dict)
loss_sum += loss * nb_left
probs.append(prob)
# nb_right_sum += nb_right
print("This epoch costs time {} s".format(time.time() - st_time))
average_loss = loss_sum / nb_samples
score = vector2score(score)
probs = np.concatenate(probs, axis=0)
pred_score = vector2score(probs)
sp = my_spearman(pred_score, score)
pe = my_pearson(pred_score, score)
mse_ = mse(pred_score, score)
# accuracy = nb_right_sum * 1.0 / nb_samples
return average_loss, sp, pe, mse_
# return average_loss, accuracy
def validate_or_test(session, model, data, batch_size):
# 这里的data可能是验证集也可能是测试集,返回平均loss和正确率
x1 = data["sent1"]
x2 = data["sent2"]
# y = data["label"]
score = data["score"]
nb_samples = x1.shape[0]
nb_batches = int(math.floor(nb_samples * 1.0 / batch_size))
nb_left = nb_samples - batch_size * nb_batches
fetches = [model.loss, model.prob]
# no training for validation/test set
loss_sum = 0.0
probs = []
nb_right_sum = 0
# print(nb_samples, nb_batches, nb_left)
for j in range(nb_batches):
batch_x1 = x1[j * batch_size: j * batch_size + batch_size][:] # (batch_size, 78)
batch_x2 = x2[j * batch_size: j * batch_size + batch_size][:] # (batch_size, 78)
batch_score = score[j * batch_size: j * batch_size + batch_size][:] # (batch_size, 5)
# batch_y = y[j * batch_size: j * batch_size + batch_size] # (batch_size, )
batch_mask1 = get_mask(batch_x1)
batch_mask2 = get_mask(batch_x2)
feed_dict = {model.keep_prob: 1.0, model.word_keep_prob: 1.0,
model.x1: batch_x1, model.x2: batch_x2, model.score_vec: batch_score,
# model.y: batch_y,
model.x_mask1: batch_mask1, model.x_mask2: batch_mask2
}
# loss, nb_right = session.run(fetches, feed_dict)
loss, prob = session.run(fetches, feed_dict)
# nb_right_sum += nb_right
loss_sum += loss * batch_size
probs.append(prob)
if nb_left > 0:
feed_dict = {model.keep_prob: 1.0, model.word_keep_prob: 1.0,
model.x1: x1[-nb_left:][:], model.x2: x2[-nb_left:][:], model.score_vec: score[-nb_left:][:],
# model.y: y[-nb_left:],
model.x_mask1: get_mask(x1[-nb_left:][:]), model.x_mask2: get_mask(x2[-nb_left:][:]),
}
# loss, nb_right = session.run(fetches, feed_dict)
loss, prob = session.run(fetches, feed_dict)
# nb_right_sum += nb_right
loss_sum += loss * nb_left
probs.append(prob)
average_loss = loss_sum / nb_samples
probs = np.concatenate(probs, axis=0)
pred_scores = vector2score(probs)
score = vector2score(score)
sp = my_spearman(pred_scores, score)
pe = my_pearson(pred_scores, score)
mse_ = mse(pred_scores, score)
# accuracy = nb_right_sum * 1.0 / nb_samples
return average_loss, sp, pe, mse_
if __name__ == "__main__":
train_data, dev_data, test_data, word2idx = utils.load_snli_data()
word_emb=get_pretrained_embedding("../SNLI/embedding_matrix.pkl", voc_size=57323)
# gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.4)
# sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess = tf.Session()
config = utils.load_config_from_file("config",main_key="snli")
config = utils.load_config_from_file("config", main_key="train_snli", config=config)
config["word_emb"] = word_emb
model = models.WSAN(config_=CONFIG)
sess.run(tf.global_variables_initializer())
# best_accuracy = [0.0, 0.0]
best_valid_corr = []
best_corr = [0.0, 0.0]
for i in range(nb_epoches):
if i >= decay_start:
new_lr = sess.run(model.learning_rate) * lr_decay
model.assign_lr(sess, new_lr)
print("The new lr is {0}".format(new_lr))
print("Epoch {0}".format(i))
average_loss \
= train_epoch(sess, model, batch_size=25, train_data=train_data, keep_prob=1.0, word_keep_prob=1.0)
# print("ave loss: {0}, accuracy: {1}".format(average_loss, accuracy))
print("ave loss: {0}".format(average_loss))
devel_loss, devel_sp, devel_pe, devel_mse \
= validate_or_test(sess, model, batch_size=300, data=dev_data)
test_loss, test_sp, test_pe, test_mse \
= validate_or_test(sess, model, batch_size=300, data=test_data)
print("In devel set, ave loss: {0}, spearman: {1}, pearson: {2}, mse: {3}".format(devel_loss, devel_sp, devel_pe, devel_mse))
print("In test set, ave loss: {0}, spearman: {1}, pearson: {2}, mse: {3}".format(test_loss, test_sp, test_pe, test_mse))
if (devel_sp + devel_pe)/2.0 > best_valid_corr:
best_valid_corr = (devel_sp + devel_pe)/2.0
best_corr = [test_sp, test_pe]
elif (devel_sp + devel_pe)/2.0 == best_valid_corr:
if test_sp + test_pe > best_corr[0] + best_corr[1]:
best_corr = [test_sp, test_pe]
else:
pass
# if devel_acc > best_accuracy[0]:
# best_accuracy = [devel_acc, test_acc]
# elif devel_acc == best_accuracy[0]:
# best_accuracy[1] = max(test_acc, best_accuracy[1])
# else:
# pass
sys.stdout.flush()
print("Test accuracy is {0}".format(best_accuracy[-1]))
| [
"[email protected]"
] | |
98d38db48e4ed92aa5459825d91eb2f6629564f3 | 45ed991d87ff31797dc5ea692eea304a4c2ff07e | /faq/views.py | 2c9546b9416ee83f9c444dfd5129dad0da7af9d3 | [] | no_license | Hadian1989/FAQ | 4314686858c38895970bf5de176891974ca87b87 | e8e0bf44e681162ac767829868231993ba0de79a | refs/heads/main | 2023-07-31T18:41:34.210333 | 2021-09-09T15:26:25 | 2021-09-09T15:26:25 | 404,612,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | from django.shortcuts import render
from django.views.generic import TemplateView
from .models import *
# Create your views here.
def show_categories(request):
return render(request, "categories.html", {'categories': Category.objects.all()})
class HomePageView(TemplateView):
template_name = 'home.html'
| [
"[email protected]"
] | |
0c6f4a482144c0528ccfe935a6c4a8422cf0bd92 | 04ff52cae8e6645ce14347933a429081ae12d055 | /Chapter 2/21211 Sara.py | b4b237555ee82121723f5236016a8e39df1a33ee | [] | no_license | smaxwell953/PythonLabs | d0518ef038f586ce75fbc5cd9fb4827be9396381 | d29e7e0cffe86ac2ed64664e851c1d4b3798b057 | refs/heads/master | 2021-07-04T19:13:53.260126 | 2021-04-26T01:49:46 | 2021-04-26T01:49:46 | 233,175,031 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | #Write a one-line piece of code, using the print() function, as well as the newline and escape characters, to match the expected result outputted on three lines.
print('"I\'m"\n''""learning""\n''"""Python"""')
| [
"[email protected]"
] | |
197947df3f6c3b552f542cad538188861870d86f | 95c027e7302751b335b33d287e0efac7483edfc3 | /boj/BOJ_평균.py | aaf5066b2ef4cd7f9d8f10ec2c10ff292124ceba | [] | no_license | kimchaelin13/Algorithm | 01bd4bcb24c58d5d82714e60272d5af91d2d9ce8 | 53f7f3cff5a141cf705af3c9f31cdb9ae997caff | refs/heads/master | 2023-02-03T08:58:26.660299 | 2020-12-20T17:01:16 | 2020-12-20T17:01:16 | 296,996,924 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | import sys
sys.stdin = open("input.txt", "r")
s = []
for i in range(5):
s.append(int(input()))
for j in range(len(s)):
if s[j] < 40:
s[j]=40
print(round(sum(s)/len(s))) | [
"[email protected]"
] | |
7d785f758a09bb73456db84de1934fe4b7496de3 | 625a50a9d6664e10578094fc943a5f0595d6d41c | /mysite/travel/apps.py | bc26ab87a4ea6ee6855bb07c27973878728c74c0 | [] | no_license | jll2884/testing | 92b325d089b370600bc5bfbf5125a88ba4b229f2 | 067afb97ec2d94160550a9a452b580c222f199aa | refs/heads/master | 2020-04-23T19:56:03.723730 | 2019-02-19T06:53:59 | 2019-02-19T06:53:59 | 171,422,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | from django.apps import AppConfig
class travelConfig(AppConfig):
name = 'travel'
| [
"[email protected]"
] | |
9bf257fa6c138050306f2d742f190f5687aa2ada | af6c649718b518e20bd8c5b1f26a3780f8babd1c | /time_planner.py | 00d9ca1149338f838e8980b97c6a2eba478010c3 | [] | no_license | quanewang/public | 31274d9c3f6cd96a62872620d7d4f81957734ecd | 7593f887ca54f8b393bbd0b196e498665663d665 | refs/heads/master | 2022-01-04T08:09:51.940244 | 2022-01-04T02:23:14 | 2022-01-04T02:23:14 | 119,501,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,213 | py | """
--LESSON
--- [b[0], a[1]]=>[b[0], min(a[1], b[1])]
--- i += 1 => j += 1
--- [max(a[0], b[0]), min(a[1], b[1])]
Time Planner
Implement a function meetingPlanner that given the availability, slotsA and slotsB, of two people and a meeting duration dur, returns the earliest time slot that works for both of them and is of duration dur. If there is no common time slot that satisfies the duration requirement, return null.
Time is given in a Unix format called Epoch, which is a nonnegative integer holding the number of seconds that have elapsed since 00:00:00 UTC, Thursday, 1 January 1970.
Each persons availability is represented by an array of pairs. Each pair is an epoch array of size two.
The first epoch in a pair represents the start time of a slot. The second epoch is the end time of that slot.
The input variable dur is a positive integer that represents the duration of a meeting in seconds.
The output is also a pair represented by an epoch array of size two.
In your implementation assume that the time slots in a persons availability are disjointed,
i.e, time slots in a persons availability dont overlap. Further assume that the slots are sorted by slots start time.
Implement an efficient solution and analyze its time and space complexities.
Examples:
input: slotsA = [[10, 50], [60, 120], [140, 210]]
slotsB = [[0, 15], [60, 70]]
dur = 8
output: [60, 68]
input: slotsA = [[10, 50], [60, 120], [140, 210]]
slotsB = [[0, 15], [60, 70]]
dur = 12
output: null # since there is no common slot whose duration is 12
"""
def meeting_planner(slotsA, slotsB, dur):
i , j = 0, 0
while i<len(slotsA) and j<len(slotsB):
slot = find_common(slotsA[i], slotsB[j])
if slot and slot[1]-slot[0]>=dur:
return [slot[0], slot[0]+dur]
elif slotsA[i][1]==slotsB[j][1]:
i += 1
j += 1
elif slotsA[i][1]>slotsB[j][1]:
j += 1
else:
i += 1
return []
def find_common(a, b):
return [max(a[0], b[0]), min(a[1], b[1])]
slotsA = [[10, 50], [60, 120], [140, 210]]
slotsB = [[0, 15], [60, 70]]
dur = 12
slotsA = [[10, 50], [60, 120], [140, 210]]
slotsB = [[0, 15], [60, 70]]
dur = 8
print meeting_planner(slotsA, slotsB, dur) | [
"[email protected]"
] | |
a2475d2665bf611b955e1b272f6b4d90ee9bc82a | f5fb9b4eead0b32d51fd54c099c1b15d0551fca6 | /venv/grey_ct.py | 939983b9f3bf0c952c482216761965f5963504b0 | [
"Apache-2.0"
] | permissive | hellending/CT-conv2d_SVM_random-forest | 3ccbea45b40f1aeab0fc318eb5bbb4534dc0b3ff | 100807fadbb55297cad45ce2163ed9a14e3149ab | refs/heads/main | 2023-02-08T13:07:21.880205 | 2020-12-18T18:26:22 | 2020-12-18T18:26:22 | 321,894,912 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,881 | py | import tensorflow.compat.v1 as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import grey_picture as rd
import os,sys
from sklearn.model_selection import train_test_split
import time
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
tf.disable_eager_execution()
def creat_label(length,classfication_value,one_hot_value):
#创建一个适当大小的矩阵来接收
array=np.arange(length*classfication_value).reshape(length,classfication_value)
for i in range(0,length):
array[i]=one_hot_value #这里采用one hot值来区别合格与不合格
return array
x = tf.placeholder(tf.float32,[None,128,128,1])/255
y = tf.placeholder(tf.float32,[None,2])
#四个参数:长,宽,单个过滤器深度,过滤器个数
weights = {'wc1': tf.Variable(tf.random_normal([5,5,1,64],stddev=0.05)),
'wc2': tf.Variable(tf.random_normal([5,5,64,128],stddev=0.05)),
'wc3': tf.Variable(tf.random_normal([5,5,128,512],stddev=0.05)),
'wd1': tf.Variable(tf.random_normal([16*16*512,1024],stddev=0.05)),
'wd2': tf.Variable(tf.random_normal([1024,2],stddev=0.05))
}
#b值(特征值),偏移量
biases = {'bc1': tf.Variable(tf.random_normal([64],stddev=0.05)),
'bc2': tf.Variable(tf.random_normal([128],stddev=0.05)),
'bc3': tf.Variable(tf.random_normal([512],stddev=0.05)),
'bd1': tf.Variable(tf.random_normal([1024],stddev=0.05)),
'bd2': tf.Variable(tf.random_normal([2],stddev=0.05))
}
def Forward_conv(input,weights,biases,keepratio):
#输入的批量数据处理
input_r = tf.reshape(input,shape=[-1,128,128,1])
conv_1 = tf.nn.conv2d(input=input_r, filter=weights['wc1'], strides=[1, 1, 1, 1], padding='SAME')
conv_1 = tf.nn.relu(tf.add(conv_1, biases['bc1']))
pool_1 = tf.nn.max_pool(value=conv_1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# 将池化层合理缩减,去掉一部分神经节点,防止过拟合,这里意思是将pool1层保留百分比为keepratio的节点
_pool1_drl = tf.nn.dropout(pool_1, keepratio)
conv_2 = tf.nn.conv2d(input=_pool1_drl, filter=weights['wc2'], strides=[1, 1, 1, 1], padding='SAME')
conv_2 = tf.nn.relu(tf.add(conv_2, biases['bc2']))
pool_2 = tf.nn.max_pool(value=conv_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
_pool2_drl = tf.nn.dropout(pool_2, keepratio)
conv_3 = tf.nn.conv2d(input=_pool2_drl, filter=weights['wc3'], strides=[1, 1, 1, 1], padding='SAME')
conv_3 = tf.nn.relu(tf.add(conv_3, biases['bc3']))
pool_3 = tf.nn.max_pool(value=conv_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
_pool3_drl = tf.nn.dropout(pool_3, keepratio)
densel = tf.reshape(_pool3_drl, [-1, weights['wd1'].get_shape().as_list()[0]])
fcl = tf.nn.sigmoid(tf.add(tf.matmul(densel, weights['wd1']), biases['bd1']))
fcl_drl = tf.nn.dropout(fcl, keepratio)
out = tf.add(tf.matmul(fcl_drl, weights['wd2']), biases['bd2'])
return out
# Load the data sets (pre-processed greyscale images).
covid = rd.creat_x_database('.\\grey_covid',128,128)
non_covid = rd.creat_x_database('.\\grey_non',128,128)
dataSet = np.vstack((covid,non_covid))
# Build the one-hot labels: [0,1] = covid, [1,0] = non-covid.
covid_label = creat_label(covid.shape[0],2,[0,1])
non_covid_label = creat_label(non_covid.shape[0],2,[1,0])
label = np.vstack((covid_label,non_covid_label))
# Assemble the final data set.
# x_train,x_test,y_train,y_test = train_test_split(dataSet,label,test_size=0.1,random_state=0,shuffle=True)
pre = Forward_conv(x,weights,biases,0.8)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits = pre,labels = y))
# cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pre+ 1e-10), reduction_indices=[1]))
optimizer = tf.train.AdamOptimizer(0.00001).minimize(cost)
# Accuracy: fraction of predictions whose argmax matches the label's argmax.
p = tf.equal(tf.argmax(y,1),tf.argmax(pre,1))
accuracy = tf.reduce_mean(tf.cast(p,tf.float32))
###########################################################################
sess = tf.Session()
sess.run(tf.global_variables_initializer())
avg_cost = 0
for j in range(0,1000):
    # NOTE(review): with random_state=0 this split is presumably identical
    # every epoch, keeping test data out of the training batches — confirm.
    x_train, x_test, y_train, y_test = train_test_split(dataSet, label, test_size=0.2, random_state=0,shuffle=True)
    print(j)
    avg_cost = 0
    # Three mini-batches of 179 samples each per epoch.
    for i in range(0,3):
        k = i*179
        x_train1 = [x_train[m] for m in range(k,k+179)]
        y_train1 = [y_train[m] for m in range(k,k+179)]
        sess.run(optimizer, feed_dict={x: x_train1, y: y_train1})
        # Running average of the batch losses for this epoch.
        avg_cost += sess.run(cost,feed_dict={x: x_train1, y: y_train1})/3
        # avg_cost += tf.reduce_mean(sess.run(cost, feed_dict={x: x_train1, y: y_train1}))
    # print(avg_cost)
    # training_acc = sess.run(accuracy, feed_dict={x: x_train, y: y_train})
    # print('训练数据精度:', training_acc)
    test_acc = sess.run(accuracy, feed_dict={x: x_test, y: y_test})
    print('测试数据精度:', test_acc)
    print('损失值',avg_cost)
"[email protected]"
] | |
3f728415282774230325f355da06cc6b4cb449c8 | ee1ad2c3fbd433f0a75034c7f90944bf6c7d80e6 | /main.py | 0048758b5e1b7bba1762c8bf719381b3cd390431 | [] | no_license | carolnesso/MD-project | 74499bbe66264ae45e6e0059e8284d5b988726ae | 9e261e4e98d46667b9bdfde3ddf429caad131bc7 | refs/heads/master | 2020-07-07T17:35:25.519437 | 2019-08-20T20:32:14 | 2019-08-20T20:32:14 | 203,423,485 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,391 | py | """
ARQUIVO PRINCIPAL (main.py)
"""
import prime
import keys
import mdc
import dictionaryies
import exp_modular
import savefile
def encrypt(code_msg, n, e):
    """Encrypt a numeric message with the RSA public key (n, e).

    Each number in ``code_msg`` (one per character of the original text)
    is raised to ``e`` modulo ``n`` using fast modular exponentiation.
    The encrypted sequence is printed and then returned.
    """
    mensagem = [exp_modular.fast_mod_expn(codigo, e, n) for codigo in code_msg]
    print("This is your encrypted message:\n\n{}".format(mensagem))
    return mensagem
def generate_public_keys():
    """
    Ask the user for the values 'p', 'q' and 'e' used to generate the keys.
    Saves a file with the public keys 'n' and 'e'.
    """
    while True:
        p = 0
        q = 0
        e = 0
        # Both factors must be prime.
        while True:
            p = int(input("Enter a prime number: "))
            if prime.is_prime(p):
                break
            print("Sorry, this number is not accepted.")
        while True:
            q = int(input("Enter another prime number: "))
            if prime.is_prime(q):
                break
            print("Sorry, this number is not accepted.")
        # n = p*q must be at least 27 — presumably so every character code
        # fits below n; confirm against the character table module.
        if p*q >=27:
            break
        else:
            print("Sorry, P*Q is under to 27")
    phi = keys.calc_phi(p, q)
    # The public exponent must be coprime with phi(n).
    while True:
        e = int(input("Enter a number between 1 and {} which is prime relation to {}: ".format(phi, phi)))
        if mdc.mdc_euclides(e, phi) == 1:
            break
        print("Sorry, this number is not accepted.")
    n = keys.calc_n(p, q)
    savefile.save_file("n: {}\ne: {}".format(n, e), "public_keys.txt")
def decrypt(encrypted_msg):
    """
    Receive a numeric sequence, ask the user for the key values 'p', 'q'
    and 'e', and decrypt the message, returning a still-numeric sequence
    that can easily be turned back into its original text form.
    """
    p = int(input("Enter your first key (p): "))
    q = int(input("Enter your second key (q): "))
    e = int(input("Enter your third key (e): "))
    n = keys.calc_n(p, q)
    phi = keys.calc_phi(p, q)
    # Private exponent: modular inverse of e with respect to phi(n).
    d = keys.calc_d(e, phi)
    decriptado = []
    for i in encrypted_msg:
        decriptado.append(exp_modular.fast_mod_expn(int(i), d, n))
    return decriptado
def first_option(message, n, e):
    """Encrypt a text message with the public key (n, e).

    Each character is first mapped to its numeric code through the
    project's character table, then the numeric sequence is encrypted.
    Returns the encrypted message.
    """
    letter_to_number = dictionaryies.code_letters()
    code_message = [letter_to_number[caractere] for caractere in message]
    return encrypt(code_message, n, e)
def second_option(message):
    """Decrypt a space-separated numeric message back into text.

    Splits the input on whitespace, decrypts each number, maps the
    resulting codes back to characters, prints and returns the text.
    """
    numbers_to_letters = dictionaryies.letters_code()
    decrypted = decrypt(message.split())
    decodificated_message = "".join(numbers_to_letters[numero] for numero in decrypted)
    print("\n\nYour original message:\n{}\n\n".format(decodificated_message))
    return decodificated_message
def control():
    """
    Drive the user's flow of actions through the program.

    option: number of the action the user chose.
    input_option: whether the input comes from typing (1) or a text file (2).
    """
    while True:
        option = int(input("What do you want to do?\n1. Generate public keys\n2. Encrypt\n3. Decrypt\n4. Kill this process\n:::: "))
        input_option = 0
        final_content = ""
        if option == 4:
            print("bye")
            break
        elif option != 1:
            input_option = int(input("Do you want 1. to type or 2. send a file?\n:::: "))
        else:
            generate_public_keys()
            break
        if input_option == 1:
            message = input("Enter your message here: ")
            message = message.upper() # normalise in case the message was not typed in upper case
            if option == 2:
                n = int(input("Enter your first key (n): "))
                e = int(input("Enter your second key (e): "))
                final_content = first_option(message, n, e)
            elif option == 3:
                final_content = second_option(message)
        elif input_option == 2:
            name_file = input("What is the file name?\n:::: ")
            message = ""
            # Read the whole file, dropping newline characters.
            with open(name_file) as file:
                for i in file:
                    for k in i:
                        if k != '\n':
                            message += k
            if option == 2:
                n = int(input("Enter your first key (n): "))
                e = int(input("Enter your second key (e): "))
                final_content = first_option(message, n, e)
            elif option == 3:
                final_content = second_option(message)
        # Optionally persist the result of the chosen operation.
        var = input("Do you want to save this message in a text file (y/n) ? ")
        if var == "y":
            savefile.save_file(final_content, "message.txt")
        print("\nDone!\n\n")
control() | [
"[email protected]"
] | |
65bdc4f9eb4bdbd64aa2920720db1f17d5831c9b | c7c0bcbeb06a00531795cefdcdb033cfdc25d273 | /chapter-11/orders/setup.py | 5fab50bb1de07e80c29fb2b5bada47e2046c7096 | [
"Apache-2.0",
"MIT"
] | permissive | wallacei/microservices-in-action-copy | c05a9405e9312576a260b8e001d0c9ea2f1fde18 | f9840464a1f9ec40622989e9e5377742246244f3 | refs/heads/main | 2023-05-31T06:42:13.535322 | 2021-06-19T19:23:16 | 2021-06-19T19:23:16 | 377,201,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,856 | py | from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Read the long description from the README shipped next to this file.
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name='orders',
    version='0.0.1',
    description='Orders microservice',
    long_description=long_description,
    url='https://github.com/pap/simplebank',
    author='Simplebank Engineering',
    author_email='[email protected]',
    # NOTE(review): license says MIT but the classifier below declares
    # Apache — confirm which one is intended.
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3',
        'Topic :: Software Development :: Libraries :: Python Modules',
        "Programming Language :: Python",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: POSIX",
        "Programming Language :: Python :: 3",
        "Topic :: Internet",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Intended Audience :: Developers",
    ],
    keywords='microservices orders',
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    install_requires=[
        'nameko==2.12.0',
        'logstash_formatter==0.5.17',
        'circuitbreaker==1.3.0',
        'gutter==0.5.0',
        'request-id==1.0',
        'statsd==3.3.0',
        'nameko-sentry==1.0.0',
        'jaeger-client == 4.3.0',
        'pyopenssl==19.1.0',
    ],
    # List additional groups of dependencies here (e.g. development
    # dependencies). You can install these using the following syntax,
    # for example:
    # $ pip install -e .[dev,test]
    extras_require={
        'dev': ['check-manifest'],
        'test': ['coverage'],
    },
)
| [
"[email protected]"
] | |
c7f974d728172a7299772df1316b74e2b84e35f1 | d0daad3c1afb5ad08f98ba123b20b3b011387b55 | /21-tkinter/02-textos.py | a2242a017b8092a8c456dfc08d4db6c55749fdef | [] | no_license | RubennCastilloo/master-python | a0c17201f947e19905aea9f8bf991395456e2d3d | a12a6fb4bf6884df6df3615a5f4494777cbbbabd | refs/heads/main | 2023-08-12T17:03:09.124907 | 2021-09-16T18:55:38 | 2021-09-16T18:55:38 | 370,833,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | from tkinter import *
ventana = Tk()
ventana.geometry("700x500")
texto = Label(ventana, text="=>Bienvenido a mi programa<=")
texto.config(
fg="white",
bg="#000000",
padx=50,
pady=20,
font=("Fira Code", 30),
justify=RIGHT
)
texto.pack(anchor=W)
def pruebas(nombre, apellidos, pais):
    """Return the greeting shown in the second label."""
    return "Hola {} {} veo que eres de {}".format(nombre, apellidos, pais)
texto = Label(ventana, text=pruebas(nombre="Ruben", apellidos="Castillo", pais="Mexico"))
texto.config(
height=3,
bg="orange",
font=("Arial", 18),
padx=8,
pady=20,
cursor="spider"
)
texto.pack(anchor=NW)
ventana.mainloop() | [
"[email protected]"
] | |
0a295fa014ecd07653abfe29e4748732058d94cd | bcc78f8113cbaaa14aeaea5dd3a88f46a636c8d4 | /CircuitPlaygroundExpress_LightSensor.py | fc3ed4907b11afe46877e394a4b8dd506f3ce877 | [] | no_license | bribrown/CircuitPython | 6ae59a800202a76741e686758b87c94added23ee | e37d50ae18d97ea54b158f54629ff0eda8a1e10d | refs/heads/master | 2022-04-01T02:55:46.786629 | 2020-01-30T02:31:42 | 2020-01-30T02:31:42 | 116,894,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | # CircuitPlaygroundExpress_LightSensor
# reads the on-board light sensor and graphs the brighness with NeoPixels
from simpleio import map_range
from analogio import AnalogIn
import board
import neopixel
import time
# Ten NeoPixels used as a bar graph for the ambient light level.
pixels = neopixel.NeoPixel(board.NEOPIXEL, 10, auto_write=0, brightness=.05)
pixels.fill((0, 0, 0))
pixels.show()

analogin = AnalogIn(board.LIGHT)

while True:
    # Map the raw ADC reading onto a pixel index in [0, 9].
    peak = map_range(analogin.value, 2000, 62000, 0, 9)
    print('Peak: ' + str(peak))
    print('INT Peak: ' + str(int(peak)))
    print('Actual Value: ' + str(analogin.value))
    print('----------------')
    # Light every pixel up to the peak.  Was range(0, 9, 1), which skipped
    # the tenth NeoPixel (index 9) even though peak can reach 9.
    for i in range(10):
        if i <= peak:
            pixels[i] = (0, 255, 0)
        else:
            pixels[i] = (0, 0, 0)
    pixels.show()
    time.sleep(1)
| [
"[email protected]"
] | |
1c1842851e7ef3306eade4b5362a299e7a952d0f | 4cdf4e243891c0aa0b99dd5ee84f09a7ed6dd8c8 | /django2/bookmarks/bookmarks/settings.py | 8277bde8c3c7f242eb407532c2ef68e2c0ae896b | [
"MIT"
] | permissive | gozeon/code-collections | 464986c7765df5dca980ac5146b847416b750998 | 13f07176a6c7b6ac13586228cec4c1e2ed32cae4 | refs/heads/master | 2023-08-17T18:53:24.189958 | 2023-08-10T04:52:47 | 2023-08-10T04:52:47 | 99,432,793 | 1 | 0 | NOASSERTION | 2020-07-17T09:25:44 | 2017-08-05T15:56:53 | JavaScript | UTF-8 | Python | false | false | 3,367 | py | """
Django settings for bookmarks project.
Generated by 'django-admin startproject' using Django 2.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'a9va+)ulziy57*cci0qv^v#7lo04$%&t-qj*77hg@77q1_&#_d'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'account.apps.AccountConfig',
    'images.apps.ImagesConfig'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bookmarks.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'bookmarks.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
# Redirect targets used by the django.contrib.auth views.
LOGIN_REDIRECT_URL = 'dashboard'
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
# Development e-mail backend: messages are printed to the console.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# User-uploaded media files.
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
| [
"[email protected]"
] | |
f8aa9cc771efab36e523016cc18be7dd92b8bf88 | 43ab33b2f50e47f5dbe322daa03c86a99e5ee77c | /test/test_study_group_values_controller_api.py | 671f7e874460bcd47617d26a420f26a608131ef4 | [] | no_license | Sage-Bionetworks/rcc-client | c770432de2d2950e00f7c7bd2bac22f3a81c2061 | 57c4a621aecd3a2f3f9faaa94f53b2727992a01a | refs/heads/main | 2023-02-23T05:55:39.279352 | 2021-01-21T02:06:08 | 2021-01-21T02:06:08 | 331,486,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,603 | py | # coding: utf-8
"""
nPhase REST Resource
REDCap REST API v.2 # noqa: E501
The version of the OpenAPI document: 2.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import rcc
from rcc.api.study_group_values_controller_api import StudyGroupValuesControllerApi # noqa: E501
from rcc.rest import ApiException
class TestStudyGroupValuesControllerApi(unittest.TestCase):
    """StudyGroupValuesControllerApi unit test stubs"""
    # NOTE: generated placeholder tests — every body is `pass` and asserts
    # nothing yet; fill these in before relying on this suite.
    def setUp(self):
        self.api = rcc.api.study_group_values_controller_api.StudyGroupValuesControllerApi()  # noqa: E501
    def tearDown(self):
        pass
    def test_create11(self):
        """Test case for create11
        Create new Study Group Value for current Study based on auth token provided  # noqa: E501
        """
        pass
    def test_delete8(self):
        """Test case for delete8
        Delete Study Group Value for current Study based on auth token provided  # noqa: E501
        """
        pass
    def test_get_details8(self):
        """Test case for get_details8
        Get specified Study Group Value details  # noqa: E501
        """
        pass
    def test_get_list9(self):
        """Test case for get_list9
        Get list of all Study Group Values for specified Study  # noqa: E501
        """
        pass
    def test_update10(self):
        """Test case for update10
        Update Study Group Value for current Study based on auth token provided  # noqa: E501
        """
        pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
a08b6a7a99b0ab5b2de2ff6bf12388fbf6319a48 | c4bfd8ba4c4c0f21bd6a54a9131f0985a5a4fa56 | /crescent/resources/s3/bucket_policy/constants.py | 5ba83647b2baf057d3d871cc99288b7e11f8f64e | [
"Apache-2.0"
] | permissive | mpolatcan/crescent | 405936ec001002e88a8f62d73b0dc193bcd83010 | 2fd0b1b9b21613b5876a51fe8b5f9e3afbec1b67 | refs/heads/master | 2022-09-05T04:19:43.745557 | 2020-05-25T00:09:11 | 2020-05-25T00:09:11 | 244,903,370 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | from crescent.core.constants import get_values
class _RequiredProperties:
    """Names of the CloudFormation properties that are mandatory, grouped per resource."""
    class BucketPolicy:
        # An AWS::S3::BucketPolicy must declare both of these properties.
        BUCKET = "Bucket"
        POLICY_DOCUMENT = "PolicyDocument"
# --------------------------------------------------
class ResourceRequiredProperties:
    """Flattened lists of required property names, one attribute per resource type."""
    BUCKET_POLICY = get_values(_RequiredProperties.BucketPolicy)
| [
"[email protected]"
] | |
a87547d63b4adbc33bfb07abc7b24c84b7af332a | a330851a2b5036ed83e3bdc49041d117af184d50 | /pca/dataanalyzer.py | dcb165e1c51382f320eb84c43eb7277d950f54c6 | [
"MIT"
] | permissive | Wangwuhen/classification-of-encrypted-traffic | 9e28e62956d440fb7ebac334cdc32a7e7dbdf725 | 3c86e098aab58941f9339bb64945c1112ab556ef | refs/heads/master | 2020-04-15T04:10:29.786935 | 2018-06-23T18:20:08 | 2018-06-23T18:20:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,858 | py | import utils
import glob
import os
import pandas as pd
import numpy as np
import math
import pca as p
def getbytes(dataframe, payload_length=810):
    """Pack the variable-length 'bytes' column into a fixed-width matrix.

    Each row's byte sequence is copied into a zero-padded row of width
    ``payload_length``.

    Args:
        dataframe: DataFrame with a 'bytes' column holding 1-D uint8 arrays
            no longer than ``payload_length``.
        payload_length: width of the output rows; shorter sequences are
            right-padded with zeros.

    Returns:
        Float ndarray of shape (len(dataframe), payload_length).
    """
    values = dataframe['bytes'].values
    # `packed` rather than `bytes`: the original shadowed the built-in type.
    packed = np.zeros((values.shape[0], payload_length))
    for row, payload in enumerate(values):
        # Copy straight into the output row; the original allocated a
        # throwaway uint8 buffer per row for no benefit.
        packed[row, :payload.shape[0]] = payload
    return packed
def getmeanstd(dataframe, label):
    """Per-byte statistics over all rows of *dataframe* carrying *label*.

    Returns a tuple ``(mean, mean_sub, std)`` where ``mean`` is the
    column-wise mean of the packed payload bytes, ``mean_sub`` the
    mean-centred payloads, and ``std`` the centred payloads divided by the
    column-wise standard deviation (i.e. z-scores).
    """
    mask = dataframe['label'] == label
    payloads = getbytes(dataframe[mask])
    mean = np.mean(payloads, axis=0)
    centered = np.subtract(payloads, mean)
    standardized = centered / np.std(payloads, axis=0)
    return mean, centered, standardized
def byteindextoheaderfield(number, TCP=True):
    """Translate a flat byte index into the packet-header field it falls in.

    The index is folded back onto a single packet: 54 header bytes for TCP
    (Ethernet 14 + IP 20 + TCP 20) or 42 for UDP (Ethernet 14 + IP 20 +
    UDP 8).
    """
    offset = number % (54 if TCP else 42)
    # (exclusive upper bound, field name) for the Ethernet + IP layers and
    # the ports, which sit at the same offsets for both transports.
    shared_layout = (
        (6, "Destination MAC"),
        (12, "Source MAC"),
        (14, "Eth. Type"),
        (15, "IP Version and header length"),
        (16, "Explicit Congestion Notification"),
        (18, "Total Length (IP header)"),
        (20, "Identification (IP header)"),
        (22, "Fragment offset (IP header)"),
        (23, "Time to live (IP header)"),
        (24, "Protocol (IP header)"),
        (26, "Header checksum (IP header)"),
        (30, "Source IP (IP header)"),
        (34, "Destination IP (IP header)"),
        (36, "Source Port (TCP/UDP header)"),
        (38, "Destination Port (TCP/UDP header)"),
    )
    for bound, field in shared_layout:
        if offset < bound:
            return field
    if offset < 42:
        # Bytes 38-41 mean different things in the two transports.
        if TCP:
            return "Sequence number (TCP header)"
        if offset < 40:
            return "Length of data (UDP Header)"
        return "UDP Checksum (UDP Header)"
    # Offsets 42-53 only exist in a TCP header.
    tcp_layout = (
        (46, "ACK number (TCP header)"),
        (47, "TCP Header length or Nonce (TCP header)"),
        (48, "TCP FLAGS (CWR, ECN-ECHO, ACK, PUSH, RST, SYN, FIN) (TCP header)"),
        (50, "Window size (TCP header)"),
        (52, "Checksum (TCP header)"),
        (54, "Urgent Pointer (TCP header)"),
    )
    for bound, field in tcp_layout:
        if offset < bound:
            return field
| [
"[email protected]"
] | |
3775521386c59304a0872b9053c2111fdfe7ca55 | da687718aa8ce62974090af63d25e057262e9dfe | /cap14-funcoes/extras/entrada.py | 8f9a269e72ba810cb2bb7d637f9fbdeaae697fbd | [] | no_license | frclasso/revisao_Python_modulo1 | 77928fa4409c97d49cc7deccdf291f44c337d290 | 1e83d0ef9657440db46a8e84b136ac5f9a7c556e | refs/heads/master | 2020-06-25T05:37:28.768343 | 2019-07-27T22:23:58 | 2019-07-27T22:23:58 | 199,217,969 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | def valida_inteiro(mensagem, minimo, maximo):
while True:
try:
v = int(input(mensagem))
if v >= minimo and v <= maximo:
return v
else:
print(f'Digite um valor entre {maximo} e {minimo}.')
except: print('Voce deve digitar um numero inteiro.') | [
"[email protected]"
] | |
5aadabb6bec3aec95c8f54c9736e197ced6a47ab | 0daf6763c960cd898e9bb5612b1314d7e34b8870 | /mnist_1/data.py | b1bf29e2af4aca2bbe3f70fd3c775cddef6107cf | [
"MIT"
] | permissive | evanthebouncy/nnhmm | a6ba2a1f0ed2c90a0188de8b5e162351e6668565 | acd76edaa1b3aa0c03d39f6a30e60d167359c6ad | refs/heads/master | 2021-01-12T02:27:32.814908 | 2017-04-01T05:01:24 | 2017-04-01T05:01:24 | 77,956,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,180 | py | import numpy as np
from scipy.misc import imresize
from scipy.ndimage.filters import gaussian_filter
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
X_L = 10
L = 14
N_BATCH = 50
OBS_SIZE = 20
KEEP = 0.6
# ---------------------------- helpers
def black_white(img, keep=None):
  """Binarise *img*: the brightest *keep* fraction of its non-zero pixels
  become 1.0, everything else 0.0.

  Args:
    img: 2-D float array.
    keep: fraction of non-zero pixels kept white; defaults to the
        module-level KEEP constant.

  Returns:
    A new array of the same shape containing only 0.0 and 1.0.
  """
  if keep is None:
    keep = KEEP
  new_img = np.copy(img)
  img_flat = img.flatten()
  nonzeros = img_flat[np.nonzero(img_flat)]
  if nonzeros.size == 0:
    # All-black image: nothing to threshold (the original indexed into an
    # empty array here and raised IndexError).
    return new_img
  sortedd = np.sort(nonzeros)
  # Threshold value separating off the darkest (1 - keep) fraction.
  idxx = int(round(len(sortedd) * (1.0 - keep)))
  idxx = min(idxx, len(sortedd) - 1)
  thold = sortedd[idxx]
  new_img[img >= thold] = 1.0
  new_img[img < thold] = 0.0
  return new_img
def vectorize(coords, size=None):
  """One-hot encode a 2-D grid coordinate.

  Args:
    coords: (x, y) pair of integer indices.
    size: length of each one-hot vector; defaults to the grid side L.

  Returns:
    Tuple (x_onehot, y_onehot) of float arrays of length *size*.
  """
  if size is None:
    size = L
  retX, retY = np.zeros([size]), np.zeros([size])
  retX[coords[0]] = 1.0
  retY[coords[1]] = 1.0
  return retX, retY
# show dimension of a data object (list of list or a tensor)
def show_dim(lst1):
  """Recursively describe the shape of a nested sequence or tensor.

  Returns a nested [length, ...] list for non-empty sized sequences, a TF
  shape for objects exposing get_shape(), a numpy .shape where available,
  and the plain type() as a last resort.
  """
  if hasattr(lst1, '__len__') and len(lst1) > 0:
    return [len(lst1), show_dim(lst1[0])]
  # The original used bare excepts here; we only probe for the attribute
  # simply not existing on this object.
  try:
    return lst1.get_shape()
  except AttributeError:
    try:
      return lst1.shape
    except AttributeError:
      return type(lst1)
# -------------------------------------- making the datas
# assume X is already a 2D matrix
def mk_query(X):
  """Build a membership oracle over the 2-D grid X.

  The returned function maps a coordinate pair to the one-hot answer
  [1.0, 0.0] when X at that cell equals 1.0 and [0.0, 1.0] otherwise.
  """
  def query(coord):
    row, col = coord
    return [1.0, 0.0] if X[row][col] == 1.0 else [0.0, 1.0]
  return query
def sample_coord():
  """Draw a uniformly random (x, y) coordinate on the L x L grid."""
  x_coord = np.random.randint(0, L)
  y_coord = np.random.randint(0, L)
  return x_coord, y_coord
def sample_coord_center():
  """Sample a grid coordinate biased towards the centre.

  Draws from an isotropic Gaussian centred at (L/2, L/2); if the rounded
  draw lands outside the grid, falls back to a uniform sample.
  """
  Ox, Oy = np.random.multivariate_normal([L/2,L/2], [[L*0.7, 0.0], [0.0, L*0.7]])
  Ox, Oy = round(Ox), round(Oy)
  if 0 <= Ox < L:
    if 0 <= Oy < L:
      return Ox, Oy
  # Out-of-bounds draw: retry uniformly over the whole grid.
  return sample_coord()
def sample_coord_bias(qq):
  """Sample a coordinate with a 50/50 chance of a positive or negative label.

  Flips a fair coin, then rejection-samples uniform coordinates until one
  with the chosen label under the oracle ``qq`` is found.  The original
  helpers recursed once per rejected sample and could exhaust the stack on
  very unbalanced grids; plain loops are used instead.
  """
  def find_with_label(target):
    # Rejection-sample until qq returns the requested one-hot label.
    while True:
      C = sample_coord()
      if qq(C) == target:
        return C
  toss = np.random.random() < 0.5
  if toss:
    return find_with_label([1.0, 0.0])
  return find_with_label([0.0, 1.0])
def gen_O(X):
  """Generate one random observation of grid X.

  Returns ((x, y), label): a uniformly sampled coordinate together with
  the one-hot answer of the membership oracle at that point.
  """
  query = mk_query(X)
  Ox, Oy = sample_coord()
  O = (Ox, Oy)
  return O, query(O)
def get_img_class(test=False):
  """Fetch one MNIST digit as a binarised L x L image with its one-hot label.

  Args:
    test: draw from the test split instead of the training split.

  Returns:
    (img, label) where img is a 14x14 array of 0.0/1.0 pixels and label is
    the one-hot digit vector.
  """
  # Only consume a batch from the split we actually use; the original
  # always drew (and discarded) a training batch even when test=True.
  if test:
    img, _x = mnist.test.next_batch(1)
  else:
    img, _x = mnist.train.next_batch(1)
  img = np.reshape(img[0], [2*L,2*L])
  # Downscale 28x28 -> 14x14, blur slightly, then threshold to black/white.
  # NOTE(review): scipy.misc.imresize is deprecated/removed in modern SciPy.
  img = gaussian_filter(imresize(img, (14,14)) / 255.0, 0.11)
  img = black_white(img)
  return img, _x[0]
# a trace is named tuple
# (Img, S, Os)
# where Img is the black/white image
# where S is the hidden hypothesis (i.e. label of the img)
# Os is a set of Observations which is (qry_pt, label)
import collections
Trace = collections.namedtuple('Trace', 'Img S Os')
def gen_rand_trace(test=False):
  """Draw a random MNIST image and bundle it with OBS_SIZE random
  observations as a Trace(Img, S, Os)."""
  img, label = get_img_class(test)
  observations = [gen_O(img) for _ in range(OBS_SIZE)]
  return Trace(img, label, observations)
# a class to hold the experiences
class Experience:
  """Fixed-capacity replay buffer of traces.

  Holds at most ``buf_len`` traces; once full, the OLDEST trace is evicted
  to make room for new ones.
  """
  def __init__(self, buf_len):
    self.buf = []
    self.buf_len = buf_len
  def trim(self):
    # Evict from the FRONT (oldest first).  The original popped from the
    # end, which discarded the trace that had just been appended and froze
    # the buffer at its first buf_len entries forever.
    while len(self.buf) > self.buf_len:
      self.buf.pop(0)
  def add(self, trace):
    """Append a trace, evicting the oldest one if capacity is exceeded."""
    self.buf.append(trace)
    self.trim()
  def sample(self):
    """Return one trace drawn uniformly at random from the buffer."""
    idxxs = np.random.choice(len(self.buf), size=1, replace=False)
    return self.buf[idxxs[0]]
def data_from_exp(exp):
  """Assemble one training batch of N_BATCH traces sampled from *exp*.

  Returns, in order: the hidden labels, the per-step observation coordinate
  one-hots (x and y), the per-step observation answers, a fresh query
  coordinate (x and y one-hots) with its answer, and the raw images.
  """
  traces = [exp.sample() for _ in range(N_BATCH)]
  x = []
  # One list per observation step; each collects N_BATCH entries.
  obs_x = [[] for i in range(OBS_SIZE)]
  obs_y = [[] for i in range(OBS_SIZE)]
  obs_tfs = [[] for i in range(OBS_SIZE)]
  new_ob_x = []
  new_ob_y = []
  new_ob_tf = []
  imgs = []
  for bb in range(N_BATCH):
    trr = traces[bb]
    # generate a hidden variable X
    # get a single thing out
    img = trr.Img
    _x = trr.S
    imgs.append(img)
    x.append(_x)
    # generate a FRESH new observation for demanding an answer
    _new_ob_coord, _new_ob_lab = gen_O(img)
    _new_ob_x, _new_ob_y = vectorize(_new_ob_coord)
    new_ob_x.append(_new_ob_x)
    new_ob_y.append(_new_ob_y)
    new_ob_tf.append(_new_ob_lab)
    # generate observations for this hidden variable x
    for ob_idx in range(OBS_SIZE):
      _ob_coord, _ob_lab = trr.Os[ob_idx]
      _ob_x, _ob_y = vectorize(_ob_coord)
      obs_x[ob_idx].append(_ob_x)
      obs_y[ob_idx].append(_ob_y)
      obs_tfs[ob_idx].append(_ob_lab)
  return np.array(x, np.float32),\
         np.array(obs_x, np.float32),\
         np.array(obs_y, np.float32),\
         np.array(obs_tfs, np.float32),\
         np.array(new_ob_x, np.float32),\
         np.array(new_ob_y, np.float32),\
         np.array(new_ob_tf, np.float32), imgs
# the thing is we do NOT use the trace observations, we need to generate random observations
# to be sure we can handle all kinds of randomizations
def inv_data_from_label_data(labelz, inputz):
  """Build (labels, observation grids) from raw images, drawing FRESH
  random observations per image rather than reusing trace observations.

  Each grid is L x L x 2: channel 0 marks coordinates observed with a
  positive label, channel 1 those observed with a negative label.
  """
  labs = []
  obss = []
  for bb in range(N_BATCH):
    img = inputz[bb]
    lab = labelz[bb]
    labs.append(lab)
    obs = np.zeros([L,L,2])
    # generate observations for this hidden variable x
    for ob_idx in range(OBS_SIZE):
      ob_coord, ob_lab = gen_O(img)
      ox, oy = ob_coord
      if ob_lab[0] == 1.0:
        obs[ox][oy][0] = 1.0
      if ob_lab[1] == 1.0:
        obs[ox][oy][1] = 1.0
    obss.append(obs)
  return np.array(labs, np.float32),\
         np.array(obss, np.float32)
# uses trace info
def inv_batch_obs(labz, batch_Os):
  """Rasterise the stored observations of each trace into an L x L x 2 grid.

  Channel 0 of a cell is set when that coordinate was observed with a
  positive label, channel 1 when observed with a negative label.
  Returns (labels, grids) as float32 arrays.
  """
  grids = []
  for bb in range(N_BATCH):
    trace_obs = batch_Os[bb]
    grid = np.zeros([L, L, 2])
    for ob_idx in range(OBS_SIZE):
      (ox, oy), ob_lab = trace_obs[ob_idx]
      if ob_lab[0] == 1.0:
        grid[ox][oy][0] = 1.0
      if ob_lab[1] == 1.0:
        grid[ox][oy][1] = 1.0
    grids.append(grid)
  return np.array(labz, np.float32), np.array(grids, np.float32)
# def gen_data():
# x = []
#
# obs_x = [[] for i in range(OBS_SIZE)]
# obs_y = [[] for i in range(OBS_SIZE)]
# obs_tfs = [[] for i in range(OBS_SIZE)]
# new_ob_x = []
# new_ob_y = []
# new_ob_tf = []
#
# imgs = []
#
# for bb in range(N_BATCH):
# # generate a hidden variable X
# # get a single thing out
# img, _x = get_img_class()
# imgs.append(img)
#
# # add to x
# x.append(_x[0])
# # generate new observation
# _new_ob_coord, _new_ob_lab = gen_O(img)
# _new_ob_x, _new_ob_y = vectorize(_new_ob_coord)
# new_ob_x.append(_new_ob_x)
# new_ob_y.append(_new_ob_y)
# new_ob_tf.append(_new_ob_lab)
#
# # generate observations for this hidden variable x
# for ob_idx in range(OBS_SIZE):
# _ob_coord, _ob_lab = gen_O(img)
# _ob_x, _ob_y = vectorize(_ob_coord)
# obs_x[ob_idx].append(_ob_x)
# obs_y[ob_idx].append(_ob_y)
# obs_tfs[ob_idx].append(_ob_lab)
#
# return np.array(x, np.float32),\
# np.array(obs_x, np.float32),\
# np.array(obs_y, np.float32),\
# np.array(obs_tfs, np.float32),\
# np.array(new_ob_x, np.float32),\
# np.array(new_ob_y, np.float32),\
# np.array(new_ob_tf, np.float32), imgs
| [
"[email protected]"
] | |
853d732eaf48491fce608ac8d9a44c9258a5567e | 3dca607dcc753d4ffc06b8c4e559becb02174e51 | /Querymaker/__init__.py | 2236651f7ff0edaf5f00f8f2c18dfc7591575090 | [] | no_license | michelsn1/query | e1b51c9f2107a9ec57fd7b010e823b2e2ad3549f | 04f5e8bec6c5deb6487c25c94fba040082dde08e | refs/heads/master | 2020-04-25T17:58:02.450538 | 2019-02-27T18:30:26 | 2019-02-27T18:30:26 | 172,967,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | from Querymaker.Querymaker import Querymaker
| [
"[email protected]"
] | |
c4c19d754d5ca8299ee238a2f3fe86ae2ca8ca8d | da650c454c9f961c38310de25dc9367d19873ae5 | /chalice/blog/models.py | 464b25b4bdeea57b7cd5695b1881c519ed97b021 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | andrimarjonsson/chalice | c356e39ae4fa11419d11725db8429482667b50da | e0140cf415372925aa97ef26ef306a868b66fd3e | refs/heads/master | 2021-03-12T22:08:15.976799 | 2013-04-04T18:14:14 | 2013-04-04T18:14:14 | 3,710,787 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,256 | py | from sqlalchemy.ext.hybrid import hybrid_property
from datetime import datetime
from chalice.extensions import db
from chalice.helpers import slugify
class Post(db.Model):
    """A blog post; writes to `title` also maintain the URL `slug`."""
    __tablename__ = 'posts'
    # -- Columns
    id = db.Column(db.Integer, primary_key = True)
    _title = db.Column('title', db.String(255), nullable = False)
    _slug = db.Column('slug', db.String(255), unique = True, nullable = False)
    text = db.Column(db.Text)
    create_date = db.Column(db.DateTime)
    edit_date = db.Column(db.DateTime)
    # -- Relationships
    # Many to many - Post <-> Tag
    _tags = db.relationship('Tag', secondary='post_tags', backref=db.backref('posts', lazy='dynamic'))
    # -- Methods and properties
    def __init__(self, title, text):
        # Assigning through the `title` property also derives the slug.
        self.title = title
        self.text = text
        self.create_date = datetime.utcnow()
        self.edit_date = datetime.utcnow()
    @hybrid_property
    def title(self):
        """Post title; setting it regenerates the slug."""
        return self._title
    @title.setter
    def title(self, title):
        self._title = title
        # NOTE(review): `unicode` exists only on Python 2 — this module
        # appears to target Python 2.
        self._slug = slugify(unicode(title))
    @hybrid_property
    def tags(self):
        """Tag objects attached to this post."""
        return self._tags
    @tags.setter
    def tags(self, taglist):
        # Replace the whole tag list, reusing existing Tag rows by name.
        self._tags = []
        for tag_name in taglist:
            self._tags.append(Tag.get_or_create(tag_name))
    @hybrid_property
    def slug(self):
        """Read-only URL slug derived from the title."""
        return self._slug
    def __repr__(self):
        return '<Post %s>' % self.slug
class Tag(db.Model):
    """A tag name, shared across posts via the post_tags association table."""
    __tablename__ = 'tags'
    # -- Columns
    id = db.Column(db.Integer, primary_key = True)
    name = db.Column(db.String(), unique = True, nullable = False)
    # -- Classmethods
    @classmethod
    def get_or_create(cls, tagname):
        """Return the existing Tag with this name, or a new unsaved one.

        NOTE(review): the new instance is not added to the session here;
        presumably it is persisted when the owning Post is committed.
        """
        tag = cls.query.filter(cls.name == tagname).first()
        if not tag:
            tag = cls(tagname)
        return tag
    # -- Methods and properties
    def __init__(self, name):
        self.name = name
    def __repr__(self):
        return '<Tag %s>' % self.name
# Association table for Post <-> Tag
post_tags = db.Table('post_tags', db.Model.metadata,
db.Column('post_id', db.Integer, db.ForeignKey('posts.id', ondelete='CASCADE')),
db.Column('tag_id', db.Integer, db.ForeignKey('tags.id', ondelete='CASCADE')))
| [
"[email protected]"
] | |
9a3f3542a14276c1794492528c5d906908c7f791 | 6b9084d234c87d7597f97ec95808e13f599bf9a1 | /evaluation/logger/pytracking.py | 6e17451f77982c297479789660635ffca35a1ee4 | [] | no_license | LitingLin/ubiquitous-happiness | 4b46234ce0cb29c4d27b00ec5a60d3eeb52c26fc | aae2d764e136ca4a36c054212b361dd7e8b22cba | refs/heads/main | 2023-07-13T19:51:32.227633 | 2021-08-03T16:02:03 | 2021-08-03T16:02:03 | 316,664,903 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | import numpy as np
import os
class PyTrackingLogger:
    """Log tracking results in the PyTracking text format.

    Prints per-sequence info and, when an output directory was given,
    writes the predicted boxes (x/y shifted to 1-based) to '<name>.txt'.
    """

    def __init__(self, output_path=None):
        self.output_path = output_path

    def log_sequence_result(self, name: str, predicted_bboxes: np.ndarray, **kwargs):
        print(f'Sequence: {name}')
        print(f'FPS: {kwargs["fps"]}')
        # Work on a copy: convert x/y from 0-based to 1-based without
        # mutating the caller's array.
        shifted = predicted_bboxes.copy()
        shifted[:, 0] += 1
        shifted[:, 1] += 1
        if self.output_path is None:
            return
        out_file = os.path.join(self.output_path, '{}.txt'.format(name))
        np.savetxt(out_file, shifted, delimiter='\t', fmt='%d')
| [
"[email protected]"
] | |
616ea5ba74efee01d8be2e72d9173f4321fde216 | 6c46dac1b14a6614fe2f28830cdf3e34a314b370 | /src/RouteWatch/ais_cleaning_script.py | 291e697fbc4ca8d17cfbf80421cdc34f10c483b2 | [] | no_license | christensenmichael0/christensenmichael0.github.io | 92d455d2fd4d84aad449244fc24bb319216f9137 | 899763a69a2ca9fbc5ed6e748f95e73a995f0b30 | refs/heads/master | 2020-12-24T10:59:01.765103 | 2017-04-02T16:15:03 | 2017-04-02T16:15:03 | 73,206,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,023 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 28 10:32:29 2016
@author: Mike
"""
import os
import datetime

# Load the data.
# NOTE(review): this script is Python 2 -- it uses the file() builtin, and
# the non-raw Windows path below would be a SyntaxError under Python 3.
parent_dir='C:\Users\Mike\Documents\Python Scripts\AIS\HMI_AIS\Single_Day_Gulf'
in_file_name='AIS_data.csv'
full_file_path_in=os.path.join(parent_dir,in_file_name)
out_file_name='AIS_data_cleaned.csv'
full_file_path_out=os.path.join(parent_dir,out_file_name)
f_in=file(full_file_path_in,'r')
f_out=file(full_file_path_out,'w')
counter=0
output_line='dummy'            # seed value so the while condition is true on entry
aggregate_data=[]
# Read the semicolon-delimited AIS dump line by line until EOF (readline()
# returns '' at end of file, which makes len(output_line) falsy).
while len(output_line):
    output_line=f_in.readline().strip()
    if output_line!='':
        split_output=output_line.split(';')
        retrieved_cols=[split_output[0].replace('\"',''),split_output[3],
                        split_output[4], split_output[6],split_output[7],
                        split_output[8], split_output[9],split_output[10],
                        split_output[11], split_output[14],split_output[19]+'\n']
        #[mmsi,date,time,lat,lon,sog,cog,heading,underway_status,ship_type]
        # Keep only complete records for vessels that are under way, with a
        # numeric ship-type code starting with 6/7/8 (passenger/cargo/tanker,
        # per the plotting section below), speed over ground > 5 knots, and
        # field 19 equal to 'V'.
        if (not '' in retrieved_cols) and (split_output[14].isdigit()) \
           and ('under_way' in split_output[11].lower()) and \
           (('6' == split_output[14][0]) or ('7' == split_output[14][0]) or
            ('8' == split_output[14][0])) and float(split_output[8])>5.0 and \
           split_output[19]=='V':
            counter+=1
            write_string=', '.join(retrieved_cols)
            f_out.write(write_string)
            aggregate_data.append(retrieved_cols)
f_in.close()
f_out.close()
# Create the basemap for plotting data.
from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
from collections import defaultdict

plt.close('all')
# create figure and axes instances
fig = plt.figure(figsize=(12,12))
ax = fig.add_axes([0.1,0.1,0.8,0.8])
# Mercator projection covering the Gulf of Mexico (18-31N, 100-78W),
# full-resolution coastlines.
m = Basemap(projection='merc',llcrnrlat=18.,urcrnrlat=31,llcrnrlon=-100., \
            urcrnrlon=-78.,lat_ts=25,resolution='f')
# draw coastlines, state and country boundaries, edge of map.
m.drawcoastlines()
m.drawstates()
m.drawcountries()
m.fillcontinents(color='coral',lake_color='aqua')
# draw parallels.
parallels = np.arange(18.,31.,3.)
m.drawparallels(parallels,labels=[1,0,0,0],fontsize=10)
# draw meridians
meridians = np.arange(-100.,-78.,5.)
m.drawmeridians(meridians,labels=[0,0,0,1],fontsize=10)

# Find unique vessels (by MMSI, column 0) and plot each vessel's track
# in time order.  Running min/max timestamps feed the figure title.
time_max=datetime.datetime.min
time_min=datetime.datetime.max
unique_mmsi=set([x[0] for x in aggregate_data]) #371 unique vessels
ship_grouper=defaultdict(set)
for ship in list(unique_mmsi):
    ship_type=[x[9].strip() for x in aggregate_data if x[0]==ship][0]
    ship_lats=[float(x[3]) for x in aggregate_data if x[0]==ship]
    ship_lons=[float(x[4]) for x in aggregate_data if x[0]==ship]
    ship_datetime_str=[x[1]+' '+x[2] for x in aggregate_data if x[0]==ship]
    ship_datetime_list=[datetime.datetime.strptime(dt,'%d/%m/%Y %H:%M:%S') for dt in ship_datetime_str]
    # Indices that sort this vessel's fixes chronologically.
    time_sort_index=sorted(range(len(ship_datetime_list)), key=lambda k: ship_datetime_list[k])
    # keep track of the running min and max time for the figure title
    time_min=min(min(ship_datetime_list),time_min)
    time_max=max(max(ship_datetime_list),time_max)
    ship_lats_sort=[ship_lats[index] for index in time_sort_index]
    ship_lons_sort=[ship_lons[index] for index in time_sort_index]
    # Project lon/lat to map coordinates.
    convert_x, convert_y = m(ship_lons_sort, ship_lats_sort)
    # Classify by ship-type code.  NOTE(review): the substring test
    # ('6' in ship_type) matches a 6 anywhere in the code, not just the
    # leading digit -- the cleaning step above only guarantees the first
    # digit is 6/7/8; confirm this matches the intended classification.
    if '6' in ship_type:
        use_color='r'
        vname='passenger'
    elif '7' in ship_type:
        use_color='g'
        vname='cargo'
    else:
        use_color='b' #tankers
        vname='tanker'
    ship_grouper[vname].add(ship)
    # plot the different class vessels with their own separate colors
    plt.plot(convert_x,convert_y,'k.-',markersize=10,color=use_color,label=vname)
#    plt.text(convert_x[-1],convert_y[-1],ship,fontsize=8)

# Add the legend (proxy artists, since plt.plot above repeats labels).
red_line = mlines.Line2D([], [], color='red', marker='.',
                          markersize=10, label='Passenger Vessels')
green_line = mlines.Line2D([], [], color='green', marker='.',
                          markersize=10, label='Cargo Vessels')
blue_line = mlines.Line2D([], [], color='blue', marker='.',
                          markersize=10, label='Tanker Vessels')
plt.legend(handles=[red_line,green_line,blue_line],loc=2)
time_min_str=datetime.datetime.strftime(time_min,'%Y-%m-%d %H:%M:%S')
time_max_str=datetime.datetime.strftime(time_max,'%Y-%m-%d %H:%M:%S')
plt.title('T-AIS and S-AIS for Specific Vessel Classes: '+time_min_str + \
          ' -- ' + time_max_str + ' UTC',fontsize=16,fontweight='bold')
# Maximize the figure (Qt backend specific) and save to disk.
figManager = plt.get_current_fig_manager()
figManager.window.showMaximized()
plt.savefig('Single_day_AIS.png', bbox_inches='tight',dpi=150)
#30 passenger vessels
#189 cargo vessels
#151 tanker vessels
| [
"[email protected]"
] | |
bcbef73a96b7eff72d57d60eafee985375118ffe | c3a1f5a4680ea73c1b25a93efe21b6194374969f | /acronymb.py | f6b0a01485bb9053a5a832b3c37dee4f0e6aec83 | [] | no_license | tennyson-mccalla/PPaItCS | 753cf6071c617a6daccaf1a04d9a774559fe1454 | 39bf1a586cfd366ce1b6e86539bc1b40019a9b51 | refs/heads/master | 2021-09-03T23:40:46.474671 | 2018-01-12T22:50:14 | 2018-01-12T22:50:14 | 84,278,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | def main():
print("This program creates a file of acronyms from a")
print("file of phrases.")
infilePhrase = input("What file are the phrases in?: ")
outfilePhrase = input("What file do the acronyms go in?: ")
infile = open(infilePhrase, "r")
outfile = open(outfilePhrase, "w")
for phrase in infile:
Sphrase = phrase.split()
ac = ""
for w in Sphrase:
ac = ac + w[0]
UAC = ac.upper()
print("The acronym is {0}".format(UAC))
print(UAC, file = outfile)
infile.close()
outfile.close()
print("Acronyms have been written to", outfilePhrase)
main()
| [
"[email protected]"
] | |
2cc99d4c4c0a1b67ee46321cced5de73e72fae1a | 9ec55734202ff6aa36cf8dae51ba27d3b8afa0ea | /scripts/arguments.py | 5f3a9d82e9a3279e9428cab44ad7fd6b458cec5d | [
"MIT"
] | permissive | AymanMukh/superquadric_parsing | d46bd833b4ec286032121093e463374c81dfbc8a | 40750abdbb2e01c1ed3708bbcb901daed5e29348 | refs/heads/master | 2020-07-03T22:14:32.251669 | 2019-08-09T11:05:11 | 2019-08-09T11:05:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,314 | py |
def add_voxelizer_parameters(parser):
    """Register CLI options that control how network inputs are voxelized."""
    parser.add_argument(
        "--voxelizer_factory",
        choices=[
            "occupancy_grid",
            "tsdf_grid",
            "image"
        ],
        default="occupancy_grid",
        help="The voxelizer factory to be used (default=occupancy_grid)"
    )
    # A string like "32,32,32" is converted to the tuple (32, 32, 32);
    # argparse also applies the converter to the string default.
    parser.add_argument(
        "--grid_shape",
        type=lambda x: tuple(map(int, x.split(","))),
        default="32,32,32",
        help="The dimensionality of the voxel grid (default=(32, 32, 32)"
    )
    parser.add_argument(
        "--save_voxels_to",
        default=None,
        help="Path to save the voxelised input to the network"
    )
    parser.add_argument(
        "--image_shape",
        type=lambda x: tuple(map(int, x.split(","))),
        default="3,137,137",
        help="The dimensionality of the voxel grid (default=(3,137,137)"
    )
def add_training_parameters(parser):
    """Add arguments to a parser that are related with the training of the
    network.
    """
    parser.add_argument(
        "--epochs",
        type=int,
        default=150,
        help="Number of times to iterate over the dataset (default=150)"
    )
    parser.add_argument(
        "--steps_per_epoch",
        type=int,
        default=500,
        help=("Total number of steps (batches of samples) before declaring one"
              " epoch finished and starting the next epoch (default=500)")
    )
    parser.add_argument(
        "--batch_size",
        type=int,
        default=32,
        help="Number of samples in a batch (default=32)"
    )
    parser.add_argument(
        "--lr",
        type=float,
        default=1e-3,
        help="Learning rate (default 1e-3)"
    )
    # NOTE: argparse applies ``type`` to string defaults too.  The original
    # used ``map(int, ...)`` which, under Python 3, produced a one-shot
    # iterator that was exhausted after the first use; a list comprehension
    # yields a reusable list on both Python 2 and 3.
    parser.add_argument(
        "--lr_epochs",
        type=lambda x: [int(t) for t in x.split(",")],
        default="500,1000,1500",
        help="Training epochs with diminishing learning rate"
    )
    parser.add_argument(
        "--lr_factor",
        type=float,
        default=1.0,
        help=("Factor according to which the learning rate will be diminished"
              " (default=None)")
    )
    parser.add_argument(
        "--optimizer",
        choices=["Adam", "SGD"],
        default="Adam",
        help="The optimizer to be used (default=Adam)"
    )
    parser.add_argument(
        "--momentum",
        type=float,
        default=0.9,
        help=("Parameter used to update momentum in case of SGD optimizer"
              " (default=0.9)")
    )
def add_dataset_parameters(parser):
    """Register CLI options that select the dataset and its sampling."""
    parser.add_argument(
        "--dataset_type",
        default="shapenet_quad",
        choices=[
            "shapenet_quad",
            "shapenet_v1",
            "shapenet_v2",
            "surreal_bodies",
            "dynamic_faust"
        ],
        help="The type of the dataset type to be used"
    )
    parser.add_argument(
        "--n_points_from_mesh",
        type=int,
        default=1000,
        help="The maximum number of points to sample from mesh (default=1e3)"
    )
    # Colon-separated tag list, e.g. "chair:table" -> ["chair", "table"];
    # the default is already a list, so the converter is not applied to it.
    parser.add_argument(
        "--model_tags",
        type=lambda x: x.split(":"),
        default=[],
        help="The tags to the model to be used for testing",
    )
def add_nn_parameters(parser):
    """Add arguments to control the design of the neural network architecture.
    """
    parser.add_argument(
        "--architecture",
        choices=["tulsiani", "octnet", "resnet18"],
        default="tulsiani",
        help="Choose the architecture to train"
    )
    parser.add_argument(
        "--n_encoder_layers",
        type=int,
        default=5,
        help="Number of repeated stacked layers for encoder (default=5)"
    )
    parser.add_argument(
        "--kernel_size",
        type=int,
        default=3,
        help="Kernel size (default=3)"
    )
    parser.add_argument(
        "--padding",
        type=int,
        default=1,
        help="Padding (default=1)"
    )
    parser.add_argument(
        "--stride",
        type=int,
        default=1,
        help="Stride (default=1)"
    )
    parser.add_argument(
        "--train_with_bernoulli",
        action="store_true",
        help="Learn the Bernoulli priors during training"
    )
    parser.add_argument(
        "--make_dense",
        action="store_true",
        help="When true use an additional FC before its regressor"
    )
def add_tsdf_fusion_parameters(parser):
    """Add arguments that convert a mesh to a tsdf grid
    (virtual camera, rendering and fusion settings).
    """
    parser.add_argument(
        "--n_views",
        type=int,
        default=20,
        help="Number of depth maps per model (default=20)"
    )
    # "640,640"-style strings become int tuples (argparse also converts
    # the string defaults).
    parser.add_argument(
        "--image_size",
        type=lambda x: tuple(map(int, x.split(","))),
        default="640,640",
        help="The size of the rendered depth map (default=640,640)"
    )
    parser.add_argument(
        "--focal_length",
        type=lambda x: tuple(map(int, x.split(","))),
        default="640,640",
        help="The focal length along the x, y axis (default=640,640)"
    )
    parser.add_argument(
        "--principal_point",
        type=lambda x: tuple(map(int, x.split(","))),
        default="320,320",
        help=("The principal point location along the x, y axis"
              " (default=320,320)")
    )
    parser.add_argument(
        "--resolution",
        type=int,
        default=32,
        help="Voxel resolution (default=32)"
    )
    parser.add_argument(
        "--depth_offset_factor",
        type=float,
        default=1.0,
        help="Depthmaps are offsetted using depth_offset_factor*voxel_size"
    )
    parser.add_argument(
        "--truncation_factor",
        type=float,
        default=1.25,
        help=("Truncation for the TSDF is derived as "
              "truncation_factor*voxel_size")
    )
def add_regularizer_parameters(parser):
    """Register CLI options selecting and weighting the shape regularizers."""
    # One or more regularizer names may be given (nargs="+").
    parser.add_argument(
        "--regularizer_type",
        choices=[
            "bernoulli_regularizer",
            "entropy_bernoulli_regularizer",
            "parsimony_regularizer",
            "overlapping_regularizer",
            "sparsity_regularizer"
        ],
        nargs="+",
        default=[],
        help=("The type of the regularizer on the shapes to be used"
              " (default=None)")
    )
    parser.add_argument(
        "--bernoulli_regularizer_weight",
        type=float,
        default=0.0,
        help=("The importance of the regularization term on Bernoulli priors"
              " (default=0.0)")
    )
    parser.add_argument(
        "--maximum_number_of_primitives",
        type=int,
        default=5000,
        help=("The maximum number of primitives in the predicted shape "
              " (default=5000)")
    )
    parser.add_argument(
        "--minimum_number_of_primitives",
        type=int,
        default=5,
        help=("The minimum number of primitives in the predicted shape "
              " (default=5)")
    )
    parser.add_argument(
        "--entropy_bernoulli_regularizer_weight",
        type=float,
        default=0.0,
        help=("The importance of the regularizer term on the entropy of"
              " the bernoullis (default=0.0)")
    )
    parser.add_argument(
        "--sparsity_regularizer_weight",
        type=float,
        default=0.0,
        help="The weight on the sparsity regularizer (default=0.0)"
    )
    parser.add_argument(
        "--parsimony_regularizer_weight",
        type=float,
        default=0.0,
        help="The weight on the parsimony regularizer (default=0.0)"
    )
    parser.add_argument(
        "--overlapping_regularizer_weight",
        type=float,
        default=0.0,
        help="The weight on the overlapping regularizer (default=0.0)"
    )
    parser.add_argument(
        "--enable_regularizer_after_epoch",
        type=int,
        default=0,
        help="Epoch after which regularizer is enabled (default=10)"
    )
    parser.add_argument(
        "--w1",
        type=float,
        default=0.005,
        help="The weight on the first term of the sparsity regularizer (default=0.005)"
    )
    parser.add_argument(
        "--w2",
        type=float,
        default=0.005,
        help="The weight on the second term of the sparsity regularizer (default=0.005)"
    )
def add_sq_mesh_sampler_parameters(parser):
    """Register CLI options for sampling points on superquadric meshes."""
    parser.add_argument(
        "--D_eta",
        type=float,
        default=0.05,
        help="Step along the eta (default=0.05)"
    )
    parser.add_argument(
        "--D_omega",
        type=float,
        default=0.05,
        help="Step along the omega (default=0.05)"
    )
    parser.add_argument(
        "--n_points_from_sq_mesh",
        type=int,
        default=180,
        help="Number of points to sample from the mesh of the SQ (default=180)"
    )
def add_gaussian_noise_layer_parameters(parser):
    """Register CLI options for the optional Gaussian-noise layer."""
    parser.add_argument(
        "--add_gaussian_noise",
        action="store_true",
        help="Add Gaussian noise in the layers"
    )
    parser.add_argument(
        "--mu",
        type=float,
        default=0.0,
        help="Mean value of the Gaussian distribution"
    )
    parser.add_argument(
        "--sigma",
        type=float,
        default=0.001,
        help="Standard deviation of the Gaussian distribution"
    )
def add_loss_parameters(parser):
    """Register CLI options selecting the loss and its term weights."""
    parser.add_argument(
        "--loss_type",
        default="euclidean_dual_loss",
        choices=[
            "euclidean_dual_loss"
        ],
        help="The type of the loss to be used"
    )
    parser.add_argument(
        "--prim_to_pcl_loss_weight",
        default=1.0,
        type=float,
        help=("The importance of the primitive-to-pointcloud loss in the "
              "final loss (default = 1.0)")
    )
    parser.add_argument(
        "--pcl_to_prim_loss_weight",
        default=1.0,
        type=float,
        help=("The importance of the pointcloud-to-primitive loss in the "
              "final loss (default = 1.0)")
    )
def add_loss_options_parameters(parser):
    """Register boolean flags that tweak the loss computation."""
    parser.add_argument(
        "--use_sq",
        action="store_true",
        help="Use Superquadrics as geometric primitives"
    )
    parser.add_argument(
        "--use_cuboids",
        action="store_true",
        help="Use cuboids as geometric primitives"
    )
    parser.add_argument(
        "--use_chamfer",
        action="store_true",
        help="Use the chamfer distance"
    )
def voxelizer_shape(args):
    """Return the input-shape tuple for the selected voxelizer factory.

    Raises:
        ValueError: for an unknown ``args.voxelizer_factory`` (the original
        silently returned ``None``, deferring the failure to the caller).
    """
    if args.voxelizer_factory == "occupancy_grid":
        return args.grid_shape
    elif args.voxelizer_factory == "image":
        return args.image_shape
    elif args.voxelizer_factory == "tsdf_grid":
        return (args.resolution,)*3
    raise ValueError(
        "Unknown voxelizer_factory: {}".format(args.voxelizer_factory)
    )
def get_loss_weights(args):
    """Build the per-term weight dict for the dual loss from parsed args.

    Missing attributes fall back to a weight of 1.0.
    """
    options = vars(args)
    return {
        "pcl_to_prim_weight": options.get("pcl_to_prim_loss_weight", 1.0),
        "prim_to_pcl_weight": options.get("prim_to_pcl_loss_weight", 1.0),
    }
def get_loss_options(args):
    """Assemble the loss-configuration dict (flags plus term weights)."""
    weights = get_loss_weights(args)
    arg_map = vars(args)
    # Every flag defaults to False when absent from the parsed arguments.
    return {
        "use_sq": arg_map.get("use_sq", False),
        "use_cuboids": arg_map.get("use_cuboids", False),
        "use_chamfer": arg_map.get("use_chamfer", False),
        "loss_weights": weights,
    }
| [
"[email protected]"
] | |
6bf462112c68e100b92acc5b9b8ed814e8f09d27 | ef4a1748a5bfb5d02f29390d6a66f4a01643401c | /algorithm/new_teacher_algorithm/AD/도약.py | 5c781e9d4bc4c9a28efdc8ca127c58b5528ef92d | [] | no_license | websvey1/TIL | aa86c1b31d3efc177df45503d705b3e58b800f8e | 189e797ba44e2fd22a033d1024633f9e0128d5cf | refs/heads/master | 2023-01-12T10:23:45.677578 | 2019-12-09T07:26:59 | 2019-12-09T07:26:59 | 162,102,142 | 0 | 1 | null | 2022-12-11T16:31:08 | 2018-12-17T08:57:58 | Python | UTF-8 | Python | false | false | 2,093 | py | import sys
sys.stdin = open("도약.txt")
###########################################################
########################## 두개 쓰기 ########################
###########################################################
# def lowerSearch(s,e,f):
# # f 이상 중에서 가장 작은 값의 위치를 리턴
# sol = -1
# while s<=e:
# m = (s+e)//2
# if data[m] >= f: # f 이상이면 왼쪽영역 재탐색(더 작은 값 찾기 위해)
# sol = m
# e = m-1
# else:
# s= m+1 #우측탐색)
# return sol
#
# def upperSearch(s,e,f):
# # f 이하중에서 가장 큰 값의 위치를 리턴
# sol = -1
# while s<=e:
# m = (s+e)//2
# if data[m] <= f: # 데이타 이하면 오른쪽 재탐색(더 큰걸 찾기위해)
# sol = m
# s = m+1
# else:
# e= m-1
# return sol
# N = int(input())
# data = sorted([(int(input())) for i in range(N)])
# cnt = 0
# for i in range(N-2):
# for j in range(i+1, N-1):
# S = data[j]+(data[j]-data[i])
# E = data[j] + (data[j] - data[i])*2
# lo = lowerSearch(j+1, N-1, S)
# if lo==-1 or data[lo]>E: continue
# up = upperSearch(j+1, N-1, E)
# cnt += (up-lo+1)
# print(cnt)
###########################################################
########################## 하나 쓰기########################
###########################################################
def upperSearch(s, e, f):
    # Binary search over the module-level sorted list ``data``: returns the
    # index of the last element of data[s..e] that is strictly less than f,
    # or -1 if every element in the range is >= f.
    # (Original Korean comment said "return the position of the largest
    # value <= f"; the active code uses a strict ``<`` comparison.)
    sol = -1
    while s <= e:
        m = (s + e) // 2
        if data[m] < f:
            # Candidate found; keep searching to the right for a later one.
            s = m + 1
            sol = m
        else:
            e = m - 1
    return sol
# Read N stone positions, sort them, and for every ordered start pair
# (i, j) count how many landing stones lie within jump distance: with
# d = data[j] - data[i], a landing position p is valid when
# data[j] + d <= p <= data[j] + 2*d.
N = int(input())
data = sorted([(int(input())) for i in range(N)])
cnt = 0
for i in range(N-2):
    for j in range(i+1, N-1):
        # Closed interval [S, E] of valid landing positions.
        S = data[j]+(data[j]-data[i])
        E = data[j] + (data[j] - data[i])*2
        # Count elements < E+1 minus elements < S over the same index
        # range [j, N-1]; the index difference equals the element count.
        cnt += upperSearch(j, N- 1, E+1) - upperSearch(j, N-1, S)
print(cnt)
| [
"[email protected]"
] | |
6822bcb3b1c87fd53497fffdb8c8b67432f9ece8 | 06f6f206858f6d20128aa58871a6b77b63296921 | /EXERCÍCIOS DE CLASS, POO, TKINTER, PYMYSQL/Exercicios de POO.CARROS.py | d396f628be5e7e5fc018017a51ba8d30f806373a | [
"MIT"
] | permissive | candido00/Python | cd9512c53e342dcd40154690bb21a02fd1a27256 | 4b6721d55828191ca2ede0c5a0c36f6f5afdad55 | refs/heads/master | 2020-08-13T02:06:35.639144 | 2019-10-13T20:18:38 | 2019-10-13T20:18:38 | 214,887,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,850 | py | import tkinter
janela = tkinter.Tk()
janela.geometry("400x500")
titulo= tkinter.Label(janela, text= "calculadora do prejuizo")
titulo.pack()
frameinserir= tkinter.Frame(janela)
frameinserir.pack()
taxaLabel= tkinter.Label(frameinserir,text=" Taxa de Juros:")
taxaLabel.pack(side="left")
InserirEntry1= tkinter.Entry(frameinserir)
InserirEntry1.pack(side="right")
#-------------------------------------------------------------------------------
frameinserir= tkinter.Frame(janela)
frameinserir.pack()
textLabel= tkinter.Label(frameinserir,text="Valor Solicitado:")
textLabel.pack(side="left")
InserirEntry2= tkinter.Entry(frameinserir)
InserirEntry2.pack(side="right")
#-------------------------------------------------------------------------------
frameinserir= tkinter.Frame(janela)
frameinserir.pack()
textLabel= tkinter.Label(frameinserir,text=" Nº Parcelas:")
textLabel.pack(side="left")
InserirEntry3= tkinter.Entry(frameinserir)
InserirEntry3.pack(side="right")
#------------------------------------------------------------------------------
def TAXAJUROS():
float(InserirEntry.get()/100)
pass
def VALORSOLICITADO():
float(InserirEntry2.get())
pass
def NPARCELAS():
float(InserirEntry3.get())
pass
def CALCULAR():
taxa=float(InserirEntry1.get())/100*float(InserirEntry2.get())
valortotal= float(InserirEntry2.get())+taxa
parcelas= float(valortotal/(float(InserirEntry3.get())))
texto=("Resultado: \n Valor total: ",valortotal,"\n Parcela de: ",parcelas)
text.insert("insert",texto)
pass
#-------------------------------------------------------------------------------
frameinserir= tkinter.Frame(janela)
frameinserir.pack()
botao= tkinter.Button(frameinserir,text="Calcular",command=CALCULAR)
botao.pack(side="right")
text= tkinter.Text(janela)
text.pack()
| [
"[email protected]"
] | |
6bd108b17991c69a5e2e6b32d15557bac57258d4 | 767621262f1c758713cd77630e5f3c1ce1027737 | /contrib/seeds/makeseeds.py | 545592aa188f967497074688fb619016e0bc6b1b | [
"MIT"
] | permissive | ckxxz/abacoin | 534b0ce7d6077df4c67ef113c8eccb05f649a8da | 089b3b0594ced2fed1ae576060e411455277b903 | refs/heads/master | 2020-03-28T15:24:35.448647 | 2018-08-03T04:33:32 | 2018-08-03T04:33:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,783 | py | #!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
# Target number of seed entries to emit.
NSEEDS=512
# Emit at most this many IPv4 seeds per autonomous system.
MAX_SEEDS_PER_ASN=2
# Reject nodes whose reported chain height is below this.
MIN_BLOCKS = 337600

# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
    "130.211.129.106", "178.63.107.226",
    "83.81.130.26", "88.198.17.7", "148.251.238.178", "176.9.46.6",
    "54.173.72.127", "54.174.10.182", "54.183.64.54", "54.194.231.211",
    "54.66.214.167", "54.66.220.137", "54.67.33.14", "54.77.251.214",
    "54.94.195.96", "54.94.200.247"
}

import re
import sys
import dns.resolver
import collections

# Address formats accepted from the seeder dump: "a.b.c.d:port",
# "[ipv6]:port", and "<16-char-base32>.onion:port"; plus the whitelist of
# acceptable user-agent strings.
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/Satoshi:0.13.(0|1|2|99)/|/AbacoinCore:0.13.(0|1|2|99)/|/AbacoinCore:0.14.(0|1|2|99)/|/AbacoinCore:0.15.(0|1|2|99)/)$")
def parseline(line):
    """Parse one line of seeder output into a node-info dict.

    The expected format is whitespace-separated columns:
    address, good-flag, last-success timestamp, four short-window uptime
    columns, 30-day uptime (e.g. "100.00%"), block height, service bits
    (hex), protocol version, quoted user agent.

    Returns None for malformed or uninteresting lines.
    """
    sline = line.split()
    # The agent field is sline[11], so a valid line needs at least 12
    # columns.  (The original guard checked "< 11", which let 11-column
    # lines through and raised IndexError below.)
    if len(sline) < 12:
        return None
    m = PATTERN_IPV4.match(sline[0])
    sortkey = None
    ip = None
    if m is None:
        m = PATTERN_IPV6.match(sline[0])
        if m is None:
            m = PATTERN_ONION.match(sline[0])
            if m is None:
                return None
            else:
                net = 'onion'
                ipstr = sortkey = m.group(1)
                port = int(m.group(2))
        else:
            net = 'ipv6'
            if m.group(1) in ['::']: # Not interested in localhost
                return None
            ipstr = m.group(1)
            sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
            port = int(m.group(2))
    else:
        # Do IPv4 sanity check
        ip = 0
        for i in range(0,4):
            if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
                return None
            ip = ip + (int(m.group(i+2)) << (8*(3-i)))
        if ip == 0:
            return None
        net = 'ipv4'
        sortkey = ip
        ipstr = m.group(1)
        port = int(m.group(6))
    # Skip results the seeder marked as not currently good.
    # (The original compared the string column to the int 0 -- always
    # False, so bad results were never actually skipped.)
    if sline[1] == "0":
        return None
    # Extract uptime %.
    uptime30 = float(sline[7][:-1])
    # Extract Unix timestamp of last success.
    lastsuccess = int(sline[2])
    # Extract protocol version.
    version = int(sline[10])
    # Extract user agent (strip the surrounding quotes).
    agent = sline[11][1:-1]
    # Extract service flags.
    service = int(sline[9], 16)
    # Extract blocks.
    blocks = int(sline[8])
    # Construct result.
    return {
        'net': net,
        'ip': ipstr,
        'port': port,
        'ipnum': ip,
        'uptime': uptime30,
        'lastsuccess': lastsuccess,
        'version': version,
        'agent': agent,
        'service': service,
        'blocks': blocks,
        'sortkey': sortkey,
    }
def filtermultiport(ips):
    '''Filter out hosts with more nodes per IP.

    Groups entries by their 'sortkey' and keeps only the entries whose key
    occurs exactly once, preserving first-seen key order.
    '''
    by_key = collections.defaultdict(list)
    for entry in ips:
        by_key[entry['sortkey']].append(entry)
    result = []
    for group in by_key.values():
        if len(group) == 1:
            result.append(group[0])
    return result
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv4 by ASN
result = []
asn_count = {}
for ip in ips_ipv4:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
def main():
    """Read seeder dump lines from stdin and print the selected seeds.

    Pipeline: parse -> drop suspicious hosts -> enforce minimum height,
    service bit 1, uptime and user-agent checks -> sort by quality ->
    de-duplicate multi-port hosts -> cap per-ASN and total -> print in
    deterministic address order.
    """
    lines = sys.stdin.readlines()
    ips = [parseline(line) for line in lines]
    # Skip entries with valid address.
    ips = [ip for ip in ips if ip is not None]
    # Skip entries from suspicious hosts.
    ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
    # Enforce minimal number of blocks.
    ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
    # Require service bit 1.
    ips = [ip for ip in ips if (ip['service'] & 1) == 1]
    # Require at least 50% 30-day uptime.
    ips = [ip for ip in ips if ip['uptime'] > 50]
    # Require a known and recent user agent.
    ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
    # Sort by availability (and use last success as tie breaker)
    ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
    # Filter out hosts with multiple bitcoin ports, these are likely abusive
    ips = filtermultiport(ips)
    # Look up ASNs and limit results, both per ASN and globally.
    ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
    # Sort the results by IP address (for deterministic output).
    ips.sort(key=lambda x: (x['net'], x['sortkey']))
    for ip in ips:
        if ip['net'] == 'ipv6':
            print('[%s]:%i' % (ip['ip'], ip['port']))
        else:
            # IPv4 and onion addresses share the host:port format.
            print('%s:%i' % (ip['ip'], ip['port']))

if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
2dd695de592207a29117c4190a29f7a409cce12d | 3fc887bb0758437ec38d1bf91a5744b7dd869f80 | /migrations/versions/5a5d28dae37f_.py | 81e48e94f8fc201b949b2dd63c6eb0e655126ada | [] | no_license | row-yanbing/Job-back-end | 9ec3814fb90afbe1816615603c9101f1356c8dbb | 1df34a65758072d28d147b246acb82633faf9e53 | refs/heads/master | 2023-08-14T17:07:21.576143 | 2021-10-06T14:20:06 | 2021-10-06T14:20:06 | 338,720,165 | 0 | 0 | null | 2021-02-14T03:19:40 | 2021-02-14T03:19:39 | null | UTF-8 | Python | false | false | 860 | py | """empty message
Revision ID: 5a5d28dae37f
Revises: 00fd219bf242
Create Date: 2020-04-23 13:05:31.421385
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5a5d28dae37f'
down_revision = '00fd219bf242'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('company', schema=None) as batch_op:
batch_op.add_column(sa.Column('isVerify', sa.SmallInteger(), nullable=True, comment='是否通过审核 1-待审核 2-通过 3-未通过'))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('company', schema=None) as batch_op:
batch_op.drop_column('isVerify')
# ### end Alembic commands ###
| [
"[email protected]"
] | |
b563da1a4aa94a36c4599e6482162f6ded7d93e9 | 5b2218208aef68cf06609bcc3bf42b499d99d5f6 | /docs/source/conf.py | e94bc7c7e29be180973b828865b19e7712c33ad6 | [
"MIT"
] | permissive | c137digital/unv_app_template | c36cacfff3e0be0b00ecad6365b20b434836ffe7 | a1d1f2463334afc668cbf4e8acbf1dcaacc93e80 | refs/heads/master | 2020-05-25T19:24:17.098451 | 2020-04-24T19:33:08 | 2020-04-24T19:33:08 | 187,950,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,816 | py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# -- Project information -----------------------------------------------------
project = 'unv_app_template'
copyright = '2020, change'
author = 'change'
# The short X.Y version
version = '0.1'
# The full version, including alpha/beta/rc tags
release = '0.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'unv_app_templatedoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'unv_app_template.tex', 'unv\\_template Documentation',
'change', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'unv_app_template', 'unv_app_template Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'unv_app_template', 'unv_app_template Documentation',
author, 'unv_app_template', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| [
"[email protected]"
] | |
882b159715ff76036db8533e308519b5fac19989 | 84bf5f6dcc31624624464a694626bff9a9985070 | /app/web_config.py | a21a9f61f76341109c168aa82c1d4e4e6689f28c | [
"MIT"
] | permissive | TrixiS/web-bot | 82346bab82dc1ed4b76f4e51d72d6b33aa68ad92 | 523d02deb30993eb8ab5071ad78111719becd7c7 | refs/heads/master | 2023-06-21T17:48:49.212662 | 2021-08-09T17:06:02 | 2021-08-09T17:06:02 | 392,773,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,560 | py | import json
from pathlib import Path
from typing import List
import aiofiles
from pydantic import BaseModel, Field
config_path = Path(__file__).parent / "../config.json"
class Config(BaseModel):
    """User-editable bot settings persisted in config.json."""

    # Bot API token; None until configured.
    # NOTE(review): declared ``str`` but defaults to None — presumably
    # intended as Optional[str]; confirm.
    bot_token: str = Field(None, title="Токен бота")
    # Message prefixes that mark a command.
    command_prefixes: List[str] = Field(["!", "!!"], title="Префиксы команд")
class Phrases(BaseModel):
    """UI text shown by the web control panel (values are user-editable)."""

    # {bot.user} is substituted at runtime when the bot starts.
    bot_started: str = Field("Бот {bot.user} успешно запущен")
    bot: str = Field("Управление ботом")
    bot_start: str = Field("Запустить")
    bot_reload: str = Field("Перезапустить")
    bot_kill: str = Field("Остановить")
    save: str = Field("Сохранить")
    logs: str = Field("Логи")
    config_error_alert: str = Field("Конфиг заполнен неверно")
class WebConfig(BaseModel):
    """Top-level persisted configuration: bot settings plus UI phrases.

    Loaded from and saved to ``config.json`` in the project root
    (see ``config_path`` above).
    """

    # NOTE(review): pydantic ignores underscore-prefixed names as fields;
    # this looks like an (unused?) singleton slot — confirm.
    _instance: "WebConfig" = None

    config: Config = Field(Config(), title="Конфиг")
    phrases: Phrases = Field(Phrases(), title="Фразы")

    @classmethod
    async def load(cls) -> "WebConfig":
        """Read config.json (creating it if missing) and parse it.

        Falls back to all-default values when the file is empty or
        contains invalid JSON.
        """
        if not config_path.exists():
            config_path.touch()

        async with aiofiles.open(config_path, "r", encoding="utf-8") as f:
            try:
                json_content = json.loads(await f.read())
            except json.JSONDecodeError:
                # Empty/corrupt file: start from defaults.
                json_content = {}

        return cls.parse_obj(json_content)

    async def save(self):
        """Serialize this configuration back to config.json."""
        async with aiofiles.open(config_path, "w", encoding="utf-8") as f:
            await f.write(self.json())
| [
"[email protected]"
] | |
990c0d14f1a9a11941085c4fae1209efd43555c4 | e9988eb38fd515baa386d8b06bb7cce30c34c50d | /sitevenv/lib/python2.7/site-packages/django/utils/translation/trans_real.py | 6ab071dabaf28cf2d985efd0f10d8189984cabaf | [] | no_license | Arrrrrrrpit/Hire_station | 8c2f293677925d1053a4db964ee504d78c3738d8 | f33f044628082f1e034484b5c702fd66478aa142 | refs/heads/master | 2020-07-01T01:24:18.190530 | 2016-09-25T20:33:05 | 2016-09-25T20:33:05 | 201,007,123 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,622 | py | """Translation helper functions."""
from __future__ import unicode_literals
import gettext as gettext_module
import os
import re
import sys
import warnings
from collections import OrderedDict
from threading import local
from django.apps import apps
from django.conf import settings
from django.conf.locale import LANG_INFO
from django.core.exceptions import AppRegistryNotReady
from django.core.signals import setting_changed
from django.dispatch import receiver
from django.utils import lru_cache, six
from django.utils._os import upath
from django.utils.encoding import force_text
from django.utils.safestring import SafeData, mark_safe
from django.utils.six import StringIO
from django.utils.translation import (
LANGUAGE_SESSION_KEY, TranslatorCommentWarning, trim_whitespace,
)
# Translations are cached in a dictionary for every language.
# The active translations are stored by threadid to make them thread local.
_translations = {}
_active = local()
# The default translation is based on the settings file.
_default = None
# magic gettext number to separate context from message
CONTEXT_SEPARATOR = "\x04"
# Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9
# and RFC 3066, section 2.1
accept_language_re = re.compile(r'''
([A-Za-z]{1,8}(?:-[A-Za-z0-9]{1,8})*|\*) # "en", "en-au", "x-y-z", "es-419", "*"
(?:\s*;\s*q=(0(?:\.\d{,3})?|1(?:.0{,3})?))? # Optional "q=1.00", "q=0.8"
(?:\s*,\s*|$) # Multiple accepts per header.
''', re.VERBOSE)
language_code_re = re.compile(
r'^[a-z]{1,8}(?:-[a-z0-9]{1,8})*(?:@[a-z0-9]{1,20})?$',
re.IGNORECASE
)
language_code_prefix_re = re.compile(r'^/([\w@-]+)(/|$)')
@receiver(setting_changed)
def reset_cache(**kwargs):
    """
    Invalidate the language-related lru_caches whenever the LANGUAGES or
    LANGUAGE_CODE setting changes, since previously accepted languages may
    no longer be valid.
    """
    if kwargs['setting'] not in ('LANGUAGES', 'LANGUAGE_CODE'):
        return
    check_for_language.cache_clear()
    get_languages.cache_clear()
    get_supported_language_variant.cache_clear()
def to_locale(language, to_lower=False):
    """
    Convert a language name (en-us) into a locale name (en_US).

    If *to_lower* is True, the region part is lower-cased instead (en_us).
    """
    head, sep, tail = language.partition('-')
    if not sep:
        # No region subtag at all: 'EN' -> 'en'.
        return language.lower()
    if to_lower:
        return head.lower() + '_' + tail.lower()
    if len(tail) > 2:
        # Script-style subtags are title-cased: 'sr-latn' -> 'sr_Latn'.
        return head.lower() + '_' + tail[0].upper() + tail[1:].lower()
    # Two-letter country codes are upper-cased: 'en-us' -> 'en_US'.
    return head.lower() + '_' + tail.upper()
def to_language(locale):
    """Convert a locale name (en_US) into a language name (en-us)."""
    head, sep, tail = locale.partition('_')
    if sep:
        return head.lower() + '-' + tail.lower()
    return locale.lower()
class DjangoTranslation(gettext_module.GNUTranslations):
    """
    This class sets up the GNUTranslations context with regard to output
    charset.

    This translation object will be constructed out of multiple GNUTranslations
    objects by merging their catalogs. It will construct an object for the
    requested language and add a fallback to the default language, if it's
    different from the requested language.
    """
    # Default gettext domain; overridable per instance via __init__.
    domain = 'django'

    def __init__(self, language, domain=None, localedirs=None):
        """Create a GNUTranslations() using many locale directories"""
        gettext_module.GNUTranslations.__init__(self)
        if domain is not None:
            self.domain = domain
        self.set_output_charset('utf-8')  # For Python 2 gettext() (#25720)

        # Name-mangled attributes: language name ('de-at'), its dashed form,
        # and its locale form ('de_AT').
        self.__language = language
        self.__to_language = to_language(language)
        self.__locale = to_locale(language)
        self._catalog = None

        if self.domain == 'django':
            if localedirs is not None:
                # A module-level cache is used for caching 'django' translations
                warnings.warn("localedirs is ignored when domain is 'django'.", RuntimeWarning)
                localedirs = None
            self._init_translation_catalog()

        if localedirs:
            # Custom domain with explicit locale dirs: merge only those.
            for localedir in localedirs:
                translation = self._new_gnu_trans(localedir)
                self.merge(translation)
        else:
            # Merge app catalogs first, then LOCALE_PATHS so the latter win.
            self._add_installed_apps_translations()
            self._add_local_translations()

        if self.__language == settings.LANGUAGE_CODE and self.domain == 'django' and self._catalog is None:
            # default lang should have at least one translation file available.
            raise IOError("No translation files found for default language %s." % settings.LANGUAGE_CODE)
        self._add_fallback(localedirs)
        if self._catalog is None:
            # No catalogs found for this language, set an empty catalog.
            self._catalog = {}

    def __repr__(self):
        return "<DjangoTranslation lang:%s>" % self.__language

    def _new_gnu_trans(self, localedir, use_null_fallback=True):
        """
        Returns a mergeable gettext.GNUTranslations instance.

        A convenience wrapper. By default gettext uses 'fallback=False'.
        Using param `use_null_fallback` to avoid confusion with any other
        references to 'fallback'.
        """
        return gettext_module.translation(
            domain=self.domain,
            localedir=localedir,
            languages=[self.__locale],
            codeset='utf-8',
            fallback=use_null_fallback)

    def _init_translation_catalog(self):
        """Creates a base catalog using global django translations."""
        # The locale dir shipped next to Django's own settings module.
        settingsfile = upath(sys.modules[settings.__module__].__file__)
        localedir = os.path.join(os.path.dirname(settingsfile), 'locale')
        translation = self._new_gnu_trans(localedir)
        self.merge(translation)

    def _add_installed_apps_translations(self):
        """Merges translations from each installed app."""
        try:
            # Reversed so that earlier apps in INSTALLED_APPS override later ones.
            app_configs = reversed(list(apps.get_app_configs()))
        except AppRegistryNotReady:
            raise AppRegistryNotReady(
                "The translation infrastructure cannot be initialized before the "
                "apps registry is ready. Check that you don't make non-lazy "
                "gettext calls at import time.")
        for app_config in app_configs:
            localedir = os.path.join(app_config.path, 'locale')
            translation = self._new_gnu_trans(localedir)
            self.merge(translation)

    def _add_local_translations(self):
        """Merges translations defined in LOCALE_PATHS."""
        # Reversed so that earlier LOCALE_PATHS entries take precedence.
        for localedir in reversed(settings.LOCALE_PATHS):
            translation = self._new_gnu_trans(localedir)
            self.merge(translation)

    def _add_fallback(self, localedirs=None):
        """Sets the GNUTranslations() fallback with the default language."""
        # Don't set a fallback for the default language or any English variant
        # (as it's empty, so it'll ALWAYS fall back to the default language)
        if self.__language == settings.LANGUAGE_CODE or self.__language.startswith('en'):
            return
        if self.domain == 'django':
            # Get from cache
            default_translation = translation(settings.LANGUAGE_CODE)
        else:
            default_translation = DjangoTranslation(
                settings.LANGUAGE_CODE, domain=self.domain, localedirs=localedirs
            )
        self.add_fallback(default_translation)

    def merge(self, other):
        """Merge another translation into this catalog."""
        if not getattr(other, '_catalog', None):
            return  # NullTranslations() has no _catalog
        if self._catalog is None:
            # Take plural and _info from first catalog found (generally Django's).
            self.plural = other.plural
            self._info = other._info.copy()
            self._catalog = other._catalog.copy()
        else:
            self._catalog.update(other._catalog)

    def language(self):
        """Returns the translation language."""
        return self.__language

    def to_language(self):
        """Returns the translation language name."""
        return self.__to_language
def translation(language):
    """Return the (cached) default-'django'-domain translation for *language*."""
    global _translations
    try:
        return _translations[language]
    except KeyError:
        # First request for this language: build and memoize the object.
        trans = DjangoTranslation(language)
        _translations[language] = trans
        return trans
def activate(language):
    """
    Install the translation object for *language* as the active one for the
    current thread. A falsy language is a no-op.
    """
    if language:
        _active.value = translation(language)
def deactivate():
    """
    Remove the current thread's active translation so later lookups resolve
    against the default translation object again.
    """
    try:
        del _active.value
    except AttributeError:
        # Nothing was active for this thread.
        pass
def deactivate_all():
    """
    Make the active translation a no-op NullTranslations() instance, so that
    delayed translations appear as their original strings.
    """
    null_translation = gettext_module.NullTranslations()
    # get_language() calls to_language() on the active object; make it
    # report "no language".
    null_translation.to_language = lambda *args: None
    _active.value = null_translation
def get_language():
    """Return the currently selected language code."""
    current = getattr(_active, "value", None)
    if current is None:
        # No real translation object active: assume the default language.
        return settings.LANGUAGE_CODE
    try:
        return current.to_language()
    except AttributeError:
        # Not a translation-like object: fall back to the default language.
        return settings.LANGUAGE_CODE
def get_language_bidi():
    """
    Returns selected language's BiDi layout.

    * False = left-to-right layout
    * True = right-to-left layout
    """
    lang = get_language()
    if lang is None:
        # Translations deactivated (get_language() patched to return None):
        # assume left-to-right.
        return False
    # Reuse the already-fetched code instead of calling get_language() a
    # second time (the original called it twice for no reason).
    base_lang = lang.split('-')[0]
    return base_lang in settings.LANGUAGES_BIDI
def catalog():
    """
    Return the current active catalog for further processing.

    Useful when the whole message catalog is needed rather than a single
    translated string.
    """
    global _default
    active = getattr(_active, "value", None)
    if active is None:
        # Lazily build the default-language translation once.
        _default = _default or translation(settings.LANGUAGE_CODE)
        return _default
    return active
def do_translate(message, translation_function):
    """
    Translates 'message' using the given 'translation_function' name -- which
    will be either gettext or ugettext. It uses the current thread to find the
    translation object to use. If no current translation is activated, the
    message will be run through the default translation object.
    """
    global _default

    # str() is allowing a bytestring message to remain bytestring on Python 2
    eol_message = message.replace(str('\r\n'), str('\n')).replace(str('\r'), str('\n'))

    if len(eol_message) == 0:
        # Returns an empty value of the corresponding type if an empty message
        # is given, instead of metadata, which is the default gettext behavior.
        result = type(message)("")
    else:
        _default = _default or translation(settings.LANGUAGE_CODE)
        translation_object = getattr(_active, "value", _default)

        result = getattr(translation_object, translation_function)(eol_message)

    if isinstance(message, SafeData):
        # Preserve the "safe for HTML" marking across translation.
        return mark_safe(result)

    return result


def gettext(message):
    """
    Returns a string of the translation of the message.

    Returns a string on Python 3 and an UTF-8-encoded bytestring on Python 2.
    """
    return do_translate(message, 'gettext')


if six.PY3:
    # Python 3 has no separate bytestring API: ugettext is just an alias.
    ugettext = gettext
else:
    def ugettext(message):
        # Unicode-returning variant for Python 2.
        return do_translate(message, 'ugettext')


def pgettext(context, message):
    # Contextual translation: the context is glued to the msgid with the
    # gettext CONTEXT_SEPARATOR control character (\x04).
    msg_with_ctxt = "%s%s%s" % (context, CONTEXT_SEPARATOR, message)
    result = ugettext(msg_with_ctxt)
    if CONTEXT_SEPARATOR in result:
        # Translation not found
        # force unicode, because lazy version expects unicode
        result = force_text(message)
    return result


def gettext_noop(message):
    """
    Marks strings for translation but doesn't translate them now. This can be
    used to store strings in global variables that should stay in the base
    language (because they might be used externally) and will be translated
    later.
    """
    return message
def do_ntranslate(singular, plural, number, translation_function):
    # Shared implementation for ngettext/ungettext: dispatch to the thread's
    # active translation, or to the lazily created default one.
    global _default

    t = getattr(_active, "value", None)
    if t is not None:
        return getattr(t, translation_function)(singular, plural, number)
    if _default is None:
        _default = translation(settings.LANGUAGE_CODE)
    return getattr(_default, translation_function)(singular, plural, number)


def ngettext(singular, plural, number):
    """
    Returns a string of the translation of either the singular or plural,
    based on the number.

    Returns a string on Python 3 and an UTF-8-encoded bytestring on Python 2.
    """
    return do_ntranslate(singular, plural, number, 'ngettext')


if six.PY3:
    # Python 3 has no separate bytestring API: ungettext is just an alias.
    ungettext = ngettext
else:
    def ungettext(singular, plural, number):
        """
        Returns a unicode strings of the translation of either the singular or
        plural, based on the number.
        """
        return do_ntranslate(singular, plural, number, 'ungettext')


def npgettext(context, singular, plural, number):
    # Contextual plural translation; falls back to plain ungettext when no
    # context-tagged translation exists.
    msgs_with_ctxt = ("%s%s%s" % (context, CONTEXT_SEPARATOR, singular),
                      "%s%s%s" % (context, CONTEXT_SEPARATOR, plural),
                      number)
    result = ungettext(*msgs_with_ctxt)
    if CONTEXT_SEPARATOR in result:
        # Translation not found
        result = ungettext(singular, plural, number)
    return result
def all_locale_paths():
    """
    Returns a list of paths to user-provides languages files.
    """
    # The 'locale' dir next to the settings module, followed by any
    # configured LOCALE_PATHS entries.
    globalpath = os.path.join(
        os.path.dirname(upath(sys.modules[settings.__module__].__file__)), 'locale')
    return [globalpath] + list(settings.LOCALE_PATHS)


@lru_cache.lru_cache(maxsize=1000)
def check_for_language(lang_code):
    """
    Checks whether there is a global language file for the given language
    code. This is used to decide whether a user-provided language is
    available.

    lru_cache should have a maxsize to prevent from memory exhaustion attacks,
    as the provided language codes are taken from the HTTP request. See also
    <https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
    """
    # First, a quick check to make sure lang_code is well-formed (#21458)
    if lang_code is None or not language_code_re.search(lang_code):
        return False
    # Accept the language as soon as any locale path has a .mo for it.
    for path in all_locale_paths():
        if gettext_module.find('django', path, [to_locale(lang_code)]) is not None:
            return True
    return False


@lru_cache.lru_cache()
def get_languages():
    """
    Cache of settings.LANGUAGES in an OrderedDict for easy lookups by key.
    """
    return OrderedDict(settings.LANGUAGES)
@lru_cache.lru_cache(maxsize=1000)
def get_supported_language_variant(lang_code, strict=False):
    """
    Returns the language-code that's listed in supported languages, possibly
    selecting a more generic variant. Raises LookupError if nothing found.

    If `strict` is False (the default), the function will look for an alternative
    country-specific variant when the currently checked is not found.

    lru_cache should have a maxsize to prevent from memory exhaustion attacks,
    as the provided language codes are taken from the HTTP request. See also
    <https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
    """
    if lang_code:
        # If 'fr-ca' is not supported, try special fallback or language-only 'fr'.
        possible_lang_codes = [lang_code]
        try:
            # LANG_INFO may declare explicit fallback chains for some codes.
            possible_lang_codes.extend(LANG_INFO[lang_code]['fallback'])
        except KeyError:
            pass
        generic_lang_code = lang_code.split('-')[0]
        possible_lang_codes.append(generic_lang_code)
        supported_lang_codes = get_languages()

        for code in possible_lang_codes:
            if code in supported_lang_codes and check_for_language(code):
                return code
        if not strict:
            # if fr-fr is not supported, try fr-ca.
            for supported_code in supported_lang_codes:
                if supported_code.startswith(generic_lang_code + '-'):
                    return supported_code
    # Nothing matched (or lang_code was falsy).
    raise LookupError(lang_code)
def get_language_from_path(path, strict=False):
    """
    Return the supported language code appearing as the first segment of
    *path*, or None when the path has no (usable) language prefix.

    With strict=False (the default), an alternative country-specific variant
    may be substituted for an unsupported code.
    """
    match = language_code_prefix_re.match(path)
    if match is None:
        return None
    try:
        return get_supported_language_variant(match.group(1), strict=strict)
    except LookupError:
        return None
def get_language_from_request(request, check_path=False):
    """
    Analyzes the request to find what language the user wants the system to
    show. Only languages listed in settings.LANGUAGES are taken into account.
    If the user requests a sublanguage where we have a main language, we send
    out the main language.

    If check_path is True, the URL path prefix will be checked for a language
    code, otherwise this is skipped for backwards compatibility.
    """
    # Precedence: URL path prefix (optional) -> session -> cookie ->
    # Accept-Language header -> settings.LANGUAGE_CODE.
    if check_path:
        lang_code = get_language_from_path(request.path_info)
        if lang_code is not None:
            return lang_code

    supported_lang_codes = get_languages()

    if hasattr(request, 'session'):
        lang_code = request.session.get(LANGUAGE_SESSION_KEY)
        # Session value must be an exactly supported code with a catalog.
        if lang_code in supported_lang_codes and lang_code is not None and check_for_language(lang_code):
            return lang_code

    lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
    try:
        return get_supported_language_variant(lang_code)
    except LookupError:
        pass

    accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
    # Entries come back sorted by descending q-value.
    for accept_lang, unused in parse_accept_lang_header(accept):
        if accept_lang == '*':
            break

        if not language_code_re.search(accept_lang):
            continue

        try:
            return get_supported_language_variant(accept_lang)
        except LookupError:
            continue

    try:
        return get_supported_language_variant(settings.LANGUAGE_CODE)
    except LookupError:
        # Even the configured default has no catalog: return it anyway.
        return settings.LANGUAGE_CODE
# Matches any single non-whitespace character (used by blankout()).
dot_re = re.compile(r'\S')


def blankout(src, char):
    """
    Changes every non-whitespace character to the given char.

    Used in the templatize function.
    """
    return dot_re.sub(char, src)


# Regexes used by templatize() to recognize {% trans %} / {% blocktrans %}
# tags and _("...") constants inside template tokens.
context_re = re.compile(r"""^\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?'))\s*""")
inline_re = re.compile(
    # Match the trans 'some text' part
    r"""^\s*trans\s+((?:"[^"]*?")|(?:'[^']*?'))"""
    # Match and ignore optional filters
    r"""(?:\s*\|\s*[^\s:]+(?::(?:[^\s'":]+|(?:"[^"]*?")|(?:'[^']*?')))?)*"""
    # Match the optional context part
    r"""(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?\s*"""
)
block_re = re.compile(r"""^\s*blocktrans(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?(?:\s+|$)""")
endblock_re = re.compile(r"""^\s*endblocktrans$""")
plural_re = re.compile(r"""^\s*plural$""")
constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?'))\)""")
def templatize(src, origin=None):
    """
    Turns a Django template into something that is understood by xgettext. It
    does so by translating the Django translation tags into standard gettext
    function invocations.

    Implemented as a single pass over the template's lexer tokens with three
    states: inside a {% comment %} block, inside a {% trans/blocktrans %}
    block, or outside both.
    """
    from django.template.base import (
        Lexer, TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK, TOKEN_COMMENT,
        TRANSLATOR_COMMENT_MARK,
    )
    src = force_text(src, settings.FILE_CHARSET)
    out = StringIO('')
    message_context = None
    intrans = False
    inplural = False
    trimmed = False
    singular = []
    plural = []
    incomment = False
    comment = []
    lineno_comment_map = {}
    comment_lineno_cache = None
    # Adding the u prefix allows gettext to recognize the Unicode string
    # (#26093).
    raw_prefix = 'u' if six.PY3 else ''

    def join_tokens(tokens, trim=False):
        message = ''.join(tokens)
        if trim:
            message = trim_whitespace(message)
        return message

    for t in Lexer(src).tokenize():
        if incomment:
            # Inside {% comment %} ... {% endcomment %}: emit translator
            # comment lines only from the first "Translators:" marker on.
            if t.token_type == TOKEN_BLOCK and t.contents == 'endcomment':
                content = ''.join(comment)
                translators_comment_start = None
                for lineno, line in enumerate(content.splitlines(True)):
                    if line.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
                        translators_comment_start = lineno
                for lineno, line in enumerate(content.splitlines(True)):
                    if translators_comment_start is not None and lineno >= translators_comment_start:
                        out.write(' # %s' % line)
                    else:
                        out.write(' #\n')
                incomment = False
                comment = []
            else:
                comment.append(t.contents)
        elif intrans:
            # Inside {% blocktrans %}: accumulate singular/plural parts
            # until {% endblocktrans %}.
            if t.token_type == TOKEN_BLOCK:
                endbmatch = endblock_re.match(t.contents)
                pluralmatch = plural_re.match(t.contents)
                if endbmatch:
                    if inplural:
                        if message_context:
                            out.write(' npgettext({p}{!r}, {p}{!r}, {p}{!r},count) '.format(
                                message_context,
                                join_tokens(singular, trimmed),
                                join_tokens(plural, trimmed),
                                p=raw_prefix,
                            ))
                        else:
                            out.write(' ngettext({p}{!r}, {p}{!r}, count) '.format(
                                join_tokens(singular, trimmed),
                                join_tokens(plural, trimmed),
                                p=raw_prefix,
                            ))
                        for part in singular:
                            out.write(blankout(part, 'S'))
                        for part in plural:
                            out.write(blankout(part, 'P'))
                    else:
                        if message_context:
                            out.write(' pgettext({p}{!r}, {p}{!r}) '.format(
                                message_context,
                                join_tokens(singular, trimmed),
                                p=raw_prefix,
                            ))
                        else:
                            out.write(' gettext({p}{!r}) '.format(
                                join_tokens(singular, trimmed),
                                p=raw_prefix,
                            ))
                        for part in singular:
                            out.write(blankout(part, 'S'))
                    message_context = None
                    intrans = False
                    inplural = False
                    singular = []
                    plural = []
                elif pluralmatch:
                    inplural = True
                else:
                    filemsg = ''
                    if origin:
                        filemsg = 'file %s, ' % origin
                    raise SyntaxError(
                        "Translation blocks must not include other block tags: "
                        "%s (%sline %d)" % (t.contents, filemsg, t.lineno)
                    )
            elif t.token_type == TOKEN_VAR:
                # {{ var }} becomes a %(var)s placeholder in the msgid.
                if inplural:
                    plural.append('%%(%s)s' % t.contents)
                else:
                    singular.append('%%(%s)s' % t.contents)
            elif t.token_type == TOKEN_TEXT:
                contents = t.contents.replace('%', '%%')
                if inplural:
                    plural.append(contents)
                else:
                    singular.append(contents)
        else:
            # Handle comment tokens (`{# ... #}`) plus other constructs on
            # the same line:
            if comment_lineno_cache is not None:
                cur_lineno = t.lineno + t.contents.count('\n')
                if comment_lineno_cache == cur_lineno:
                    if t.token_type != TOKEN_COMMENT:
                        for c in lineno_comment_map[comment_lineno_cache]:
                            filemsg = ''
                            if origin:
                                filemsg = 'file %s, ' % origin
                            warn_msg = (
                                "The translator-targeted comment '%s' "
                                "(%sline %d) was ignored, because it wasn't "
                                "the last item on the line."
                            ) % (c, filemsg, comment_lineno_cache)
                            warnings.warn(warn_msg, TranslatorCommentWarning)
                        lineno_comment_map[comment_lineno_cache] = []
                else:
                    out.write('# %s' % ' | '.join(lineno_comment_map[comment_lineno_cache]))
                comment_lineno_cache = None

            if t.token_type == TOKEN_BLOCK:
                imatch = inline_re.match(t.contents)
                bmatch = block_re.match(t.contents)
                cmatches = constant_re.findall(t.contents)
                if imatch:
                    # {% trans "..." %} possibly with filters and a context.
                    g = imatch.group(1)
                    if g[0] == '"':
                        g = g.strip('"')
                    elif g[0] == "'":
                        g = g.strip("'")
                    g = g.replace('%', '%%')
                    if imatch.group(2):
                        # A context is provided
                        context_match = context_re.match(imatch.group(2))
                        message_context = context_match.group(1)
                        if message_context[0] == '"':
                            message_context = message_context.strip('"')
                        elif message_context[0] == "'":
                            message_context = message_context.strip("'")
                        out.write(' pgettext({p}{!r}, {p}{!r}) '.format(
                            message_context, g, p=raw_prefix
                        ))
                        message_context = None
                    else:
                        out.write(' gettext({p}{!r}) '.format(g, p=raw_prefix))
                elif bmatch:
                    # {% blocktrans %}: switch into the intrans state.
                    for fmatch in constant_re.findall(t.contents):
                        out.write(' _(%s) ' % fmatch)
                    if bmatch.group(1):
                        # A context is provided
                        context_match = context_re.match(bmatch.group(1))
                        message_context = context_match.group(1)
                        if message_context[0] == '"':
                            message_context = message_context.strip('"')
                        elif message_context[0] == "'":
                            message_context = message_context.strip("'")
                    intrans = True
                    inplural = False
                    trimmed = 'trimmed' in t.split_contents()
                    singular = []
                    plural = []
                elif cmatches:
                    for cmatch in cmatches:
                        out.write(' _(%s) ' % cmatch)
                elif t.contents == 'comment':
                    incomment = True
                else:
                    out.write(blankout(t.contents, 'B'))
            elif t.token_type == TOKEN_VAR:
                # {{ var|filter:_("...") }}: extract _() constants from the
                # variable and its filter arguments.
                parts = t.contents.split('|')
                cmatch = constant_re.match(parts[0])
                if cmatch:
                    out.write(' _(%s) ' % cmatch.group(1))
                for p in parts[1:]:
                    if p.find(':_(') >= 0:
                        out.write(' %s ' % p.split(':', 1)[1])
                    else:
                        out.write(blankout(p, 'F'))
            elif t.token_type == TOKEN_COMMENT:
                if t.contents.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
                    lineno_comment_map.setdefault(t.lineno,
                                                  []).append(t.contents)
                    comment_lineno_cache = t.lineno
            else:
                out.write(blankout(t.contents, 'X'))
    return out.getvalue()
def parse_accept_lang_header(lang_string):
    """
    Parse the body of an HTTP Accept-Language header into a list of
    (lang, q-value) pairs, ordered by descending q-value.

    Any format error in *lang_string* yields an empty list.
    """
    pieces = accept_language_re.split(lang_string.lower())
    if pieces[-1]:
        # Trailing text the grammar could not consume: reject everything.
        return []
    result = []
    # The split yields (garbage, lang, q) triples, plus a final remainder.
    for i in range(0, len(pieces) - 1, 3):
        prefix, lang, priority = pieces[i:i + 3]
        if prefix:
            # Garbage between entries: reject everything.
            return []
        if priority:
            try:
                priority = float(priority)
            except ValueError:
                return []
        if not priority:
            # A missing or zero q-value defaults to 1.0.
            priority = 1.0
        result.append((lang, priority))
    result.sort(key=lambda pair: pair[1], reverse=True)
    return result
| [
"[email protected]"
] | |
a5beb263c9fa0069b8b9ef256681716929079437 | 5ee8a69917ad64c2dd09b7840b7a8d03d92fd884 | /posts/views.py | 06e5beaecdefbea7305e8a43000876adbc5b9e27 | [] | no_license | ylavinia/SimpleSocialForum | 655d76599f31930813647e8e25e8a10d41e05595 | 97f905745277e345d1feb88899dec754769c230e | refs/heads/master | 2021-01-21T22:02:01.067175 | 2017-06-22T21:20:57 | 2017-06-22T21:20:57 | 95,152,805 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,172 | py | from django.shortcuts import render
from django.contrib import messages
# Create your views here.
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.urlresolvers import reverse_lazy
from django.http import Http404
from django.views import generic
from braces.views import SelectRelatedMixin
from . import models
from . import forms
from django.contrib.auth import get_user_model
User = get_user_model()
class PostList(SelectRelatedMixin, generic.ListView):
    """List all posts; select_related avoids per-row user/group queries."""
    model = models.Post
    select_related = ('user', 'group')
class UserPosts(generic.ListView):
    """List the posts belonging to the user named in the URL."""
    model = models.Post
    template_name = 'posts/user_post_list.html'

    def get_queryset(self):
        # Case-insensitive username lookup; unknown users produce a 404.
        try:
            self.post_user = User.objects.prefetch_related('posts').get(username__iexact=self.kwargs.get('username'))
        except User.DoesNotExist:
            raise Http404
        else:
            return self.post_user.posts.all()

    def get_context_data(self, **kwargs):
        # Expose the profile owner to the template as ``post_user``.
        context = super().get_context_data(**kwargs)
        context['post_user'] = self.post_user
        return context
class PostDetail(SelectRelatedMixin, generic.DetailView):
    """Show a single post, scoped to the author named in the URL."""
    model = models.Post
    select_related = ('user', 'group')

    def get_queryset(self):
        queryset = super().get_queryset()
        # Restrict to posts authored by the user named in the URL.
        return queryset.filter(user__username__iexact=self.kwargs.get('username'))
class CreatePost(LoginRequiredMixin, SelectRelatedMixin, generic.CreateView):
    """Create a post, stamping the logged-in user as its author."""
    fields = ('message', 'group')
    model = models.Post

    def form_valid(self, form):
        # The form exposes no user field; attach the request user before
        # the actual save.
        self.object = form.save(commit=False)
        self.object.user = self.request.user
        self.object.save()
        return super().form_valid(form)
class DeletePost(LoginRequiredMixin, SelectRelatedMixin, generic.DeleteView):
    """Delete one of the logged-in user's own posts."""
    model = models.Post
    select_related = ('user', 'group')
    success_url = reverse_lazy('posts:all')

    def get_queryset(self):
        queryset = super().get_queryset()
        # Only allow deleting posts owned by the requesting user.
        return queryset.filter(user_id = self.request.user.id)

    def delete(self, *args, **kwargs):
        # Flash a confirmation message, then let DeleteView do the delete.
        messages.success(self.request, 'Post Deleted')
        return super().delete(*args, **kwargs)
| [
"[email protected]"
] | |
1e3cd8346e9e5a4f5e2ac20f8de1707a62a60c29 | 34f91a492e1102d396f607e435586b2e610bf6cb | /tui_helper.py | 754f90dfe549e744e429b106fc3c3edd2add2490 | [] | no_license | billykong/python_cmd_timekeeper | 2986da76d8c772b0e700ae024ff5adf53de53a54 | 5139282092fda36d794df800829aa2d7651a9014 | refs/heads/master | 2021-01-20T07:43:12.436038 | 2017-05-02T13:24:40 | 2017-05-02T13:24:40 | 90,036,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,832 | py | import controller
from tabulate import tabulate
import lang
def display_menu():
    """Print the main menu of available actions (returns None)."""
    menu = """
    Please choose one of the followings:
    1. Show all unended task
    2. Show today's task
    3. Start a task
    4. End a task
    """
    print(menu)
def get_menu_input():
    """Prompt for a menu choice until the user types an integer.

    Returns the chosen number. The original version discarded the result
    of the recursive retry, so any mistyped first attempt made the
    function return None.
    """
    try:
        return int(input("Enter one digit: "))
    except ValueError:
        print(lang.unrecognised)
        # Propagate the retry's result back to the caller.
        return get_menu_input()
def process_menu_input(user_input):
    """Dispatch a numeric menu choice to its handler.

    Unknown choices (including None) print the 'unavailable' notice.
    """
    handlers = {
        1: show_all_unended_tasks,
        2: show_all_subjects_of_today,
        3: start_a_task,
        4: end_a_task,
    }
    handler = handlers.get(user_input)
    if handler is None:
        print(lang.unavailable)
    else:
        handler()
def show_all_unended_tasks():
    """Print all tasks without an end time as a table and return the rows.

    Rows look like (13, 'quoted', 'None', '19:07:25', None).
    """
    unended_subjects = controller.get_all_unended_subjects()
    # (13, 'quoted', 'None', '19:07:25', None)
    print(tabulate(unended_subjects, headers=["id", "subject", "desc", "start-time", "end-time"]))
    return unended_subjects


def show_all_subjects_of_today():
    """Print today's tasks as a table and return the rows."""
    today_subjects = controller.get_all_subjects_of_today()
    print(tabulate(today_subjects, headers=["id", "subject", "desc", "start-time", "end-time"]))
    return today_subjects
def start_a_task():
    """Prompt for a subject and description, then record the task start."""
    subject = input("Please enter task subject: ")
    note = input("Please enter task description: ")
    task = controller.start_subject(subject, note)
    print("Task started: ", task)
def end_a_task():
    """Let the user pick an unended task, append a note, and end it.

    Fix: the original raised ValueError(lang.unavailable) for an unknown id,
    but that raise was swallowed by the function's own ``except ValueError``
    which printed lang.unrecognised — so the 'unavailable' message could
    never reach the user, and a typo and an invalid id were conflated.
    The try block is now narrowed to the int() parse only.
    """
    unended_subjects = show_all_unended_tasks()
    try:
        some_id = int(input("Please choose a task to end: "))
    except ValueError:
        # Non-numeric input.
        print(lang.unrecognised)
        return
    subject = [x for x in unended_subjects if x[0] == some_id]
    if len(subject) != 1:
        # Numeric, but not one of the listed task ids.
        print(lang.unavailable)
        return
    note = subject[0][2] + "; "
    note += input("Do you want to append some description: ")
    subject = controller.end_subject(some_id, note)
    print("Task ended: ", subject)
| [
"[email protected]"
] | |
f6721ea11faae85216d3a60224be9c9ec0207242 | 3fc4bca70bb817d8c14c2e6eaf29cda765e8cf19 | /venv/bin/pip3.6 | 548512482798bf15606d78d042dcc9e3e66cd6e0 | [] | no_license | OseiasBeu/ExemploDjango | 4841412dca9e15613671951cdcb18ea8d18ff68a | 31e4dfb12b84d217ff989bd7fc0fa5636312b8c4 | refs/heads/master | 2020-03-28T16:05:10.873540 | 2018-09-13T15:22:59 | 2018-09-13T15:22:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | 6 | #!/home/OseiasBeu/Documents/LPs/django/projeto/venv/bin/python3
# -*- coding: utf-8 -*-
# Console-script wrapper for pip, auto-generated by pip inside the venv.
import re
import sys

from pip._internal import main

if __name__ == '__main__':
    # Strip a '-script.pyw' / '.exe' suffix from argv[0] so pip reports a
    # clean program name on every platform, then delegate to pip's main().
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
"[email protected]"
] | |
cc5a08297b64e06ec98a66b7b3640257b6c4b479 | fd63b57b1aa6620e96d2803b27c0d626e086812e | /logic.py | 4ef8d1907a78b1f20cf1f2d4df99dd3dbaec1a72 | [] | no_license | amitkumarx86/scrap | d17e405b467c2ff671ea354fd2fce589ebdbb466 | 8c23b54223c1ae2e07eb17567fabda8a3a0ec5dc | refs/heads/master | 2021-01-20T19:14:25.144989 | 2016-06-02T18:32:06 | 2016-06-02T18:32:06 | 60,284,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,893 | py |
#who = sys.argv[1]
#a=1
#b=2
#c=who
#print a+":"+b+":"+who
import requests
import datetime
import re
import sys
from bs4 import BeautifulSoup
# --- Fetch the repository's issues page (Python 2 script: urlparse) ---------
# Usage: python logic.py <github-repo-url-ending-with-a-slash>
url = sys.argv[1]
url = url + 'issues'
#print url
# url = ' ' + 'issues'
# url = 'https://github.com/Shippable/cexec/' + 'issues'
r = requests.get(url)
soup = BeautifulSoup(r.text)

# Retrieve all the page no where issues are listed
import urlparse
pages = {}
BASE_URL = 'https://github.com'
# This will generate a dictionary with the page index as key and url as the value
# (pagination links other than 'Next', e.g. {'2': 'https://github.com/...?page=2'}).
for all_links in soup.findAll({'div'}, {'class': 'pagination'}):
    for link in all_links.findAll('a'):
        page_index = str(link.extract().text)
        if page_index not in pages.keys() and page_index != 'Next' :
            pages[page_index] = urlparse.urljoin(BASE_URL, link.attrs['href'])
    # NOTE(review): indentation was ambiguous in the reviewed copy; the break
    # is assumed to stop after the first pagination <div> — confirm.
    break
# for page, link in sorted(pages.items()):
# print page, link
# Scrap the first Issues page
# Scrap the first Issues page
# Maps '#<issue number>' -> opening datetime; filled in by crawler().
issue_timestamp_dict = {}


def crawler(soup):
    """Collect each listed issue's opening timestamp from *soup*'s page
    into the module-level issue_timestamp_dict and return that dict."""
    for value in soup.findAll({'span'},{'class' : 'issue-meta-section opened-by'}):
        # e.g. 'opened 2 days ago by user #123' -> '#123'
        issue_no = re.search(r'(#\d+)',str(value.extract().text),re.M|re.I).group()
        # Store all datetime stamp for every listed issue in the current page [page 1] to a dictionary
        for time in value.findAll('relative-time'): # this loop runs for only one iteration
            # Clean the timestamp from this '2016-06-01T04:39:22Z' to this '2016-06-01 04:39:22'
            # and store it as datetime object so it could be operated upon
            date = time.get('datetime').strip('Z').split('T')
            date = datetime.datetime.strptime(' '.join(date), '%Y-%m-%d %H:%M:%S')
            # If the dictionary is empty create a new index key and add current date as its value
            if issue_no not in issue_timestamp_dict.keys():
                issue_timestamp_dict[issue_no] = date
            # print issue_no, date
            break
    return issue_timestamp_dict


crawler(soup)
# Scrap the other pages (if exists)
# for page, link in sorted(pages.items()):
# print page, link
if bool(pages): # if other pages exists, scrap them too
for p_link in sorted(pages.values()):
req = requests.get(p_link)
soup = BeautifulSoup(req.text)
# call the crawler function with the BeautifulSoup object
# and it will populate the dictionary items with new issue_no
# and timestamp values
crawler(soup)
from operator import itemgetter
# - Number of open issues that were opened in the last 24 hours
count_24 = 0
# - Number of open issues that were opened more than 24 hours ago but less than 7 days ago
count_24_7 = 0
# - Number of open issues that were opened more than 7 days ago
count_m7 = 0
# reverse sort the dictionary according to its value and display the result
for k,v in sorted(issue_timestamp_dict.items(), reverse=True, key=itemgetter(1)):
# Geting Present time (Now) in proper format
now = datetime.datetime.now()
now = now.strftime("%Y-%m-%d %H:%M:%S")
present_time = datetime.datetime.strptime(now, '%Y-%m-%d %H:%M:%S')
# Computing the difference b/w present time and timestamp value
time_diff = present_time - v
if time_diff.days == 0:
count_24 += 1
# print 'open issues in the last 24 hours:', k, time_diff
elif time_diff.days in xrange(1,7):
count_24_7 += 1
# print 'open issues in the last 24 hours but less than 7 days:', k, time_diff
elif time_diff.days >= 7:
count_m7 += 1
open_issue_count = len(issue_timestamp_dict)
# print 'Open Issues: ', open_issue_count
# print 'open issues in the last 24 hours:',count_24
# print 'open issues in the last 24 hours but less than 7 days:',count_24_7
# print 'open issues that were opened more than 7 days ago:',count_m7
print str(open_issue_count) + ':' + str(count_24) +':' + str(count_24_7) + ':' + str(count_m7)
| [
"[email protected]"
] | |
5b8ac99e89351630dfc8090ec60540e6aca0a12d | 427bc8a6d7769ea2da8f86a41e704b3d3831a93a | /main.py | 83c95a472bcd0de905eb85e90d475788567bba5e | [
"Apache-2.0"
] | permissive | BoonWei/BPNN | 9ca168e9d43e79a769973286d5eb66864931936a | c5964064910110b5737535ad6ea2a4f79aa46e9b | refs/heads/master | 2023-08-24T05:26:50.363805 | 2021-09-17T04:03:54 | 2021-09-17T04:03:54 | 125,953,881 | 0 | 0 | null | 2021-07-25T08:17:30 | 2018-03-20T03:13:46 | Python | UTF-8 | Python | false | false | 1,459 | py | import pandas as pd
import createdata
import numpy as np
import bp
import random
import matplotlib.pyplot as plt
if __name__ == '__main__':
path = 'D:\homework\picture\picall'
createdata.createcsv(path)
f = open('labels.txt', 'r')
dataset = f.readlines()
f.close()
for i in range(len(dataset)):
dataset[i] = dataset[i].rstrip().split(',')
for j in range(5):
dataset[i][j] = float(dataset[i][j])
train_dataset = dataset[:210]
test_dataset = dataset[210:]
x_train = []
y_train = []
for data in train_dataset:
x_train.append(data[:-1])
if data[5][:1] == 'g':
y_train.append([1, 0, 0])
if data[5][:1] == 'n':
y_train.append([0, 1, 0])
if data[5][:1] == 'r':
y_train.append([0, 0, 1])
x_train = np.array(x_train)
y_train = np.array(y_train)
x_test = []
y_test = []
for data in train_dataset:
x_test.append(data[:-1])
if data[5][:1] == 'g':
y_test.append([1, 0, 0])
if data[5][:1] == 'n':
y_test.append([0, 1, 0])
if data[5][:1] == 'r':
y_test.append([0, 0, 1])
x_test = np.array(x_test)
y_test = np.array(y_test)
net = bp.NNetWork([5, 19, 3])
net.train(x_train, y_train)
acc = bp.ComputeAccuracy(net, x_test, y_test)
print('Accuracy is:', acc)
plt.show()
| [
"[email protected]"
] | |
c91598dd70f6a8c8881bedebf5012d8f0cec9568 | 43cd2ed4873b194bb8726536f109eb3ad6a23251 | /server/hieratika/monitor.py | 2ede936f044cea6a4a537f785f1b6675953a9904 | [] | no_license | aneto0/hieratika | c08b23ee53fb50f28b58e1ca45efd741a34c78a6 | 6740a5a9b1f17eb8d0d4b75f31ac70a2d76bd061 | refs/heads/master | 2021-07-13T07:46:40.558337 | 2018-03-22T10:07:01 | 2018-03-22T10:07:01 | 101,903,167 | 1 | 3 | null | 2021-02-17T08:43:17 | 2017-08-30T16:16:11 | Python | UTF-8 | Python | false | false | 3,225 | py | #!/usr/bin/env python
__copyright__ = """
Copyright 2017 F4E | European Joint Undertaking for ITER and
the Development of Fusion Energy ('Fusion for Energy').
Licensed under the EUPL, Version 1.1 or - as soon they will be approved
by the European Commission - subsequent versions of the EUPL (the "Licence")
You may not use this work except in compliance with the Licence.
You may obtain a copy of the Licence at: http://ec.europa.eu/idabc/eupl
Unless required by applicable law or agreed to in writing,
software distributed under the Licence is distributed on an "AS IS"
basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied. See the Licence permissions and limitations under the Licence.
"""
__license__ = "EUPL"
__author__ = "Andre' Neto"
__date__ = "22/12/2017"
##
# Standard imports
##
from abc import ABCMeta, abstractmethod
import json
import logging
##
# Project imports
##
##
# Logger configuration
##
log = logging.getLogger("{0}".format(__name__))
##
# Class definition
##
class HieratikaMonitor(object):
""" Abstract class for any live variable monitor implementation.
TODO
"""
__metaclass__ = ABCMeta
def __init__(self):
pass
@abstractmethod
def load(self, config):
""" Configures the monitor against a set of parameters. This set of parameters is specific for each transformation implementation.
Args:
config(ConfigParser): the monitor specific implementation parameters are in the section "live-impl".
Returns:
True if the monitor is successfully configured.
"""
pass
@abstractmethod
def getLiveVariablesInfo(self, requestedVariables):
""" Returns all the available information (and meta-information) for all of the requestedVariables.
Args:
requestedVariables ([str]): identifiers of the variables to be queried.
Returns:
A list of Variables ([Variable]) with all the information available for each of the request variables
"""
pass
def loadCommon(self, config):
""" Loads parameters that are common to all monitor implementations.
NOOP as of today.
Args:
config (ConfigParser): parameters that are common to all authenticate implementations:
NONE as of today.
Returns:
True
"""
return True
def setServer(self, server):
""" Sets the server implementation.
Args:
server (Server): the server implementation.
"""
self.server = server
def update(self, variables):
""" To be called any time any of the variable monitored by this implementation has changed.
Args:
variables ({variableName1:value1, variableName2:value2, ...}): dictionary with the variables that have been updated.
Returns:
None
"""
toStream = {
"live": True,
"variables": variables
}
log.debug("Streaming {0}".format(toStream))
self.server.queueStreamData(json.dumps(toStream))
| [
"[email protected]"
] | |
a6c62e3506967f5d10df06b53bdadb1dcb04ff74 | 540705d5879734cf5235b6fe892d6cb8f03a0a05 | /day1/dna_counts.py | afff5b1c7d0ebf0f0d288bf90d3cbdd6ef7b2365 | [] | no_license | ChenYongyan-uu/1 | 16e3f7e6944925f12494d0853fa73d0fc6cdb4d0 | 07c1b0ca5f04e6b84bf890400fc38e90a0acf0d3 | refs/heads/master | 2022-02-02T01:32:52.801289 | 2022-01-17T20:31:55 | 2022-01-17T20:31:55 | 226,325,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,400 | py | def dna_counts(the_file_name, your_name = "user"):
dna_file = open(the_file_name,"r")
dna_data = []
for line in dna_file:
line = line.rstrip()
dna_data += [line]
dna_file.close()
print("Hello " + your_name)
no_of_sequences = len(dna_data)
print("There are " + str(no_of_sequences) + " sequences in this file")
bp_count = []
for xyz in dna_data:
bp_count += [len(xyz)]
print("The average length is " + str(sum(bp_count)/no_of_sequences) + " basepairs")
print("The longest sequence is " + str(max(bp_count)) + " basepairs")
print("The shortest sequence is " + str(min(bp_count)) + " basepairs")
import matplotlib.pyplot as plt
nuc_count = []
for xyz in dna_data:
nuc = {"a":xyz.count("a"),"t":xyz.count("t"),"c":xyz.count("c"),"g":xyz.count("g")}
nuc_count += [nuc]
x0 = [1.0,2.0,3.0,4.0]
gene0 = nuc_count[0].values()
plt.bar( x0,gene0 ,width=.25)
x1 = [1.25,2.25,3.25,4.25]
gene1 = nuc_count[1].values()
plt.bar(x1, gene1,width=.25)
x2 = [1.5,2.5,3.5,4.5]
gene2 = nuc_count[2].values()
plt.bar(x2,gene2,width=.25)
plt.xticks(x1 , ('a', 't', 'c', 'g'))
plt.ylabel('Nucleotide count')
plt.title('Nucleotide per sequence')
plt.legend(labels = ["gene1","gene2","gene3"],loc='best')
| [
"[email protected]"
] | |
1340d0d10842a305d175f3f789088bd4f7e18bf1 | 193a87875097e10bc518581a63ecaacb1f4bd131 | /Aula4 - Vetores e estrutura de repetição/usuarioSenha.py | ba39f74d45823f41cbf1b74808ac8446213da690 | [] | no_license | jaugustomachado/Curso-Next-Python | f9be5fabf445cc79bb252b811be237fb4d706548 | 2d50785b78781fb166b61dc586d6e60bf9504788 | refs/heads/main | 2023-08-13T13:47:15.171016 | 2021-10-01T21:09:09 | 2021-10-01T21:09:09 | 404,890,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | #Faça um programa que leia um nome de usuário e a sua senha e não aceite
#a senha igual ao nome do usuário, mostrando uma mensagem de erro
#e voltando a pedir as informações.
usuario=input('informe o usuário: ')
senha= input('informe a senha: ')
while usuario == senha:
print('usuário e senha não podem ser iguais, favor informar novamente os dados: ')
usuario=input('informe o usuário: ')
senha= input('informe a senha: ')
else:
print('dados aceitos com sucesso') | [
"[email protected]"
] | |
e32bd0130a28604d940e0a1e7d79496057d8a0cb | 66a9c25cf0c53e2c3029b423018b856103d709d4 | /tests/live_test.py | b71930af68b02cc6137cb3b01a6f80f39c0ef9f3 | [
"MIT",
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | fritzy/SleekXMPP | 1b02d3e2b22efeb6bf3f8f487e6c0343b9b85baf | cc1d470397de768ffcc41d2ed5ac3118d19f09f5 | refs/heads/develop | 2020-05-22T04:14:58.568822 | 2020-02-18T22:54:57 | 2020-02-18T22:54:57 | 463,405 | 658 | 254 | NOASSERTION | 2023-06-27T20:05:54 | 2010-01-08T05:54:45 | Python | UTF-8 | Python | false | false | 3,422 | py | import logging
from sleekxmpp.test import *
class TestLiveStream(SleekTest):
"""
Test that we can test a live stanza stream.
"""
def tearDown(self):
self.stream_close()
def testClientConnection(self):
"""Test that we can interact with a live ClientXMPP instance."""
self.stream_start(mode='client',
socket='live',
skip=False,
jid='user@localhost/test',
password='user')
# Use sid=None to ignore any id sent by the server since
# we can't know it in advance.
self.recv_header(sfrom='localhost', sid=None)
self.send_header(sto='localhost')
self.recv_feature("""
<stream:features>
<starttls xmlns="urn:ietf:params:xml:ns:xmpp-tls" />
<mechanisms xmlns="urn:ietf:params:xml:ns:xmpp-sasl">
<mechanism>DIGEST-MD5</mechanism>
<mechanism>PLAIN</mechanism>
</mechanisms>
</stream:features>
""")
self.send_feature("""
<starttls xmlns="urn:ietf:params:xml:ns:xmpp-tls" />
""")
self.recv_feature("""
<proceed xmlns="urn:ietf:params:xml:ns:xmpp-tls" />
""")
self.send_header(sto='localhost')
self.recv_header(sfrom='localhost', sid=None)
self.recv_feature("""
<stream:features>
<mechanisms xmlns="urn:ietf:params:xml:ns:xmpp-sasl">
<mechanism>DIGEST-MD5</mechanism>
<mechanism>PLAIN</mechanism>
</mechanisms>
</stream:features>
""")
self.send_feature("""
<auth xmlns="urn:ietf:params:xml:ns:xmpp-sasl"
mechanism="PLAIN">AHVzZXIAdXNlcg==</auth>
""")
self.recv_feature("""
<success xmlns="urn:ietf:params:xml:ns:xmpp-sasl" />
""")
self.send_header(sto='localhost')
self.recv_header(sfrom='localhost', sid=None)
self.recv_feature("""
<stream:features>
<bind xmlns="urn:ietf:params:xml:ns:xmpp-bind" />
<session xmlns="urn:ietf:params:xml:ns:xmpp-session" />
</stream:features>
""")
# Should really use send, but our Iq stanza objects
# can't handle bind element payloads yet.
self.send_feature("""
<iq type="set" id="1">
<bind xmlns="urn:ietf:params:xml:ns:xmpp-bind">
<resource>test</resource>
</bind>
</iq>
""")
self.recv_feature("""
<iq type="result" id="1">
<bind xmlns="urn:ietf:params:xml:ns:xmpp-bind">
<jid>user@localhost/test</jid>
</bind>
</iq>
""")
self.stream_close()
suite = unittest.TestLoader().loadTestsFromTestCase(TestLiveStream)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG,
format='%(levelname)-8s %(message)s')
tests = unittest.TestSuite([suite])
result = unittest.TextTestRunner(verbosity=2).run(tests)
test_ns = 'http://andyet.net/protocol/tests'
print("<tests xmlns='%s' %s %s %s %s />" % (
test_ns,
'ran="%s"' % result.testsRun,
'errors="%s"' % len(result.errors),
'fails="%s"' % len(result.failures),
'success="%s"' % result.wasSuccessful()))
| [
"[email protected]"
] | |
71bfd188e3307f50316b5807460e05e6b0dab81e | 0be27c0a583d3a8edd5d136c091e74a3df51b526 | /int_long.py | 09d9178607925a32fd93bcf2ea90ca80acb00f96 | [] | no_license | ssangitha/guvicode | 3d38942f5d5e27a7978e070e14be07a5269b01fe | ea960fb056cfe577eec81e83841929e41a31f72e | refs/heads/master | 2020-04-15T05:01:00.226391 | 2019-09-06T10:08:23 | 2019-09-06T10:08:23 | 164,405,935 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | n=int(input())
if(n>=-2**15+1 and n<=2**15+1):
print ("INT")
elif n>=-2**31+1 and n<=2**31+1:
print("LONG")
else:
print ("LONG LONG")
#..int,long...longlong
| [
"[email protected]"
] | |
0b08075f38eedd6834e8eb3bf7563882dbd171af | 558af6d8842d3f8c5292346d76c01ed6ac246789 | /project1/wiki/encyclopedia/views.py | ae9d74cea1d834e4c144477e72f63f6d66da3c92 | [] | no_license | anhsirk0/CS50web-2020 | 81497bc0d594ae80f2a261ec06fc2be2f2f191d8 | eee1239bda5d9edc566460bd874fa65ea7ae67cb | refs/heads/master | 2023-04-12T02:18:06.763262 | 2021-05-20T05:45:01 | 2021-05-20T05:45:01 | 360,774,319 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,626 | py | import glob, random
import markdown2
from django.shortcuts import render, redirect
from django.http import HttpResponse
from . import util
def index(request):
return render(request, "encyclopedia/index.html", {
"entries": util.list_entries()
})
def entry(request, name):
ls = glob.glob("entries/*.md")
html = f"<h1>Entry page for '{name}' does not not exist</h1>"
for item in ls:
file = item.lower().split("/")[-1]
if name.lower() == file.split(".")[0]:
content = open(item).read()
html = markdown2.markdown(content)
return render(request, "encyclopedia/entry.html", {"content": html, "title": name})
return HttpResponse(html)
def search(request):
q = request.GET.get('q')
query = q.lower()
ls = glob.glob("entries/*.md")
new = []
for file in ls:
name = file.split("/")[-1].split(".")[0]
# exact match
if query == name.lower():
content = open(file).read()
result = markdown2.markdown(content)
return redirect(entry,name=query)
# partial match
l = len(query)
n = len(name)
if not l > n:
for i in range(n - l+1):
if query == name.lower()[i:i+l]:
new.append(name)
break
if new:
content = f"<h1>Search Results for ' {q} '</h1>"
return render(request, "encyclopedia/search.html", {"content": content,"entries": new})
# no match
result = f"<h1>No match found for ' {q} '</h1>"
return render(request, "encyclopedia/search.html", {"content": result})
def new(request):
if request.method == 'POST':
title = request.POST.get('title')
title = title[0].upper()+title[1:]
content = request.POST.get('content')
util.save_entry(title, content)
return redirect(entry,name=title)
entries = util.list_entries()
page_title = "New - Encyclopedia"
return render(request, "encyclopedia/new.html", {
"page_title": page_title ,
"entries": entries
})
def edit(request):
title = request.GET.get("title")
content = util.get_entry(title)
entries = util.list_entries()
entries.remove(title)
page_title = f"Edit - {title} - Encyclopedia"
return render(request, "encyclopedia/edit.html", {
"entries": entries,
"page_title": page_title,
"title": title,
"content": content
})
def random_page(request):
entries = util.list_entries()
title = random.choice(entries)
return redirect(entry,name=title)
| [
"[email protected]"
] | |
294b4e24dbc704233648dbe74cabe2a45921a0c7 | 2c377e5b4f2e67cd02c24c253a038cf4bcf546e6 | /navbuilder/tests/test_admin.py | 724cdb0862fd25c9b2edbe01e160204d78113ea5 | [] | permissive | praekelt/django-navbuilder | 9694d3c3c65b0b7c2633e15712302b5f9cf1e47f | 0524aca22f2a8be2b1f55770cdc1b6c5dee9916d | refs/heads/develop | 2021-06-23T11:16:11.992949 | 2018-05-10T12:55:38 | 2018-05-10T12:55:38 | 65,980,831 | 0 | 0 | BSD-3-Clause | 2021-06-10T20:16:28 | 2016-08-18T08:45:28 | Python | UTF-8 | Python | false | false | 1,470 | py | from django.contrib.auth import get_user_model
from django.test import TestCase
from django.test.client import Client
from navbuilder.tests.test_base import load_fixtures
class AdminTestCase(TestCase):
def setUp(self):
load_fixtures(self)
self.client = Client()
self.editor = get_user_model().objects.create(
username="editor",
email="[email protected]",
is_superuser=True,
is_staff=True
)
self.editor.set_password("password")
self.editor.save()
self.client.login(username="editor", password="password")
def test_admin(self):
response = self.client.get("/admin/")
self.assertEqual(response.status_code, 200)
def test_admin_menu(self):
response = self.client.get("/admin/navbuilder/menu/")
self.assertEqual(response.status_code, 200)
response = self.client.get("/admin/navbuilder/menu/add/")
self.assertEqual(response.status_code, 200)
def test_admin_menuitem(self):
response = self.client.get("/admin/navbuilder/menuitem/")
self.assertEqual(response.status_code, 200)
self.assertContains(response, self.menuitem.title)
self.assertContains(response, self.menuitem.link.get_absolute_url())
response = self.client.get("/admin/navbuilder/menuitem/add/")
self.assertEqual(response.status_code, 200)
def tearDown(self):
self.client.logout()
| [
"[email protected]"
] | |
b9d4afd84040448b1f03dc8701452126429a079b | 2ba3d16bf0aab3cb9478ba145749575724373d66 | /testingAndReadingSensors/ecReader.py | 7f4439edcb470a26fe298c6e7f559a5f7482bfce | [] | no_license | phadjian/hydroponics | 0316be4e094007dacb07d71f4f2ee373e101f874 | 41f2e9e9de91ef73a9159d6ec139fb755582dccf | refs/heads/master | 2021-02-16T17:49:15.426964 | 2020-05-04T19:20:26 | 2020-05-04T19:20:26 | 245,031,128 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | # ecReader.py
# Gia Trinh
# ecSim.py
# Gia Trinh
# Input: probe --> float
# Output: I2C --> float
# Input & output range: 0.07 - 500000 μS/cm
# Returns a value in μS/cm
def ec(value):
return value
| [
"[email protected]"
] | |
89189e31f7eff193f8991a28da369417a28ae86d | 68cd659b44f57adf266dd37789bd1da31f61670d | /D2/D2_20190715파리퇴치.py | 37273d877cfb93458b8b8fdef4531e610039777c | [] | no_license | 01090841589/solved_problem | c0c6f5a46e4d48860dccb3b0288aa5b56868fbca | bbea2f31e5fe36cad100bc514eacd83545fb25b1 | refs/heads/master | 2023-07-02T23:55:51.631478 | 2021-08-04T13:57:00 | 2021-08-04T13:57:00 | 197,157,830 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | def arrr(N) :
for i in range(N) :
inp = input().split(' ')
inp = [int(j) for j in inp]
fly.append(inp)
return fly
def max_cal(fly,N,M):
sum_num = 0
max_num = 0
for i in range(N-M+1) :
for j in range(N-M+1) :
for l in range(M) :
for m in range(M) :
sum_num += fly[l+i][m+j]
if max_num < sum_num :
max_num = sum_num
sum_num = 0
return(max_num)
T = int(input())
for a in range(T):
N = input().split(' ')
fly = []
fly = arrr(int(N[0]))
print('#{0} {1}'.format(a+1, max_cal(fly,int(N[0]),int(N[1])))) | [
"[email protected]"
] | |
f21360c68557a49b1b4e4413627b85cd6737f75c | 73c9211d5627594e0191510f0b4d70a907f5c4c5 | /nn/keras_dataguru/lesson2/work2.py | 4dcacb8f30dd2feaffbd330256b8915e94435bcf | [] | no_license | tigerxjtu/py3 | 35378f270363532fb30962da8674dbcee99eb5ff | 5d24cd074f51bd0f17f6cc4f5f1a6e7cf0d48779 | refs/heads/master | 2021-07-13T05:34:15.080119 | 2020-06-24T09:36:33 | 2020-06-24T09:36:33 | 159,121,100 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,149 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import keras
import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
# In[2]:
(x_train,y_train),(x_test,y_test)=mnist.load_data()
print('x_shape:',x_train.shape) #(60000,28,28)
print('y_shape:',y_train.shape) #(60000,)
x_train = x_train.reshape(x_train.shape[0],-1)/255.0
x_test = x_test.reshape(x_test.shape[0],-1)/255.0
y_train = np_utils.to_categorical(y_train,num_classes=10)
y_test = np_utils.to_categorical(y_test,num_classes=10)
# In[8]:
# model=Sequential([Dense(units=10,input_dim=784,bias_initializer='one',activation='softmax')])
model=Sequential()
model.add(Dense(units=256,input_dim=x_train.shape[1],activation='relu'))
model.add(Dense(units=10,activation='softmax'))
sgd=SGD(lr=0.2)
model.compile(optimizer=sgd,loss='categorical_crossentropy',metrics=['accuracy'])
# In[9]:
model.fit(x_train,y_train,batch_size=32,epochs=10)
loss,accuracy=model.evaluate(x_test,y_test)
print('\ntest loss:',loss)
print('accuracy:',accuracy)
# In[ ]:
| [
"[email protected]"
] | |
3b425803ed4b6c2b1925c1f13d5ef31cf452bcc1 | 1998571941ae4c0d228abc8fae56f6aa449740f2 | /INFOQ2/Projet final/intersection.py | bd569ab429ad45f4d355753d7f44c32c58cbd57e | [] | no_license | AndruOn/UCL-BAC1 | 4ca148c2154bb948eb0753391afe2581d7df2873 | d15f75d1c85d823814e4f5e47c0c671c83e15a1f | refs/heads/master | 2020-05-17T03:17:16.207133 | 2019-12-22T13:12:02 | 2019-12-22T13:12:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,914 | py |
def intersection(list):
def distance(v):
d=int((5000-v)**(1/2)) #une estimation de la distance (unités ??) du stylo à la bobine en fonction de la valeur renvoyée par l'arduino (entre 0 et 1024). On doit améliorer cette approximation !
return d
v1,v2,v3=0 #les valeurs renvoyés par l'arduino pour chaque bobine.
#il est recommandé que les trois bobines de référence prises dans le programme soient les bobines recvant le plus gros voltage sinon il pourrait y avoir des bugs ...
#la bobine 1 est celle qui forme l'angle droit avec les autres bobines. Elle est prise comme point (0,0). Dans les calculs on considère les axes x et y usuels (y vertical et x horizontal). La bobine 2 est sur l'axe y tandis que la bobine 3 est sur l'axe x.
d1=distance(v1) #Ici on a donc une estimation de à quelle distance se trouve le stylo par rapport à chacune des trois bobines.
d2=distance(v2) #Comme on est dans l'espace, ça nous donne 3 sphères, une autour de chaque bobine.
d3=distance(v3) #L'intersection des 3 sphères donne l'endroit où est le stylo.
k=1 #distance (unités ?) entre les bobines.
x=(d1**2-d3**2)/2*k+k/2
y=(d1**2-d2**2)/2*k+k/2 #ici on a le point (x,y) de où est le stylo S'IL Y A UNE INTERSECTION. S'il n'y a pas d'intersection, la formule renvoie une valeur mais qui n'a pas de réalité physique !
z1=(d1**2-x*2-y**2)**(1/2) #normalement si tout va bien le z est positif... et pas complexe ! (S'il est complexe le programme bug donc attention, mais normalement ça ne doit pas arriver !)
z2=(d2**2-x**2-(y-k)**2)**(1/2) #Comme on sait que s'il n'y a pas d'interscetion on a quand même une valeur en (x,y) trompeuse, on regarde le z de chacune des sphères
z3=(d3**2-(x-k)**2-y**2)**(1/2) #Si le z est le même pour chacune avec (x,y) trouvé plus haut, alors l'intersection existe.
tol=0.1 # Evidement, les instruments ne sont pas parfaits, donc on garde une sensibilité, par exemple si z1 est proche de z2 sans être toutefois pafaitement égaux, on considère que l'intersection est bien réelle. (Unités dépend des unités pour k). On test la sensibilité à la ligne suivante.
if z1-2*tol<z2<z1+2*tol and z2-2*tol<z3<z2+2*tol: #On utilise deux fois tol car on fait deux estimations successives, les erreurs s'accumulent.
print("l'intersection est",x,y)
else:
print("on a",x,y,"mais ce n'est pas une vraie intersection") #Evidement, dans la réalité si l'approximation de la distance est correcte, vu que notre stylo est un point bien réel, alors on a toujours une intersection réelle. Cependant c'est tjs utile de checker si c'est correct ou non dans le cas où l'arduino fournirait une valeur bugée pour une des bobines, on considère que le stylo est tjs au point précédent et ça empêche des sauts incohérents dû aux erreurs de mesures.
| [
"[email protected]"
] | |
9049ee8a1909d30cd4468fc0ead2005d2df02c36 | df708c2eda9539f6f066fb6f448417ab9a392cb5 | /LinearRegression/Scikit_Linear_Validation.py | f658e3321035c632d1011faee637e861675e8514 | [] | no_license | TechMaster/LearnAI | bfc8c63e49ba0d70147cd0ff4be41b661e8f1cd0 | 490990a4647bef8f2b24a39b68a870c63c8e9a68 | refs/heads/master | 2020-07-05T20:13:45.973070 | 2019-09-18T14:28:49 | 2019-09-18T14:28:49 | 202,760,795 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
data = pd.read_csv('data_linear.csv').values
X = data[:, 0].reshape(-1, 1)
Y = data[:, 1].reshape(-1, 1)
test_size = 0.33
seed = 7
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_size, random_state=seed)
model = LinearRegression()
reg = model.fit(X_train, Y_train)
result = model.score(X_test, Y_test)
print(f"Accuracy: {result * 100.0} %")
| [
"[email protected]"
] | |
a5ef54a9d6efffe51eb5934fc96e5ffe768014aa | 4f91c481cc946c2c45c5984322a00a5da9b22720 | /server/tests/test_login.py | 527905624197dc6fdb79459d845722d5eee76625 | [
"MIT"
] | permissive | JoeEmp/pi_web_file_system | 2d07ff95664ebfa154162c20df91d6713ae12800 | e252660c7b8f2d26853a43860da0a7fdd062e505 | refs/heads/main | 2023-05-30T15:41:25.913197 | 2021-06-13T15:26:01 | 2021-06-13T15:26:01 | 366,447,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | import unittest
import requests
class login_case(unittest.TestCase):
def test_error_username(self):
username = '1'
data = {"username": username, 'password': 'raspberry'}
r = requests.post('http://127.0.0.1/login', data=data)
self.assertEqual(r.json()['msg'], '不存在该用户')
def test_error_password(self):
password = '1'
data = {"username": 'pi', 'password': password}
r = requests.post('http://127.0.0.1/login', data=data)
self.assertEqual(r.json()['msg'], '密码错误')
def test_smoke(self):
data = {"username": 'pi', 'password': 'raspberry'}
r = requests.post('http://127.0.0.1/login', data=data)
self.assertIn('token', r.json().keys()) | [
"hemingjie"
] | hemingjie |
f5a2ef71015d9c0200dc92159cec1251c7167ddd | b611b928dc02348a83fadb70d363d273cfb1f36a | /latent_bot/latent_dec.py | d4e83341dcc8d7f60caf49c6a1f9cf3141ebf403 | [] | no_license | m0n0ph1/malware_analysis | 5e1d8e6ae88a92c0806305ccfcebf3fc753e3240 | 817d2f6e8575f3308326062fa3af28c4b06c77bd | refs/heads/master | 2021-01-04T11:37:54.525465 | 2019-07-23T18:02:00 | 2019-07-23T18:02:00 | 240,529,928 | 2 | 0 | null | 2020-02-14T14:44:53 | 2020-02-14T14:44:53 | null | UTF-8 | Python | false | false | 3,178 | py | """latent_dec.py: Script for IDA Pro decoding Latent Bot's strings"""
__author__ = "hasherezade"
import idautils
lookup_table = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3E\x00\x00\x00\x3F\x34\x35\x36\x37\x38\x39\x3A\x3B\x3C\x3D\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x00\x00\x00\x00\x00\x00\x1A\x1B\x1C\x1D\x1E\x1F\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2A\x2B\x2C\x2D\x2E\x2F\x30\x31\x32\x33\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
def transform_chunk(chunk, chunk_size):
if chunk_size > 4:
chunk_size = 4
result = 0
for i in range(0, chunk_size):
mchar = ord(chunk[i])
val = ord(lookup_table[mchar])
val = val << (0x6 * i)
result = result + val
return result
def append_transformed(o, val):
for i in range(0, 3):
bval = (val >> (i * 8)) & 0XFF
o.append(bval)
def process_chunks(s):
o = []
while s:
chunk = s[:4]
val = transform_chunk(chunk, len(chunk))
append_transformed(o, val)
s = s[4:]
return o
def is_in_charset(str1):
for c in str1:
cval = ord(c)
if (cval in range(ord('a'), ord('z')+1)):
continue
if (cval in range(ord('A'), ord('Z')+1)):
continue
if (cval in range(ord('0'), ord('9')+1)):
continue
if (cval == ord('+')):
continue
if (cval == ord('/')):
continue
else:
return False
return True
def isprint(cval):
if (cval in range(32,127)):
return True
return False
def xordec(data, modifier):
out_str = []
data_size = len(data)
for i in range(0, data_size):
result = (data[i] ^ (modifier >> 8)) & 0xFF
if isprint(result) == False:
break
out_str.append(chr(result))
modifier += data[i]
modifier *= 0x0CE6D
modifier += 0x058BF
return "".join(out_str)
def latent_decode(str1):
try:
data = process_chunks(str1)
dec_str = xordec(data, 0xBB8)
return dec_str
except :
print "Decoding failed"
return None
#main:
sc = idautils.Strings()
for s in sc:
if is_in_charset(str(s)) == False:
continue
decoded_string = latent_decode(str(s))
if decoded_string is not None and len(decoded_string) > 0:
print "%x: '%s' -> '%s'" % (s.ea, str(s), decoded_string)
MakeRptCmt(s.ea, decoded_string) #set the decoded as a comment
| [
"[email protected]"
] | |
6eaca6a201451c63a49e92fe9a8c3d1051b56082 | 9727810fb355e3d047bc02f006248eee45be5ebe | /experiments/test2.py | 74929f8bf2704e253433658905cc5a4e70211128 | [] | no_license | scipioni/cherry | e00d4433fdf173ed6259d9e141050b0c7f2132d3 | 14432503533723626a79063b5663b48b1b5a4ae5 | refs/heads/master | 2021-12-02T15:30:09.424412 | 2017-06-29T13:03:34 | 2017-06-29T13:03:34 | 90,058,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,237 | py | """
white balance: 2500 (no auto)
"""
import numpy as np
import cv2
import time
from picamera.array import PiRGBArray
from picamera import PiCamera
import argparse
import collections
def draw_text(img, text, y=75, x=10, color=(255,255,0)):
cv2.putText(img, str(text), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.8, color)
def nothing(x):
pass
class CvTimer(object):
def __init__(self):
self.last = time.time()
self.history = collections.deque(maxlen=10)
@property
def fps(self):
now = time.time()
self.history.append(1.0/(now - self.last))
self.last = now
return round(sum(self.history)/len(self.history))
@property
def avg_fps(self):
return sum(self.l_fps_history) / float(self.fps_len)
class Ciliegia:
    """Tracks a single falling cherry ("ciliegia") crossing the sight band.

    Collects per-frame vertical speed estimates and fires once per pass when
    the blob is low enough in the band.  Positions are in image pixels;
    speed is pixels/second (sign follows self.y - y, i.e. positive while the
    blob moves down the frame) -- TODO confirm units against the camera setup.
    """
    def __init__(self):
        self.x = 0
        self.y = 0
        self.area = 0
        self.vs = []                 # recent speed samples for the current pass
        self.last = time.time()      # timestamp of the previous update
        self.fired = False           # True once fire() ran for this pass
    def update(self, img, hull, x, y, area, y_min):
        # Relative position inside the sight band: ~1.0 at the bottom edge.
        posizione = float(y-y_min)/y_min
        if posizione > 0.8:
            # Blob re-entered near the band bottom: treat it as a new cherry.
            print("nuova ciliegia...")
            self.fired = False
        now = time.time()
        # Instantaneous vertical speed from the previous observation.
        v = float(self.y-y)/(now-self.last)
        self.x = x
        self.y = y
        self.last = now
        if 6 < v < 1000:
            # Keep only plausible speeds; extremes are tracking glitches.
            self.vs.append(v)
        draw_text(img, area, y, x-20)
        draw_text(img, int(self.get_v()), y-50, x, color=(0,0,255))
        cv2.drawContours(img, [hull], -1, (0,255,0), 2)
        if not self.fired and posizione < 0.6 and v > 6:
            # Moving blob has climbed past 60% of the band: trigger once.
            self.fire()
    def get_v(self):
        """Mean of the collected speed samples, or 0 when none exist."""
        if not self.vs:
            return 0
        return sum(self.vs)/len(self.vs)
    def fire(self):
        """Emit the averaged speed for this pass and reset the samples."""
        self.fired = True
        v = self.get_v()
        if not v:
            return
        print(" FIRE v=%d vs=%s" % (v, self.vs))
        self.vs = []
class Detector:
    """Detects falling cherries in a video stream.

    Frames are segmented in HSV space with trackbar-tunable thresholds;
    blobs crossing a horizontal sight band are handed to a ``Ciliegia``
    tracker that estimates their speed.  Frames come either from a
    Raspberry Pi camera (``raspberry=True``) or from ``cv2.VideoCapture``
    (webcam, or a video file when *filename* is given).
    """
    def __init__(self, raspberry=True, show=False, filename=''):
        self.show = show
        self.timer = CvTimer()
        self.raspberry = raspberry
        self.whitebalance = 1.9
        self.ciliegia = Ciliegia()
        if raspberry:
            # Lock exposure and white balance so the HSV thresholds stay stable.
            self.camera = PiCamera()
            self.camera.resolution = (640, 480)
            self.camera.framerate = 30
            self.camera.shutter_speed = self.camera.exposure_speed
            print("exposure_speed=%s" % self.camera.exposure_speed)
            self.camera.exposure_mode = 'off'
            g = self.camera.awb_gains
            self.camera.awb_mode = 'off'
            print("gains=", g)
            self.rawCapture = PiRGBArray(self.camera, size=(640, 480))
        else:
            # Capture from a file when given, otherwise webcam 0.
            self.cap = cv2.VideoCapture(filename or 0)
        # [low, high] HSV bounds used by detect_object (tunable via trackbars).
        self.ranges = [
            [ np.array([115,180,0]), np.array([180,255,255])],
        ]
        cv2.namedWindow('image')
        range_ = self.ranges[0]
        cv2.createTrackbar('hue_min','image',range_[0][0],180,nothing)
        cv2.createTrackbar('hue_max','image',range_[1][0],180,nothing)
        cv2.createTrackbar('saturation_min','image',range_[0][1],255,nothing)
        cv2.createTrackbar('saturation_max','image',range_[1][1],255,nothing)
        cv2.createTrackbar('value_min','image',range_[0][2],255,nothing)
        cv2.createTrackbar('value_max','image',range_[1][2],255,nothing)
        cv2.createTrackbar('white_balance','image', int(round(self.whitebalance*10)),80,nothing)

    def erode(self, img, kernel=5):
        """Morphological erosion with a kernel x kernel square element."""
        kernel_ = np.ones((kernel,kernel),np.uint8)
        return cv2.erode(img, kernel_, iterations = 1)

    def dilate(self, img, kernel=5):
        """Morphological dilation with a kernel x kernel square element."""
        kernel_ = np.ones((kernel,kernel),np.uint8)
        return cv2.dilate(img, kernel_, iterations = 1)

    def show_hsv(self, hsv):
        """Debug helper: display the H, S and V planes in separate windows."""
        hue, saturation, value = cv2.split(hsv)
        cv2.imshow('hue', hue)
        cv2.imshow('saturation', saturation)
        cv2.imshow('value', value)

    def detect_object(self, img):
        """Return a binary mask of pixels inside the current HSV range."""
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        # Refresh the thresholds from the UI trackbars before masking.
        range_ = self.ranges[0]
        range_[0][0] = cv2.getTrackbarPos('hue_min','image')
        range_[1][0] = cv2.getTrackbarPos('hue_max','image')
        range_[0][1] = cv2.getTrackbarPos('saturation_min','image')
        range_[1][1] = cv2.getTrackbarPos('saturation_max','image')
        range_[0][2] = cv2.getTrackbarPos('value_min','image')
        range_[1][2] = cv2.getTrackbarPos('value_max','image')
        return cv2.inRange(hsv, *range_)

    def calculate(self, img_out, img_mask):
        """Find blobs in *img_mask*; feed those inside the sight band to the
        cherry tracker, drawing overlays on *img_out*."""
        # findContours returns (image, contours, hierarchy) on OpenCV 3 and
        # (contours, hierarchy) on OpenCV 2/4; taking the last two items is
        # correct on every version.  (The original branched on an undefined
        # ``cvutil`` module here, raising NameError as soon as it ran.)
        found = cv2.findContours(img_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        contours, hierarchy = found[-2], found[-1]
        if not contours:
            return
        cy_min, cy_max = self.mirino(img_out)
        hierarchy = hierarchy[0]
        for currentContour, currentHierarchy in zip(contours, hierarchy):
            if currentHierarchy[3] >= 0:
                continue  # skip child contours (holes); keep outer ones only
            area = cv2.contourArea(currentContour)
            area = area/100.0
            if area > 10:
                hull = cv2.convexHull(currentContour)
                area = round(cv2.contourArea(hull)/100)
                M = cv2.moments(hull)
                if M['m00'] == 0:
                    continue  # degenerate hull: centroid undefined
                cx = int(M['m10']/M['m00'])
                cy = int(M['m01']/M['m00'])
                if cy < cy_min or cy > cy_max:
                    continue  # centroid outside the sight band
                self.ciliegia.update(img_out, hull, cx, cy, area, cy_min)

    def mirino(self, img, delta=0.20):
        """Draw the horizontal sight band ("mirino") spanning the middle
        2*delta of the frame height; return its (y_min, y_max)."""
        height, width = img.shape[:2]
        cy_max = int(height/2+height*delta)
        cy_min = int(height/2-height*delta)
        cv2.rectangle(img, (0,cy_min), (width,cy_max), (255,0,255), 2)
        return (cy_min, cy_max)

    def capture(self):
        """Dispatch to the loop matching the configured frame source."""
        if self.raspberry:
            self.capture_raspberry()
        else:
            self.capture_pc()

    def process(self, frame):
        """Segment *frame*, update tracking and (optionally) display it."""
        mask = self.detect_object(frame)
        result = cv2.bitwise_and(frame, frame, mask=mask)
        self.calculate(result, mask)
        fps = self.timer.fps
        print("fps=%s" % fps)
        draw_text(result, "fps=%.0f" % fps)
        if self.show:
            cv2.imshow('result', result)

    def capture_raspberry(self):
        """Main loop for the Pi camera; press 'q' to quit."""
        for frameraw in self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True):
            whitebalance = cv2.getTrackbarPos('white_balance','image')/10.0
            self.camera.awb_gains = whitebalance
            self.process(frameraw.array)
            # The stream buffer must be cleared before grabbing the next frame.
            self.rawCapture.truncate(0)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        cv2.destroyAllWindows()

    def capture_pc(self):
        """Main loop for VideoCapture sources; stops at end-of-stream or 'q'."""
        while(True):
            ret, frame = self.cap.read()
            if frame is None:
                print("no frame")
                break
            self.process(frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        self.cap.release()
        cv2.destroyAllWindows()
if __name__ == '__main__':
    # CLI entry point: run the detector from a webcam or a video file.
    ap = argparse.ArgumentParser()
    ap.add_argument("--show", action='store_true', help="show camera")
    ap.add_argument("--file", default="")
    args = vars(ap.parse_args())
    # raspberry=False: this script path is for PC/webcam use.
    detector = Detector(raspberry=False, show=args['show'], filename=args['file'])
    detector.capture()
| [
"[email protected]"
] | |
014e8f1ddcd99487d99ffa878a6e6cfa7d50ed6c | d55bda4c4ba4e09951ffae40584f2187da3c6f67 | /h/admin/views/groups.py | 0caffe6dcf887350fc17bfffc50c9f1ecc8b64bc | [
"BSD-3-Clause",
"BSD-2-Clause-Views",
"BSD-2-Clause",
"MIT"
] | permissive | ficolo/h | 3d12f78fe95843b2a8f4fc37231363aa7c2868d9 | 31ac733d37e77c190f359c7ef5d59ebc9992e531 | refs/heads/master | 2021-01-15T21:08:17.554764 | 2016-06-09T15:42:01 | 2016-06-09T15:42:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,234 | py | # -*- coding: utf-8 -*-
from pyramid.view import view_config
from h import models
from h import paginator
@view_config(route_name='admin_groups',
             request_method='GET',
             renderer='csv' if False else 'h:templates/admin/groups.html.jinja2',  # NOTE(review): keep original renderer; see below
             permission='admin_groups')
@paginator.paginate
def groups_index(context, request):
    """Paginated admin listing of all groups, newest first."""
    return models.Group.query.order_by(models.Group.created.desc())
@view_config(route_name='admin_groups_csv',
             request_method='GET',
             renderer='csv',
             permission='admin_groups')
def groups_index_csv(request):
    """Export every group as a CSV download for the admin endpoint."""
    groups = models.Group.query
    header = ['Group name', 'Group URL', 'Creator username',
              'Creator email', 'Number of members']
    rows = []
    for group in groups:
        group_url = request.route_url('group_read',
                                      pubid=group.pubid,
                                      slug=group.slug)
        rows.append([group.name,
                     group_url,
                     group.creator.username,
                     group.creator.email,
                     len(group.members)])
    filename = 'groups.csv'
    request.response.content_disposition = 'attachment;filename=' + filename
    return {'header': header, 'rows': rows}
def includeme(config):
    """Pyramid hook: register this module's views via a config scan."""
    config.scan(__name__)
| [
"[email protected]"
] | |
4ac8a081e46391eeea55282689febe7c06212c68 | 5d9dedd551118449844767da7aa6493dd5efe542 | /selenium_trio/extras/javascript/js_scripts.py | 204825912f77ff029dd97e3791430d64f5e0968b | [
"MIT"
] | permissive | birkin/selenium-trio | 9e3c542ed4fd72c0804841ed9cc40ca2232f7e5b | 3295ba313837d7b1993acf042bad1c7cadb1bfdf | refs/heads/main | 2023-03-23T05:24:06.021387 | 2021-01-25T22:57:46 | 2021-01-25T22:57:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,723 | py |
# JavaScript snippet: returns the full document height (max of the body/html
# scroll, offset and client metrics).
PAGE_HEIGHT_JS = """
var body = document.body;
var html = document.documentElement;
var height = Math.max(body.scrollHeight, body.offsetHeight, html.clientHeight, html.scrollHeight, html.offsetHeight);
return height;
"""
# Clears the whole drawing surface of the canvas with id "myCanvas".
CLEAR_CANVAS_JS = """
var canvas = document.getElementById("myCanvas");
var ctx = canvas.getContext("2d");
ctx.clearRect(0, 0, canvas.width, canvas.height);
"""
# %-template: (html_string, insert_before_expr).  Builds a document fragment
# from raw HTML and inserts it into document.body.
ADD_FRAGMENT_JS = """
function create(htmlStr) {
var frag = document.createDocumentFragment(),
temp = document.createElement('div');
temp.innerHTML = htmlStr;
while (temp.firstChild) {
frag.appendChild(temp.firstChild);
}
return frag;
}
var fragment = create('%s');
document.body.insertBefore(fragment, %s);
"""
# %-template: (colour, x1, y1, x2, y2).  Strokes a line on "myCanvas".
DRAW_LINE_JS = """
var c = document.getElementById("myCanvas");
var context = c.getContext("2d");
context.beginPath();
context.strokeStyle="%s";
context.moveTo(%s, %s);
context.lineTo(%s, %s);
context.stroke();
context.closePath();
"""
# Named %-template: expects keys colour, x, y, width, height.
DRAW_RECT_JS = """
var c = document.getElementById("myCanvas");
var ctx = c.getContext("2d");
ctx.beginPath();
ctx.strokeStyle="%(colour)s";
ctx.rect(%(x)s,%(y)s,%(width)s,%(height)s);
ctx.stroke();
ctx.closePath();
"""
# %-template: (font_px, fill_colour, text, x, y).  Draws text on "myCanvas".
DRAW_TEXT_JS = """
var c = document.getElementById("myCanvas");
var ctx = c.getContext("2d");
ctx.font="%spx Georgia";
ctx.fillStyle="%s";
ctx.fillText("%s",%s,%s);
"""
# %-template: (boxes_json_array, colour).  Each box is [x, y, width, height].
DRAW_MULTIPLE_RECTS_JS = """
var c = document.getElementById("myCanvas");
var ctx = c.getContext("2d");
var boxes = %s;
for (var i in boxes) {
box = boxes[i];
ctx.beginPath();
ctx.strokeStyle="%s";
ctx.rect(box[0], box[1], box[2], box[3]);
ctx.stroke();
ctx.closePath();
}
"""
PRELOAD_JS = {
'getOuterHTML_js_multiple': """
function(){
var get_func = function(){
var aa="function"==typeof Object.defineProperties?Object.defineProperty:function(a,c,b){if(b.get||b.set)throw new TypeError("ES3 does not support getters and setters.");a!=Array.prototype&&a!=Object.prototype&&(a[c]=b.value)},ba="undefined"!=typeof window&&window===this?this:"undefined"!=typeof global?global:this;\nfunction e(a,c){if(c){for(var b=ba,d=a.split("."),f=0;f<d.length-1;f++){var h=d[f];h in b||(b[h]={});b=b[h]}d=d[d.length-1];f=b[d];h=c(f);h!=f&&null!=h&&aa(b,d,{configurable:!0,writable:!0,value:h})}}\ne("String.prototype.repeat",function(a){return a?a:function(a){var b;if(null==this)throw new TypeError("The \'this\' value for String.prototype.repeat must not be null or undefined");b=this+"";if(0>a||1342177279<a)throw new RangeError("Invalid count value");a|=0;for(var d="";a;)if(a&1&&(d+=b),a>>>=1)b+=b;return d}});e("Math.sign",function(a){return a?a:function(a){a=Number(a);return!a||isNaN(a)?a:0<a?1:-1}});var g=this;function l(a){return"string"==typeof a};function m(a,c){this.a=n[a]||p;this.message=c||"";var b=this.a.replace(/((?:^|\\s+)[a-z])/g,function(a){return a.toUpperCase().replace(/^[\\s\\xa0]+/g,"")}),d=b.length-5;if(0>d||b.indexOf("Error",d)!=d)b+="Error";this.name=b;b=Error(this.message);b.name=this.name;this.stack=b.stack||""}\n(function(){var a=Error;function c(){}c.prototype=a.prototype;m.b=a.prototype;m.prototype=new c;m.prototype.constructor=m;m.a=function(b,c,f){for(var h=Array(arguments.length-2),k=2;k<arguments.length;k++)h[k-2]=arguments[k];return a.prototype[c].apply(b,h)}})();var p="unknown error",n={15:"element not selectable",11:"element not visible"};n[31]=p;n[30]=p;n[24]="invalid cookie domain";n[29]="invalid element coordinates";n[12]="invalid element state";n[32]="invalid selector";n[51]="invalid selector";\nn[52]="invalid selector";n[17]="javascript error";n[405]="unsupported operation";n[34]="move target out of bounds";n[27]="no such alert";n[7]="no such element";n[8]="no such frame";n[23]="no such window";n[28]="script 
timeout";n[33]="session not created";n[10]="stale element reference";n[21]="timeout";n[25]="unable to set cookie";n[26]="unexpected alert open";n[13]=p;n[9]="unknown command";m.prototype.toString=function(){return this.name+": "+this.message};var q=String.prototype.trim?function(a){return a.trim()}:function(a){return a.replace(/^[\\s\\xa0]+|[\\s\\xa0]+$/g,"")};\nfunction r(a,c){for(var b=0,d=q(String(a)).split("."),f=q(String(c)).split("."),h=Math.max(d.length,f.length),k=0;!b&&k<h;k++){var S=d[k]||"",ja=f[k]||"",ka=RegExp("(\\\\d*)(\\\\D*)","g"),la=RegExp("(\\\\d*)(\\\\D*)","g");do{var t=ka.exec(S)||["","",""],u=la.exec(ja)||["","",""];if(0==t[0].length&&0==u[0].length)break;b=v(0==t[1].length?0:parseInt(t[1],10),0==u[1].length?0:parseInt(u[1],10))||v(0==t[2].length,0==u[2].length)||v(t[2],u[2])}while(!b)}return b}function v(a,c){return a<c?-1:a>c?1:0};var w;a:{var x=g.navigator;if(x){var y=x.userAgent;if(y){w=y;break a}}w=""}function z(a){return-1!=w.indexOf(a)};function ca(a,c){for(var b=a.length,d=l(a)?a.split(""):a,f=0;f<b;f++)f in d&&c.call(void 0,d[f],f,a)};function A(){return z("iPhone")&&!z("iPod")&&!z("iPad")};function B(){return z("Opera")||z("OPR")}function C(){return(z("Chrome")||z("CriOS"))&&!B()&&!z("Edge")};var D=B(),E=z("Trident")||z("MSIE"),F=z("Edge"),G=z("Gecko")&&!(-1!=w.toLowerCase().indexOf("webkit")&&!z("Edge"))&&!(z("Trident")||z("MSIE"))&&!z("Edge"),da=-1!=w.toLowerCase().indexOf("webkit")&&!z("Edge");function ea(){var a=w;if(G)return/rv\\:([^\\);]+)(\\)|;)/.exec(a);if(F)return/Edge\\/([\\d\\.]+)/.exec(a);if(E)return/\\b(?:MSIE|rv)[: ]([^\\);]+)(\\)|;)/.exec(a);if(da)return/WebKit\\/(\\S+)/.exec(a)}function H(){var a=g.document;return a?a.documentMode:void 0}\nvar I=function(){if(D&&g.opera){var a;var c=g.opera.version;try{a=c()}catch(b){a=c}return a}a="";(c=ea())&&(a=c?c[1]:"");return E&&(c=H(),null!=c&&c>parseFloat(a))?String(c):a}(),J={},K=g.document,L=K&&E?H()||("CSS1Compat"==K.compatMode?parseInt(I,10):5):void 
0;!G&&!E||E&&9<=Number(L)||G&&(J["1.9.1"]||(J["1.9.1"]=0<=r(I,"1.9.1")));E&&(J["9"]||(J["9"]=0<=r(I,"9")));var fa=z("Firefox"),ga=A()||z("iPod"),ha=z("iPad"),M=z("Android")&&!(C()||z("Firefox")||B()||z("Silk")),ia=C(),N=z("Safari")&&!(C()||z("Coast")||B()||z("Edge")||z("Silk")||z("Android"))&&!(A()||z("iPad")||z("iPod"));var ma={SCRIPT:1,STYLE:1,HEAD:1,IFRAME:1,OBJECT:1},na={IMG:" ",BR:"\\n"};function oa(a,c,b){if(!(a.nodeName in ma))if(3==a.nodeType)b?c.push(String(a.nodeValue).replace(/(\\r\\n|\\r|\\n)/g,"")):c.push(a.nodeValue);else if(a.nodeName in na)c.push(na[a.nodeName]);else for(a=a.firstChild;a;)oa(a,c,b),a=a.nextSibling};function O(a){return(a=a.exec(w))?a[1]:""}var pa=function(){if(fa)return O(/Firefox\\/([0-9.]+)/);if(E||F||D)return I;if(ia)return O(/Chrome\\/([0-9.]+)/);if(N&&!(A()||z("iPad")||z("iPod")))return O(/Version\\/([0-9.]+)/);if(ga||ha){var a=/Version\\/(\\S+).*Mobile\\/(\\S+)/.exec(w);if(a)return a[1]+"."+a[2]}else if(M)return(a=O(/Android\\s+([0-9.]+)/))?a:O(/Version\\/([0-9.]+)/);return""}();var qa;function P(a){ra?qa(a):M?r(sa,a):r(pa,a)}var ra=function(){if(!G)return!1;var a=g.Components;if(!a)return!1;try{if(!a.classes)return!1}catch(f){return!1}var c=a.classes,a=a.interfaces,b=c["@mozilla.org/xpcom/version-comparator;1"].getService(a.nsIVersionComparator),d=c["@mozilla.org/xre/app-info;1"].getService(a.nsIXULAppInfo).version;qa=function(a){b.compare(d,""+a)};return!0}(),Q;if(M){var ta=/Android\\s+([0-9\\.]+)/.exec(w);Q=ta?ta[1]:"0"}else Q="0";\nvar sa=Q,ua=E&&!(8<=Number(L)),va=E&&!(9<=Number(L));M&&P(2.3);M&&P(4);N&&P(6);function R(a,c){c=c.toLowerCase();if("style"==c)return wa(a.style.cssText);if(ua&&"value"==c&&T(a,"INPUT"))return a.value;if(va&&!0===a[c])return String(a.getAttribute(c));var b=a.getAttributeNode(c);return b&&b.specified?b.value:null}var xa=/[;]+(?=(?:(?:[^"]*"){2})*[^"]*$)(?=(?:(?:[^\']*\'){2})*[^\']*$)(?=(?:[^()]*\\([^()]*\\))*[^()]*$)/;\nfunction wa(a){var c=[];ca(a.split(xa),function(a){var 
d=a.indexOf(":");0<d&&(a=[a.slice(0,d),a.slice(d+1)],2==a.length&&c.push(a[0].toLowerCase(),":",a[1],";"))});c=c.join("");return c=";"==c.charAt(c.length-1)?c:c+";"}function U(a,c){var b;ua&&"value"==c&&T(a,"OPTION")&&null===R(a,"value")?(b=[],oa(a,b,!1),b=b.join("")):b=a[c];return b}function T(a,c){return!!a&&1==a.nodeType&&(!c||a.tagName.toUpperCase()==c)}\nfunction ya(a){return T(a,"OPTION")?!0:T(a,"INPUT")?(a=a.type.toLowerCase(),"checkbox"==a||"radio"==a):!1};var za={"class":"className",readonly:"readOnly"},V="async autofocus autoplay checked compact complete controls declare defaultchecked defaultselected defer disabled draggable ended formnovalidate hidden indeterminate iscontenteditable ismap itemscope loop multiple muted nohref noresize noshade novalidate nowrap open paused pubdate readonly required reversed scoped seamless seeking selected spellcheck truespeed willvalidate".split(" ");function Aa(a,c){var b=null,d=c.toLowerCase();if("style"==d)return(b=a.style)&&!l(b)&&(b=b.cssText),b;if(("selected"==d||"checked"==d)&&ya(a)){if(!ya(a))throw new m(15,"Element is not selectable");var b="selected",f=a.type&&a.type.toLowerCase();if("checkbox"==f||"radio"==f)b="checked";return U(a,b)?"true":null}var h=T(a,"A");if(T(a,"IMG")&&"src"==d||h&&"href"==d)return(b=R(a,d))&&(b=U(a,d)),b;if("spellcheck"==d){b=R(a,d);if(null!==b){if("false"==b.toLowerCase())return"false";if("true"==b.toLowerCase())return"true"}return U(a,\nd)+""}h=za[c]||c;a:if(l(V))d=l(d)&&1==d.length?V.indexOf(d,0):-1;else{for(var k=0;k<V.length;k++)if(k in V&&V[k]===d){d=k;break a}d=-1}if(0<=d)return(b=null!==R(a,c)||U(a,h))?"true":null;try{f=U(a,h)}catch(S){}(d=null==f)||(d=typeof f,d="object"==d&&null!=f||"function"==d);d?b=R(a,c):b=f;return null!=b?b.toString():null}var W=["_"],X=g;W[0]in X||!X.execScript||X.execScript("var "+W[0]);for(var Y;W.length&&(Y=W.shift());){var Z;if(Z=!W.length)Z=void 0!==Aa;Z?X[Y]=Aa:X[Y]?X=X[Y]:X=X[Y]={}};; return this._.apply(null,arguments);
}
var windowDict = {navigator:typeof window!=\'undefined\'?window.navigator:null,document:typeof window!=\'undefined\'?window.document:null}
results = new Array(arguments.length);
for (i = 0; i < arguments.length; i++) {
results[i] = get_func.apply(windowDict, new Array(arguments[i], "outerHTML"));
}
return results;
}\n
""",
'getRect_multiple_js': """
function(){
results = new Array(arguments.length);
for (i = 0; i < arguments.length; i++) {
var elem = arguments[i];
results[i] = elem.getBoundingClientRect();
}
return results;
}\n
""",
'get_xpaths': """
function(){
function getPathTo(element) {
var tagName = element.tagName.toLowerCase();
if (element===document.body || tagName == "body")
return tagName
var ix= 0;
var siblings= element.parentNode.childNodes;
for (var i= 0; i<siblings.length; i++) {
var sibling= siblings[i];
if (sibling===element)
return getPathTo(element.parentNode)+'/'+tagName+'['+(ix+1)+']';
if (sibling.nodeType===1 && sibling.tagName.toLowerCase()===tagName)
ix++;
}
}
results = [];
for(i=0; i < arguments.length; i++){
results.push(getPathTo(arguments[i]));
}
return results;
}
""",
'get_parent_paths': """
function(){
function getElemDict(element) {
return {
'elem': element, 'tag_name': element.tagName.toLowerCase(),
'outer_html': element.outerHTML
}
var id = null;
if(element.id !== '')
id = element.id;
var other_attrs = {};
for (var att, i = 0, atts = element.attributes, n = atts.length; i < n; i++){
att = atts[i];
if (att.nodeName == 'class')
continue;
other_attrs[att.nodeName] = att.nodeValue;
}
return {
'tag_name': element.tagName.toLowerCase(),
'id': id, 'other_attrs': other_attrs,
'class_attr': element.getAttribute('class'),
'elem': element,
}
}
function getPathTo(element) {
if (element===document.body)
return [getElemDict(element)];
var ix= 0;
var siblings= element.parentNode.childNodes;
for (var i= 0; i < siblings.length; i++) {
var sibling= siblings[i];
if (sibling===element)
return getPathTo(element.parentNode).concat([getElemDict(element)]);
if (sibling.nodeType===1 && sibling.tagName===element.tagName)
ix++;
}
}
results = [];
for(i=0; i < arguments.length; i++){
var path = getPathTo(arguments[i]);
path = path.reverse();
results.push(path);
}
return results;
}
""",
'getComputedCss_multiple_js': """
function(){
var valuesEncountered = new Object();
function getStyleProperties(style){
var result = new Object();
for(var i=0; i<style.length; i++){
var key = style[i];
var val = style[key];
if(val == "none" || val == "auto" || val == null)
continue;
result[key] = val;
var valKey = key + "___" + (val.toString());
valuesEncountered[valKey] = (valuesEncountered[valKey] || 0) + 1;
}
return result;
}
// no longer used
function removeCommonValues(res){
var toRemove = ["transform-origin", "perspective-origin"]; // also remove these noisey values
var ignoreKeys = ["color", "font-size", "font-weight", "visibility", "display"];
for(var key in valuesEncountered){
var count = valuesEncountered[key];
if( count == res.length )
toRemove.push(key.split("___")[0]);
}
for(var i=0; i < toRemove.length; i++){
var key = toRemove[i];
for(var j=0; j < res.length; j++){
var di = res[j]["all_computed_styles"];
delete di[key];
}
}
}
jqueryElems = $(arguments);
hiddenElems = jqueryElems.filter(":hidden");
results = new Array(arguments.length);
for (var i = 0; i < arguments.length; i++) {
styleObj = window.getComputedStyle(arguments[i]);
results[i] = {
"all_computed_styles": getStyleProperties(styleObj),
"color": styleObj["color"],
"font-size": styleObj["font-size"],
"font-weight": styleObj["font-weight"],
"visibility": styleObj["visibility"],
"is_visible_jquery": hiddenElems.index(arguments[i]) == -1, //$(arguments[i]).is(':visible')),
"display": styleObj["display"],
};
if(i == arguments.length-1)
results[i]["encountered"] = valuesEncountered;
}
return results;
}
""",
}
| [
"[email protected]"
] | |
273d052181259e811b893a7ea4d7ed8bdfabe902 | b9a86a79cd0e3fc7a287529f26755a6be4481dff | /tests/conftest.py | e2bb472b3746bf0d712f4b913c075acdd7d41290 | [] | no_license | crossanpdx/python-automation-example | 4de0b40103565f1a9aec5baeb44f942f07f0798b | cb8407d95b9f416753693636a988f66cb79a73b1 | refs/heads/main | 2023-08-01T00:21:32.046270 | 2021-09-21T15:11:11 | 2021-09-21T15:11:11 | 400,588,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,532 | py | import json
import allure
import pytest
from allure_commons.types import AttachmentType
from utils.driver_factory import DriverFactory
# Path to the JSON test configuration, relative to the test run directory.
CONFIG_PATH = "../config.json"
# Fallback implicit-wait (seconds) when the config omits "wait_time".
DEFAULT_WAIT_TIME = 10
# Browsers the "browser" config key is allowed to name.
SUPPORTED_BROWSERS = ["chrome", "firefox", "edge"]
# Fallback page under test when the config omits "tested_page".
DEFAULT_WEBSITE = "http://www.google.com/"
@pytest.fixture(scope='session')
def config():
    """Load the shared JSON test configuration once per session.

    Bug fix: the original opened the file and never closed it; the context
    manager guarantees the handle is released even if json.load raises.
    """
    with open(CONFIG_PATH) as config_file:
        return json.load(config_file)
@pytest.fixture(scope="session")
def browser_setup(config):
    """Validate the configured browser name and return it.

    Raises Exception when "browser" is missing from the config or names a
    browser outside SUPPORTED_BROWSERS.
    """
    if "browser" not in config:
        raise Exception('The config file does not contain "browser"')
    elif config["browser"] not in SUPPORTED_BROWSERS:
        raise Exception(f'"{config["browser"]}" is not a supported browser')
    return config["browser"]
@pytest.fixture(scope='session')
def wait_time_setup(config):
    """Configured wait time (seconds), or DEFAULT_WAIT_TIME when absent."""
    return config['wait_time'] if 'wait_time' in config else DEFAULT_WAIT_TIME
@pytest.fixture(scope='session')
def website_setup(config):
    """Configured page under test, or DEFAULT_WEBSITE when absent."""
    return config['tested_page'] if 'tested_page' in config else DEFAULT_WEBSITE
@pytest.fixture()
def setup(request, config):
    """Per-test fixture: build a WebDriver, expose it to the test class,
    attach a screenshot to the Allure report on failure, then quit."""
    driver = DriverFactory.get_driver(config["browser"], config["headless_mode"])
    driver.implicitly_wait(config["timeout"])
    # Tests access the driver as self.driver (request.cls is the test class).
    request.cls.driver = driver
    # Snapshot the failure count so we can tell whether *this* test failed.
    before_failed = request.session.testsfailed
    if config["browser"] == "firefox":
        driver.maximize_window()
    yield
    if request.session.testsfailed != before_failed:
        allure.attach(driver.get_screenshot_as_png(), name="Test failed", attachment_type=AttachmentType.PNG)
    driver.quit()
| [
"[email protected]"
] | |
2e47d06edd2437531dd3a914f78047817501ac02 | 643f3af7cbc39b663e2d74b8c9d4c8920569e7cf | /postgres/app/languages.py | 11a73c510e42368272b2dd8a684d83cd4c278aea | [
"MIT"
] | permissive | r2k0/py-apps | a4df131ad7a96b87a4cbca2e4dd55a336c6bfa93 | 0df12629dd689b5d3e7f987ce35ee37a78f4a924 | refs/heads/master | 2020-03-28T18:49:38.605365 | 2014-10-22T05:38:19 | 2014-10-22T05:38:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 944 | py | import psycopg2
import sys
import psycopg2.extras
# (id, language name, GitHub repo count) seed rows for the Languages table.
langs = (
    (1,'Javascript',294832),
    (2,'Java',176000),
    (3,'CSS',151000),
    (4,'Python',142272),
    (5,'Ruby',132397)
)
con = None
try:
    # NOTE: Python 2 script (print statements, "except E, e" syntax).
    con = psycopg2.connect("dbname='testdb' user='vagrant'")
    cur = con.cursor()
    # Recreate the table from scratch on every run.
    cur.execute("DROP TABLE IF EXISTS Languages")
    cur.execute("CREATE TABLE Languages(Id INT PRIMARY KEY, Name TEXT, Repos INT)")
    # Parameterized insert: psycopg2 handles quoting via %s placeholders.
    query = "INSERT INTO Languages (Id, Name, Repos) VALUES (%s,%s,%s)"
    cur.executemany(query, langs)
    con.commit()
    cur.execute("SELECT * FROM Languages")
    rows = cur.fetchall()
    for r in rows: print r[0],r[1],r[2]
    #using Dictionary cursor
    cursor = con.cursor(cursor_factory=psycopg2.extras.DictCursor)
    cursor.execute("SELECT * FROM Languages")
    rows = cursor.fetchall()
    for r in rows:
        print r["name"],r["repos"]
except psycopg2.DatabaseError, e:
    # Roll back the partial transaction before reporting and exiting.
    if con: con.rollback()
    print e
    sys.exit(1)
finally:
    # Always release the connection, even on success or after sys.exit.
    if con: con.close()
| [
"[email protected]"
] | |
446d33ef9eb76038d095c98939f9b31af266e4c5 | 26d76543052c86359ab25e78eac9ddd950220670 | /session1/addition.py | 204b1cf7d21576dede795f4fc2790afab276f192 | [] | no_license | HarshMaurya-coder/pythonbasics | ee67d1c2c61c01f188bba2a25d052385deb142a6 | 916af7678b9068ec1d641b9b3bd1bcd1e89c9611 | refs/heads/main | 2023-02-19T17:16:46.109718 | 2021-01-12T16:04:04 | 2021-01-12T16:04:04 | 328,421,112 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | num1 = int(input("Enter first number: "))
num2 = int(input("Enter second number: "))
print("press 1 for addition")
# input() returns a string, so the menu choice is compared as text.
operator = input()
if operator == "1":
    # Only addition is implemented; any other choice exits silently.
    print(num1 + num2)
| [
"[email protected]"
] | |
c8551e2daf0a9f2f18a67975e6278b4397c7c41d | ef6629579799f113825076462a833d58044696f2 | /log.py | e70068ec45088aaa12d0270443bda684cf10815a | [] | no_license | shivamgupta97/ML-ALGO- | b1eb886ba6bf8b64b70626e83828cf46cfeeada9 | 2ce806fbdae2b55b6d0e0fa91b68ff29061a74b7 | refs/heads/master | 2020-03-22T22:14:05.005517 | 2018-07-12T16:55:55 | 2018-07-12T16:55:55 | 140,741,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,528 | py | import numpy as np
#import panda as pd
from sklearn import datasets
class log:
    """Minimal logistic-regression classifier trained with batch gradient
    descent.  (The lowercase class name is kept for caller compatibility.)
    """
    def __init__(self, lr=0.01, it=10000, fit_int=True, verbose=False):
        """
        lr      -- gradient-descent learning rate
        it      -- number of gradient-descent iterations
        fit_int -- prepend an intercept (bias) column of ones in fit()
        verbose -- accepted for API compatibility (stored, otherwise unused)
        """
        self.lr = lr
        self.it = it
        self.fit_int = fit_int
        self.verbose = verbose
    def add_int(self, d):
        """Return *d* as an array with a leading column of ones (intercept)."""
        x = np.asarray(d)
        inter = np.ones((x.shape[0], 1))
        # Original computed the concatenation twice (once just to print it).
        return np.concatenate((inter, x), axis=1)
    def sig(self, x):
        """Element-wise logistic sigmoid."""
        return 1 / (1 + np.exp(-x))
    def loss(self, h, y):
        """Mean binary cross-entropy between predictions *h* and labels *y*."""
        return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
    def fit(self, x, y):
        """Learn self.theta by gradient descent; prints the final loss.

        *y* must be a numpy array (y.size is used for the batch average).
        """
        if self.fit_int:
            x = self.add_int(x)
        else:
            x = np.asarray(x)
        self.theta = np.zeros(x.shape[1])
        for _ in range(self.it):
            z = np.dot(x, self.theta)
            h = self.sig(z)
            grad = np.dot(x.T, (h - y)) / y.size
            self.theta -= self.lr * grad
        z = np.dot(x, self.theta)
        # Bug fix: the original computed sig(h), applying the sigmoid twice,
        # so the reported final loss was wrong.
        h = self.sig(z)
        print(self.loss(h, y))
    def pred(self, x):
        """Return sigmoid(x . theta).

        Note: *x* must already include the intercept term (e.g. [1, f1, f2]),
        matching the original interface.
        """
        return self.sig(np.dot(x, self.theta))
    def get(self, x, t):
        """Return True when the predicted probability reaches threshold *t*."""
        return bool(self.pred(x) >= t)
def run():
    """Demo: fit the classifier on two iris features and print a prediction."""
    iris = datasets.load_iris()
    # First two features only; binary target: "not setosa" -> 1.
    x = iris.data[:,:2]
    y = (iris.target!=0)*1
    print(x)
    print(y)
    a = log(lr=0.1, it = 10000)
    a.fit(x,y)
    #print(a.loss(x,y))
    # Query point with the intercept term (1) prepended by hand.
    q=[1,5.1,3.5]
    t=0.5
    print("vds;kjbeqk")
    print(a.get(q,t))
if __name__ == '__main__':
    run()
"[email protected]"
] | |
b9aeff68654c2ed50000a30879c2e21c640d81e5 | 0206ac23a29673ee52c367b103dfe59e7733cdc1 | /src/nemo/compare_2nemo_simulations.py | 041bbfd0229b247c34b4796abf04bc639b9483ae | [] | no_license | guziy/RPN | 2304a93f9ced626ae5fc8abfcc079e33159ae56a | 71b94f4c73d4100345d29a6fbfa9fa108d8027b5 | refs/heads/master | 2021-11-27T07:18:22.705921 | 2021-11-27T00:54:03 | 2021-11-27T00:54:03 | 2,078,454 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,484 | py | from collections import namedtuple
from pathlib import Path
from matplotlib import cm
from matplotlib.gridspec import GridSpec
from nemo.nemo_yearly_files_manager import NemoYearlyFilesManager
__author__ = 'huziy'
# Compare 2 Nemo outputs
import matplotlib.pyplot as plt
import numpy as np
def main_compare_max_yearly_ice_conc():
"""
ice concentration
"""
var_name = ""
start_year = 1979
end_year = 1985
SimConfig = namedtuple("SimConfig", "path label")
base_config = SimConfig("/home/huziy/skynet3_rech1/offline_glk_output_daily_1979-2012", "ERAI-driven")
modif_config = SimConfig("/home/huziy/skynet3_rech1/one_way_coupled_nemo_outputs_1979_1985", "CRCM5")
nemo_manager_base = NemoYearlyFilesManager(folder=base_config.path, suffix="icemod.nc")
nemo_manager_modif = NemoYearlyFilesManager(folder=modif_config.path, suffix="icemod.nc")
icecov_base, icecov_ts_base = nemo_manager_base.get_max_yearly_ice_fraction(start_year=start_year,
end_year=end_year)
icecov_modif, icecov_ts_modif = nemo_manager_modif.get_max_yearly_ice_fraction(start_year=start_year,
end_year=end_year)
lons, lats, bmp = nemo_manager_base.get_coords_and_basemap()
xx, yy = bmp(lons.copy(), lats.copy())
# Plot as usual: model, obs, model - obs
img_folder = Path("nemo/{}vs{}".format(modif_config.label, base_config.label))
if not img_folder.is_dir():
img_folder.mkdir(parents=True)
img_file = img_folder.joinpath("compare_yearmax_icecov_{}_vs_{}_{}-{}.pdf".format(
modif_config.label, base_config.label, start_year, end_year))
fig = plt.figure()
gs = GridSpec(2, 3, width_ratios=[1, 1, 0.05])
cmap = cm.get_cmap("jet", 10)
diff_cmap = cm.get_cmap("RdBu_r", 10)
# base
ax = fig.add_subplot(gs[0, 0])
cs = bmp.contourf(xx, yy, icecov_base, cmap=cmap)
bmp.drawcoastlines(ax=ax)
ax.set_title(base_config.label)
# modif
ax = fig.add_subplot(gs[0, 1])
cs = bmp.contourf(xx, yy, icecov_modif, cmap=cmap, levels=cs.levels)
plt.colorbar(cs, cax=fig.add_subplot(gs[0, -1]))
bmp.drawcoastlines(ax=ax)
ax.set_title(modif_config.label)
# difference
ax = fig.add_subplot(gs[1, :])
cs = bmp.contourf(xx, yy, icecov_modif - icecov_base, cmap=diff_cmap, levels=np.arange(-1, 1.2, 0.2))
bmp.colorbar(cs, ax=ax)
bmp.drawcoastlines(ax=ax)
fig.tight_layout()
fig.savefig(str(img_file), bbox_inches="tight")
ax.set_title("{}-{}".format(modif_config.label, base_config.label))
plt.close(fig)
# Plot time series
img_file = img_folder.joinpath("ts_compare_yearmax_icecov_{}_vs_{}_{}-{}.pdf".format(
modif_config.label, base_config.label, start_year, end_year))
fig = plt.figure()
plt.plot(range(start_year, end_year + 1), icecov_ts_base, "b", lw=2, label=base_config.label)
plt.plot(range(start_year, end_year + 1), icecov_ts_modif, "r", lw=2, label=modif_config.label)
plt.legend()
plt.gca().get_xaxis().get_major_formatter().set_useOffset(False)
plt.grid()
plt.xlabel("Year")
fig.tight_layout()
fig.savefig(str(img_file), bbox_inches="tight")
if __name__ == '__main__':
import application_properties
application_properties.set_current_directory()
main_compare_max_yearly_ice_conc() | [
"[email protected]"
] | |
18b07d81056da8370a9d8501f88b08674c67ba74 | ac28fd7497b2ce1fdd95441aa54706baf8a2123c | /GoogleAppEngine/ascii_chan/main.py | 5e619775f9776d39ae4b5c1f44a4dd9c015445ef | [] | no_license | rollersitch/All_my_code | 6b6defbb5620372f6fdefc094acd8d73ff2a9def | 964ba36b10d6b5b79b57e42a335643347b229106 | refs/heads/master | 2021-04-28T23:17:52.550381 | 2017-09-13T15:30:30 | 2017-09-13T15:30:30 | 77,738,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,222 | py | import webapp2
import jinja2
import os
import urllib2
import sys
import logging
from google.appengine.ext import db
from google.appengine.api import memcache
from xml.dom import minidom
# Jinja2 environment rooted at ./templates; autoescape guards against XSS.
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir), autoescape=True)
# art_key = db.Key.from_path('ascii_chan', 'arts')
# freegeoip endpoint used to geolocate the poster's IP address.
IP_URL = "http://freegeoip.net/%(format)s/%(IP)s"
# Google static-maps base URL; marker params are appended by gmaps_img().
GMAPS_URL = "http://maps.googleapis.com/maps/api/staticmap?size=380x263&sensor=false&"
def get_coords(ip):
    """Geolocate *ip* via freegeoip; return a db.GeoPt or None.

    Network failures are swallowed (implicit None) so posting never breaks
    when the geolocation service is down.
    """
    url = IP_URL % {'format': 'xml', 'IP': ip}
    content = None
    try:
        content = urllib2.urlopen(url).read()
    except urllib2.URLError:
        return
    if content:
        d = minidom.parseString(content)
        lat_el = d.getElementsByTagName("Latitude")
        lon_el = d.getElementsByTagName("Longitude")
        if lat_el and lon_el:
            # How crazy is that?? Thank you minidom
            # (firstChild.nodeValue extracts the element's text content)
            lon, lat = lon_el[0].firstChild.nodeValue, lat_el[0].firstChild.nodeValue
            return db.GeoPt(lat, lon)
def gmaps_img(points):
    """Build a static-map URL with one 'markers' parameter per point."""
    marker_params = ('markers=%s,%s' % (p.lat, p.lon) for p in points)
    return GMAPS_URL + '&'.join(marker_params)
def top_arts(update=False):
    """Return the cached list of all Art entities, newest first.

    Reads memcache first; queries the datastore only on a cache miss or when
    *update* is True (callers pass True right after a write to refresh).
    """
    key = 'top'
    arts = memcache.get(key)
    if arts is None or update:
        # Logged at ERROR on purpose so datastore hits stand out in the logs.
        logging.error("DB_QUERY")
        arts = db.GqlQuery("SELECT * "
                           "FROM Art "
                           "ORDER BY created DESC ")
        # Materialize the query so memcache stores data, not a live cursor.
        arts = list(arts)
        memcache.set(key, arts)
    return arts
def console(s):
    """Write *s* followed by a newline to stderr for quick debugging."""
    message = '%s\n' % s
    sys.stderr.write(message)
class Handler(webapp2.RequestHandler):
    """Useful functions for jinja2 templates"""
    def write(self, *a, **kw):
        # Shorthand for writing straight to the HTTP response body.
        self.response.out.write(*a, **kw)
    def render_str(self, template, **params):
        # Render a template file to a string with the given parameters.
        t = jinja_env.get_template(template)
        return t.render(params)
    def render(self, template, **kw):
        # Render a template and write the result to the response.
        self.write(self.render_str(template, **kw))
class Art(db.Model):
    """Datastore entity for one submitted piece of ASCII art."""
    title = db.StringProperty(required=True)
    art = db.TextProperty(required=True)
    created = db.DateTimeProperty(auto_now_add=True)  # set once on insert
    coords = db.GeoPtProperty()  # submitter location, when geolocation succeeded
class MainPage(Handler):
    """Front page: submission form, all art, and a static map of submitters."""
    def render_front(self, title="", art="", error=""):
        arts = top_arts()
        img_url = None
        # Only arts with coordinates contribute markers; relies on Python 2
        # filter() returning a list for the truthiness check below.
        points = filter(None, (a.coords for a in arts))
        if points:
            img_url = gmaps_img(points)
        self.render('front.html', title=title, art=art, error=error, arts=arts, img_url=img_url)
    def get(self):
        # self.write(repr(get_coords(self.request.remote_addr)))
        self.render_front()
    def post(self):
        title = self.request.get('title')
        art = self.request.get('art')
        if title and art:
            a = Art(title=title, art=art)
            # Best-effort geotagging of the submission; None is simply skipped.
            coords = get_coords(self.request.remote_addr)
            if coords:
                a.coords = coords
            a.put()
            # Refresh the memcache entry so the new art shows up immediately.
            top_arts(True)
            self.redirect('/')
        else:
            error = "we need both a title and some artwork"
            self.render_front(title=title, art=art, error=error)
app = webapp2.WSGIApplication([('/', MainPage)], debug=True)
| [
"[email protected]"
] | |
0693ea164d0f55d55cd10b0b62feb9852c3317d9 | 548fe32bf338f9ef270add39cd849f31c26de107 | /2016/day1.py | 77c9fdefaf54184e7139ac7debc1d9e91a523b4a | [] | no_license | JamesWalter/AdventOfCode | 764799fe1cb3c6657945140c597d3b01247a91d3 | 3e203d9c95721b93c5e2308c9c1703c9743cf110 | refs/heads/master | 2021-09-03T17:15:55.765822 | 2018-01-10T17:06:26 | 2018-01-10T17:06:26 | 112,657,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,108 | py | """Advent of Code 2016 Day 1 No Time for a Taxicab"""
# Santa's sleigh uses a very high-precision clock to guide its movements,
# and the clock's oscillator is regulated by stars. Unfortunately, the stars
# have been stolen... by the Easter Bunny. To save Christmas, Santa needs
# you to retrieve all fifty stars by December 25th.
#
# Collect stars by solving puzzles. Two puzzles will be made available on
# each day in the advent calendar; the second puzzle is unlocked when you
# complete the first. Each puzzle grants one star. Good luck!
#
# You're airdropped near Easter Bunny Headquarters in a city somewhere.
# "Near", unfortunately, is as close as you can get - the instructions on
# the Easter Bunny Recruiting Document the Elves intercepted start here, and
# nobody had time to work them out further.
#
# The Document indicates that you should start at the given coordinates
# (where you just landed) and face North. Then, follow the provided
# sequence: either turn left (L) or right (R) 90 degrees, then walk forward
# the given number of blocks, ending at a new intersection.
#
# There's no time to follow such ridiculous instructions on foot, though, so
# you take a moment and work out the destination. Given that you can only
# walk on the street grid of the city, how far is the shortest path to the
# destination?
#
# For example:
#
# -Following R2, L3 leaves you 2 blocks East and 3 blocks North, or 5 blocks
# away.
# -R2, R2, R2 leaves you 2 blocks due South of your starting position, which
# is 2 blocks away.
# -R5, L5, R5, R3 leaves you 12 blocks away.
#
# How many blocks away is Easter Bunny HQ?
import re
from collections import deque
class Elf(object):
    """Walker that tracks a facing direction and a grid position.

    `compass` is expected to be a collections.deque of the four cardinal
    points with the current facing at index 0, e.g. deque(['N', 'E', 'S', 'W']);
    turn() rotates it and walk() reads compass[0].
    """
    def __init__(self, compass, x, y):
        self.compass = compass
        # Grid coordinates: x grows East, y grows North (see walk()).
        self.x = x
        self.y = y
def turn(direction, elf):
    """Rotate *elf*'s compass deque so the new heading sits at index 0.

    'R' brings the next clockwise direction to the front, 'L' the next
    counter-clockwise one.  Any other value leaves the compass untouched.
    """
    rotation = {'R': -1, 'L': 1}.get(direction)
    if rotation is not None:
        elf.compass.rotate(rotation)
def walk(steps, elf):
    """Move *elf* forward `steps` blocks along its current heading.

    `steps` may be a string; it is converted with int().  Headings map to
    the grid as N -> +y, S -> -y, E -> +x, W -> -x; an unknown heading is
    a no-op (and `steps` is then never converted).
    """
    deltas = {'N': (0, 1), 'E': (1, 0), 'S': (0, -1), 'W': (-1, 0)}
    heading = elf.compass[0]
    if heading in deltas:
        dx, dy = deltas[heading]
        distance = int(steps)
        elf.x += dx * distance
        elf.y += dy * distance
def direction_generator(directions):
    """Yield each instruction token (e.g. 'R2', 'L192') from *directions*.

    Tokens are maximal runs of uppercase letters and digits; separators
    such as commas and spaces are skipped.
    """
    for token in re.findall(r"[A-Z0-9]+", directions):
        yield token
def solve1(sequence):
    """Part 1: print the Manhattan distance of the final position.

    Walks the comma-separated turn/step instructions from the origin,
    facing North, then prints abs(x) + abs(y).  (Python 2 print statement.)
    """
    paratrooper_elf = Elf(deque(['N', 'E', 'S', 'W']), 0, 0)
    for direction in direction_generator(sequence):
        # Each token is a letter (R/L) followed by a step count.
        turn(re.search(r'[RL]', direction).group(0), paratrooper_elf)
        walk(re.search(r'\d+', direction).group(0), paratrooper_elf)
    print abs(paratrooper_elf.x) + abs(paratrooper_elf.y)
# --- Part Two ---
#
# Then, you notice the instructions continue on the back of the Recruiting
# Document. Easter Bunny HQ is actually at the first location you visit
# twice.
#
# For example, if your instructions are R8, R4, R4, R8, the first location
# you visit twice is 4 blocks away, due East.
#
# How many blocks away is the first location you visit twice?
def walk2(steps, elf):
    """Advance *elf* one block at a time, recording every intermediate stop.

    Unlike walk(), each single-block move is followed by a call to
    elf.record_location(), so revisited intersections can be detected.
    An unknown heading still records the (unchanged) position each step.
    """
    deltas = {'N': (0, 1), 'E': (1, 0), 'S': (0, -1), 'W': (-1, 0)}
    for _ in range(int(steps)):
        dx, dy = deltas.get(elf.compass[0], (0, 0))
        elf.x += dx
        elf.y += dy
        elf.record_location()
class Elf2(Elf):
"""Enhance Elf Class"""
def __init__(self, compass, x, y, visited):
super(Elf2, self).__init__(compass, x, y)
self.visited = visited
def record_location(self):
"""Record current Location"""
point = (self.x, self.y)
if self.visited.has_key(point) is True:
print str(abs(self.x) + abs(self.y))
exit()
else:
self.visited[point] = True
def solve2(sequence):
    """Part 2: print the distance of the first location visited twice.

    Uses Elf2, whose record_location() prints the answer and exits the
    process as soon as any grid point repeats; the origin is pre-seeded
    as visited.
    """
    paratrooper_elf = Elf2(deque(['N', 'E', 'S', 'W']), 0, 0, {(0, 0):True})
    for direction in direction_generator(sequence):
        turn(re.search(r'[RL]', direction).group(0), paratrooper_elf)
        walk2(re.search(r'\d+', direction).group(0), paratrooper_elf)
INPUT = "L1, L3, L5, L3, R1, L4, L5, R1, R3, L5, R1, L3, L2, L3, R2, R2, L3, L3, R1, L2, R1, L3, L2, R4, R2, L5, R4, L5, R4, L2, R3, L2, R4, R1, L5, L4, R1, L2, R3, R1, R2, L4, R1, L2, R3, L2, L3, R5, L192, R4, L5, R4, L1, R4, L4, R2, L5, R45, L2, L5, R4, R5, L3, R5, R77, R2, R5, L5, R1, R4, L4, L4, R2, L4, L1, R191, R1, L1, L2, L2, L4, L3, R1, L3, R1, R5, R3, L1, L4, L2, L3, L1, L1, R5, L4, R1, L3, R1, L2, R1, R4, R5, L4, L2, R4, R5, L1, L2, R3, L4, R2, R2, R3, L2, L3, L5, R3, R1, L4, L3, R4, R2, R2, R2, R1, L4, R4, R1, R2, R1, L2, L2, R4, L1, L2, R3, L3, L5, L4, R4, L3, L1, L5, L3, L5, R5, L5, L4, L2, R1, L2, L4, L2, L4, L1, R4, R4, R5, R1, L4, R2, L4, L2, L4, R2, L4, L1, L2, R1, R4, R3, R2, R2, R5, L1, L2"
solve1(INPUT)
solve2(INPUT)
| [
"[email protected]"
] | |
92e85c7b6e66817ecaf916d920cc1d86019397c2 | fe9573bad2f6452ad3e2e64539361b8bc92c1030 | /scapy_code/sniif_packet.py | 97cbf240c0083c9937735a47714341cd1d7da111 | [] | no_license | OceanicSix/Python_program | e74c593e2e360ae22a52371af6514fcad0e8f41f | 2716646ce02db00306b475bad97105b260b6cd75 | refs/heads/master | 2022-01-25T16:59:31.212507 | 2022-01-09T02:01:58 | 2022-01-09T02:01:58 | 149,686,276 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | from scapy.all import *
def print_pkt(pkt):
    """scapy sniff() callback: dump the IP layer of each captured packet."""
    print("---------------this is a new packet----------------------")
    new_pkt = pkt[IP]
    # NOTE(review): indexing with [ICMP] raises IndexError when the layer is
    # absent; the "icmp" BPF filter below makes that unlikely, but
    # pkt.haslayer(ICMP) would be the safe test -- confirm.
    if new_pkt[ICMP]:
        new_pkt.show()
sniff(filter= "icmp" , prn=print_pkt)
| [
"[email protected]"
] | |
ac829ac73598a0d0da9c3829ae92352cec0d73c6 | 1fbf278cca477451d6b3bae4ecba31b018ced42e | /tlssecondopinion/wsgi.py | d580ee045afb8b460834bd4044b5cc19e4dfa13f | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | MiWCryptAnalytics/tlssecondopinion | 2451049a3c553aa1fa4ff5ef6357cdcb9f3a8259 | f1eebf753cc898ba546bf1371f3ce1ea848d17d6 | refs/heads/master | 2016-09-06T07:39:45.921966 | 2015-08-31T14:23:39 | 2015-08-31T14:23:39 | 41,423,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | """
WSGI config for tlssecondopinion project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tlssecondopinion.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
9e22f5283f06022fe6da4ddeb1f1e16516a673c8 | de6bc17d71706ff030926c273f7f3435082ede9b | /tutorial/snippets/models.py | 114d28c48670ff91859764281348acf974bd6ffa | [
"MIT"
] | permissive | PabloSuarez/API_django | c1054a4103b6a0bc460bb92a484db3d2bf5f9bcb | 522cc5b052c13c38fc7ef95353b8e3640126feaa | refs/heads/master | 2021-01-15T14:43:09.911839 | 2015-09-23T21:17:59 | 2015-09-23T21:17:59 | 42,883,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,601 | py | from django.db import models
from pygments.lexers import get_all_lexers
from pygments.styles import get_all_styles
#add to save method
from pygments.lexers import get_lexer_by_name
from pygments.formatters.html import HtmlFormatter
from pygments import highlight
LEXERS = [item for item in get_all_lexers() if item[1]]
LANGUAGE_CHOICES = sorted([ (item[1][0], item[0]) for item in LEXERS ])
STYLE_CHOICES = sorted((item, item) for item in get_all_styles())
class Snippet(models.Model):
    """A code snippet plus its pygments-highlighted HTML rendering."""
    created = models.DateTimeField(auto_now_add=True)
    title = models.CharField(max_length=100, blank=True, default='')
    code = models.TextField()
    linenos = models.BooleanField(default=False)
    language = models.CharField(choices=LANGUAGE_CHOICES, default='python', max_length=100)
    style = models.CharField(choices=STYLE_CHOICES, default='friendly', max_length=100)
    owner = models.ForeignKey('auth.User', related_name='snippets')
    # Derived field, regenerated on every save().
    highlighted = models.TextField()

    class Meta:
        ordering = ('created',)

    def save(self, *args, **kwargs):
        """
        Use the `pygments` library to create a highlighted HTML
        representation of the code snippet.
        """
        lexer = get_lexer_by_name(self.language)
        # Conditional expressions replace the old `x and a or b` idiom,
        # which silently misbehaves whenever the `a` branch is falsy.
        linenos = 'table' if self.linenos else False
        options = {'title': self.title} if self.title else {}
        formatter = HtmlFormatter(style=self.style, linenos=linenos,
                                  full=True, **options)
        self.highlighted = highlight(self.code, lexer, formatter)
        super(Snippet, self).save(*args, **kwargs)
| [
"[email protected]"
] | |
87db78fc9bb040bc77eeeb14ffba6ee78b8c43fa | 42394bd8cd674dcd0822ae288ddb4f4e749a6ed6 | /fluent_blogs/sitemaps.py | 97da332b7a014536107d1f7fe042d295b321ac83 | [
"Apache-2.0"
] | permissive | mmggbj/django-fluent-blogs | 4bca6e7effeca8b4cee3fdf4f8bb4eb4d192dfbe | 7fc3220d6609fe0615ad6ab44044c671d17d06a3 | refs/heads/master | 2021-05-08T13:02:51.896360 | 2018-01-31T21:54:27 | 2018-01-31T21:54:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,647 | py | from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.contrib.sitemaps import Sitemap
from fluent_blogs.models import get_entry_model, get_category_model
from fluent_blogs.urlresolvers import blog_reverse
from parler.models import TranslatableModel
User = get_user_model()
EntryModel = get_entry_model()
CategoryModel = get_category_model()
class EntrySitemap(Sitemap):
    """
    The sitemap definition for the pages created with django-fluent-blogs.
    """
    def items(self):
        """Return all published entries, newest first."""
        qs = EntryModel.objects.published().order_by('-publication_date')
        if issubclass(EntryModel, TranslatableModel):
            # Note that .active_translations() can't be combined with other filters for translations__.. fields.
            qs = qs.active_translations()
            return qs.order_by('-publication_date', 'translations__language_code')
        else:
            return qs.order_by('-publication_date')
    def lastmod(self, urlnode):
        """Return the last modification of the entry."""
        return urlnode.modification_date
    def location(self, urlnode):
        """Return url of an entry."""
        return urlnode.url
class CategoryArchiveSitemap(Sitemap):
    """Sitemap of category archive pages that contain published entries."""
    def items(self):
        # Only categories actually referenced by a published entry.
        only_ids = EntryModel.objects.published().values('categories').order_by().distinct()
        return CategoryModel.objects.filter(id__in=only_ids)
    def lastmod(self, category):
        """Return the modification date of the newest entry in the category."""
        lastitems = EntryModel.objects.published().order_by('-modification_date').filter(categories=category).only('modification_date')
        return lastitems[0].modification_date
    def location(self, category):
        """Return the URL of the category archive page."""
        return blog_reverse('entry_archive_category', kwargs={'slug': category.slug}, ignore_multiple=True)
class AuthorArchiveSitemap(Sitemap):
    """Sitemap of author archive pages that contain published entries."""
    def items(self):
        # Only users who authored at least one published entry.
        only_ids = EntryModel.objects.published().values('author').order_by().distinct()
        return User.objects.filter(id__in=only_ids)
    def lastmod(self, author):
        """Return the modification date of the author's newest entry."""
        lastitems = EntryModel.objects.published().order_by('-modification_date').filter(author=author).only('modification_date')
        return lastitems[0].modification_date
    def location(self, author):
        """Return the URL of the author archive page."""
        return blog_reverse('entry_archive_author', kwargs={'slug': author.username}, ignore_multiple=True)
class TagArchiveSitemap(Sitemap):
    """Sitemap of tag archive pages (only when django-taggit is installed)."""
    def items(self):
        # Tagging is optional. When it's not used, it's ignored.
        if 'taggit' not in settings.INSTALLED_APPS:
            return []

        from taggit.models import Tag
        only_instances = EntryModel.objects.published().only('pk')
        # Until https://github.com/alex/django-taggit/pull/86 is merged,
        # better use the field names directly instead of bulk_lookup_kwargs
        return Tag.objects.filter(
            taggit_taggeditem_items__object_id__in=only_instances,
            taggit_taggeditem_items__content_type=ContentType.objects.get_for_model(EntryModel)
        )
    def lastmod(self, tag):
        """Return the modification date of the newest entry carrying the tag."""
        lastitems = EntryModel.objects.published().order_by('-modification_date').filter(tags=tag).only('modification_date')
        return lastitems[0].modification_date
    def location(self, tag):
        """Return the URL of the tag archive page."""
        return blog_reverse('entry_archive_tag', kwargs={'slug': tag.slug}, ignore_multiple=True)
| [
"[email protected]"
] | |
c693954cad97f78d72668a79087d4930ccea1091 | a8b37bd399dd0bad27d3abd386ace85a6b70ef28 | /airbyte-integrations/connectors/source-opsgenie/source_opsgenie/source.py | 743694d15b54b6ca441b6e91b3a528af43f6b85c | [
"MIT",
"LicenseRef-scancode-free-unknown",
"Elastic-2.0"
] | permissive | thomas-vl/airbyte | 5da2ba9d189ba0b202feb952cadfb550c5050871 | 258a8eb683634a9f9b7821c9a92d1b70c5389a10 | refs/heads/master | 2023-09-01T17:49:23.761569 | 2023-08-25T13:13:11 | 2023-08-25T13:13:11 | 327,604,451 | 1 | 0 | MIT | 2021-01-07T12:24:20 | 2021-01-07T12:24:19 | null | UTF-8 | Python | false | false | 1,870 | py | #
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from typing import Any, List, Mapping, Tuple
import requests
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from airbyte_cdk.sources.streams.http.requests_native_auth import TokenAuthenticator
from .streams import AlertLogs, AlertRecipients, Alerts, Incidents, Integrations, Services, Teams, Users, UserTeams
# Source
class SourceOpsgenie(AbstractSource):
    """Airbyte source connector for the Opsgenie alerting service."""
    @staticmethod
    def get_authenticator(config: Mapping[str, Any]):
        # Opsgenie expects "Authorization: GenieKey <api_token>" headers.
        return TokenAuthenticator(config["api_token"], auth_method="GenieKey")

    def check_connection(self, logger, config) -> Tuple[bool, Any]:
        """Validate the credentials by fetching the /v2/account resource."""
        try:
            auth = self.get_authenticator(config)
            api_endpoint = f"https://{config['endpoint']}/v2/account"
            response = requests.get(
                api_endpoint,
                headers=auth.get_auth_header(),
            )
            return response.status_code == requests.codes.ok, None
        except Exception as error:
            return False, f"Unable to connect to Opsgenie API with the provided credentials - {repr(error)}"

    def streams(self, config: Mapping[str, Any]) -> List[Stream]:
        """Instantiate all supported streams; alerts and incidents read incrementally from start_date."""
        auth = self.get_authenticator(config)
        args = {"authenticator": auth, "endpoint": config["endpoint"]}
        incremental_args = {**args, "start_date": config.get("start_date", "")}
        users = Users(**args)
        alerts = Alerts(**incremental_args)
        return [
            alerts,
            # Child streams fetch per-alert / per-user data from their parent stream.
            AlertRecipients(parent_stream=alerts, **args),
            AlertLogs(parent_stream=alerts, **args),
            Incidents(**incremental_args),
            Integrations(**args),
            Services(**args),
            Teams(**args),
            users,
            UserTeams(parent_stream=users, **args),
        ]
| [
"[email protected]"
] | |
83a912f2fd9bb92402ffe65df2ebaf7a667edd7e | e590449a05b20712d777fc5f0fa52097678c089b | /python-client/test/test_stash_appscode_com_v1alpha1_api.py | 58eaf340d2c3e2c403e782c27e9854d90c2f4271 | [
"Apache-2.0"
] | permissive | Hardeep18/kube-openapi-generator | 2563d72d9f95196f8ef795896c08e8e21cd1a08e | 6607d1e208965e3a09a0ee6d1f2de7e462939150 | refs/heads/master | 2020-04-11T03:30:18.786896 | 2018-05-05T20:57:51 | 2018-05-05T20:57:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,173 | py | # coding: utf-8
"""
stash-server
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.api.stash_appscode_com_v1alpha1_api import StashAppscodeComV1alpha1Api # noqa: E501
from swagger_client.rest import ApiException
class TestStashAppscodeComV1alpha1Api(unittest.TestCase):
"""StashAppscodeComV1alpha1Api unit test stubs"""
def setUp(self):
self.api = swagger_client.api.stash_appscode_com_v1alpha1_api.StashAppscodeComV1alpha1Api() # noqa: E501
def tearDown(self):
pass
def test_create_stash_appscode_com_v1alpha1_namespaced_recovery(self):
"""Test case for create_stash_appscode_com_v1alpha1_namespaced_recovery
"""
pass
def test_create_stash_appscode_com_v1alpha1_namespaced_repository(self):
"""Test case for create_stash_appscode_com_v1alpha1_namespaced_repository
"""
pass
def test_create_stash_appscode_com_v1alpha1_namespaced_restic(self):
"""Test case for create_stash_appscode_com_v1alpha1_namespaced_restic
"""
pass
def test_delete_stash_appscode_com_v1alpha1_collection_namespaced_recovery(self):
"""Test case for delete_stash_appscode_com_v1alpha1_collection_namespaced_recovery
"""
pass
def test_delete_stash_appscode_com_v1alpha1_collection_namespaced_repository(self):
"""Test case for delete_stash_appscode_com_v1alpha1_collection_namespaced_repository
"""
pass
def test_delete_stash_appscode_com_v1alpha1_collection_namespaced_restic(self):
"""Test case for delete_stash_appscode_com_v1alpha1_collection_namespaced_restic
"""
pass
def test_delete_stash_appscode_com_v1alpha1_namespaced_recovery(self):
"""Test case for delete_stash_appscode_com_v1alpha1_namespaced_recovery
"""
pass
def test_delete_stash_appscode_com_v1alpha1_namespaced_repository(self):
"""Test case for delete_stash_appscode_com_v1alpha1_namespaced_repository
"""
pass
def test_delete_stash_appscode_com_v1alpha1_namespaced_restic(self):
"""Test case for delete_stash_appscode_com_v1alpha1_namespaced_restic
"""
pass
def test_get_stash_appscode_com_v1alpha1_api_resources(self):
"""Test case for get_stash_appscode_com_v1alpha1_api_resources
"""
pass
def test_list_stash_appscode_com_v1alpha1_namespaced_recovery(self):
"""Test case for list_stash_appscode_com_v1alpha1_namespaced_recovery
"""
pass
def test_list_stash_appscode_com_v1alpha1_namespaced_repository(self):
"""Test case for list_stash_appscode_com_v1alpha1_namespaced_repository
"""
pass
def test_list_stash_appscode_com_v1alpha1_namespaced_restic(self):
"""Test case for list_stash_appscode_com_v1alpha1_namespaced_restic
"""
pass
def test_list_stash_appscode_com_v1alpha1_recovery_for_all_namespaces(self):
"""Test case for list_stash_appscode_com_v1alpha1_recovery_for_all_namespaces
"""
pass
def test_list_stash_appscode_com_v1alpha1_repository_for_all_namespaces(self):
"""Test case for list_stash_appscode_com_v1alpha1_repository_for_all_namespaces
"""
pass
def test_list_stash_appscode_com_v1alpha1_restic_for_all_namespaces(self):
"""Test case for list_stash_appscode_com_v1alpha1_restic_for_all_namespaces
"""
pass
def test_patch_stash_appscode_com_v1alpha1_namespaced_recovery(self):
"""Test case for patch_stash_appscode_com_v1alpha1_namespaced_recovery
"""
pass
def test_patch_stash_appscode_com_v1alpha1_namespaced_repository(self):
"""Test case for patch_stash_appscode_com_v1alpha1_namespaced_repository
"""
pass
def test_patch_stash_appscode_com_v1alpha1_namespaced_restic(self):
"""Test case for patch_stash_appscode_com_v1alpha1_namespaced_restic
"""
pass
def test_read_stash_appscode_com_v1alpha1_namespaced_recovery(self):
"""Test case for read_stash_appscode_com_v1alpha1_namespaced_recovery
"""
pass
def test_read_stash_appscode_com_v1alpha1_namespaced_repository(self):
"""Test case for read_stash_appscode_com_v1alpha1_namespaced_repository
"""
pass
def test_read_stash_appscode_com_v1alpha1_namespaced_restic(self):
"""Test case for read_stash_appscode_com_v1alpha1_namespaced_restic
"""
pass
def test_replace_stash_appscode_com_v1alpha1_namespaced_recovery(self):
"""Test case for replace_stash_appscode_com_v1alpha1_namespaced_recovery
"""
pass
def test_replace_stash_appscode_com_v1alpha1_namespaced_repository(self):
"""Test case for replace_stash_appscode_com_v1alpha1_namespaced_repository
"""
pass
def test_replace_stash_appscode_com_v1alpha1_namespaced_restic(self):
"""Test case for replace_stash_appscode_com_v1alpha1_namespaced_restic
"""
pass
def test_watch_stash_appscode_com_v1alpha1_namespaced_recovery(self):
"""Test case for watch_stash_appscode_com_v1alpha1_namespaced_recovery
"""
pass
def test_watch_stash_appscode_com_v1alpha1_namespaced_recovery_list(self):
"""Test case for watch_stash_appscode_com_v1alpha1_namespaced_recovery_list
"""
pass
def test_watch_stash_appscode_com_v1alpha1_namespaced_repository(self):
"""Test case for watch_stash_appscode_com_v1alpha1_namespaced_repository
"""
pass
def test_watch_stash_appscode_com_v1alpha1_namespaced_repository_list(self):
"""Test case for watch_stash_appscode_com_v1alpha1_namespaced_repository_list
"""
pass
def test_watch_stash_appscode_com_v1alpha1_namespaced_restic(self):
"""Test case for watch_stash_appscode_com_v1alpha1_namespaced_restic
"""
pass
def test_watch_stash_appscode_com_v1alpha1_namespaced_restic_list(self):
"""Test case for watch_stash_appscode_com_v1alpha1_namespaced_restic_list
"""
pass
def test_watch_stash_appscode_com_v1alpha1_recovery_list_for_all_namespaces(self):
"""Test case for watch_stash_appscode_com_v1alpha1_recovery_list_for_all_namespaces
"""
pass
def test_watch_stash_appscode_com_v1alpha1_repository_list_for_all_namespaces(self):
"""Test case for watch_stash_appscode_com_v1alpha1_repository_list_for_all_namespaces
"""
pass
def test_watch_stash_appscode_com_v1alpha1_restic_list_for_all_namespaces(self):
"""Test case for watch_stash_appscode_com_v1alpha1_restic_list_for_all_namespaces
"""
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
c23fda02795ce8062a1ea9feb8f8170ac4d0bc95 | 31dff1fae6d31d84a442c8af5dc75fa7f98d3570 | /main.py | 1d4ed854e5cb6b7f060fca5b4785c734180ab50a | [] | no_license | jblakeh1/circuit-playground-lamp | e1e7fdd7227d18d44d273cc93c7d0ed93b1435a5 | 71949de0f7569596b4c11ec69e8ba74a1388bc4d | refs/heads/master | 2020-06-22T06:14:33.443595 | 2019-07-18T20:50:30 | 2019-07-18T20:50:30 | 197,654,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,282 | py | # digital rothko
# January 15, 2019
# J. Blake Harris
# Display two colors on a string of neopixels based
# on sound and light sensors
# on the Adafruit Playground Express
from simpleio import map_range
from analogio import AnalogIn
from digitalio import DigitalInOut, Direction, Pull
import array
import audiobusio
import board
import neopixel
import math
import time
SHERBET = (127, 20, 0)
SALMON = (80, 0, 20)
POMENGRANITE = (127, 0, 2)
TOMATO = (208, 8, 8)
LEMON = (127, 80, 0)
PERIWINKLE = (2, 0, 80)
AQUA = (0, 80, 20)
MARINE = (0, 80, 40)
MIDNIGHT = (20, 0, 127)
LIME = (20, 127, 0)
TOP_COLORS = [LIME, MIDNIGHT, MARINE, POMENGRANITE, PERIWINKLE, TOMATO, LEMON, AQUA, SHERBET,LIME, LIME]
BTM_COLORS = [MIDNIGHT, AQUA, LEMON, SALMON, POMENGRANITE, LEMON, SHERBET, LEMON, POMENGRANITE, AQUA, AQUA]
# light meter -------------------------------------------------------------
analogLight = AnalogIn(board.LIGHT)
# microphone --------------------------------------------------------------
# Exponential scaling factor.
# Should probably be in range -10 .. 10 to be reasonable.
CURVE = 2
SCALE_EXPONENT = math.pow(10, CURVE*-0.1)
# Number of samples to read at once.
NUM_SAMPLES = 160
# Restrict value to be between floor and ceiling.
def constrain(value, floor, ceiling):
    """Clamp *value* into the inclusive range [floor, ceiling]."""
    clipped = min(value, ceiling)
    return floor if clipped < floor else clipped
# Scale input_value to be between output_min and output_max, in an exponential way.
def log_scale(input_value, input_min, input_max, output_min, output_max):
    """Map *input_value* from the input range onto the output range along an
    exponential curve controlled by the module-level SCALE_EXPONENT."""
    input_span = input_max - input_min
    output_span = output_max - output_min
    normalized = (input_value - input_min) / input_span
    return output_min + math.pow(normalized, SCALE_EXPONENT) * output_span
# Remove DC bias before computing RMS.
def normalized_rms(values):
    """Return the RMS of *values* after subtracting the (truncated) mean,
    i.e. with the DC bias removed.  The mean computation is inlined."""
    dc_bias = int(sum(values) / len(values))
    squared_deviations = [float(v - dc_bias) * float(v - dc_bias) for v in values]
    return math.sqrt(sum(squared_deviations) / len(values))
def mean(values):
    """Arithmetic mean of *values* (true division, so the result is a float)."""
    total = sum(values)
    return total / len(values)
mic = audiobusio.PDMIn(board.MICROPHONE_CLOCK, board.MICROPHONE_DATA, sample_rate=16000, bit_depth=16)
# Record an initial sample to calibrate. Assume it's quiet when we start.
samples = array.array('H', [0] * NUM_SAMPLES)
mic.record(samples, len(samples))
# Set lowest level to expect, plus a little.
input_floor = normalized_rms(samples) + 10
# OR: used a fixed floor
# input_floor = 50
# You might want to print the input_floor to help adjust other values.
# print(input_floor)
# Corresponds to sensitivity: lower means more pixels light up with lower sound
# Adjust this as you see fit.
input_ceiling = input_floor + 10
peak = 0
# on-board neopixels ------------------------------------------------------
pixels = neopixel.NeoPixel(board.NEOPIXEL, 10, auto_write=0, brightness=0.3)
pixels.fill(POMENGRANITE)
pixels.show()
# pin neopixels
pin = neopixel.NeoPixel(board.A7, 120)
while True:
# read light meter ----------------------------------------------------
# light value remapped to 0 - 9
lightReading = map_range(analogLight.value, 2000, 100000, 0, 10)
print("light:")
print(int(lightReading))
if lightReading > 10:
lightReading = 10
# read microphone ----------------------------------------------------
mic.record(samples, len(samples))
magnitude = normalized_rms(samples)
# You might want to print this to see the values.
# print(magnitude)
# Compute scaled logarithmic reading in the range 0 to 8
soundReading = log_scale(constrain(magnitude, input_floor, input_ceiling), input_floor, input_ceiling, 0, 100)
print("sound:")
soundReading = soundReading/10
if soundReading > 10:
soundReading = 10
print(int(soundReading))
finalReading = int((lightReading + soundReading)/2)
print("final:")
print(int(finalReading))
print("-----------------")
for i in range(0, 5):
pixels[i] = TOP_COLORS[int(finalReading)]
pixels.show()
time.sleep(0.05)
for i in range(5, 10):
pixels[i] = BTM_COLORS[int(finalReading)]
pixels.show()
time.sleep(0.05)
for i in range(0, 30):
pin[i] = BTM_COLORS[int(finalReading)]
pin.show()
time.sleep(0.01)
for i in range(30, 60):
pin[i] = TOP_COLORS[int(finalReading)]
pin.show()
time.sleep(0.01)
| [
"[email protected]"
] |
Subsets and Splits