| Column | Type | Range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M (nullable) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 (nullable) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | length 3 – 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1–132 |
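A minimal sketch (not part of the dataset card) of filtering rows that match this schema, assuming each row is available as a plain Python dict keyed by the columns above:

```python
# Hypothetical rows; only a few of the schema's columns are shown.
rows = [
    {"repo_name": "banana1019/tumbluv-backend", "language": "Python",
     "license_type": "no_license", "length_bytes": 273, "is_generated": False},
]

# Keep small, hand-written (non-generated) Python sources.
small_sources = [
    row for row in rows
    if row["language"] == "Python"
    and not row["is_generated"]
    and row["length_bytes"] < 1024
]
print(len(small_sources))  # -> 1
```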
# ===== banana1019/tumbluv-backend :: /project/migrations/0008_merge_20210312_1000.py [no_license | 273 bytes] =====
# Generated by Django 3.1.6 on 2021-03-12 10:00
from django.db import migrations
class Migration(migrations.Migration):
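    # A merge migration: it declares the two divergent 0007_* migrations as its
    # dependencies and performs no operations itself, letting Django linearize
    # the conflicting migration branches into a single history.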
dependencies = [
('project', '0007_auto_20210309_2313'),
('project', '0007_auto_20210309_2055'),
]
operations = [
]
# ===== ds-gurukandhamoorthi/intro-python-exs :: /checksum_Luhn.py [no_license | 1,247 bytes] =====
import argparse
def sum_digits(n):
    # Digital root of n; n == 0 must map to 0, not 9.
    if n == 0:
        return 0
    res = n % 9
    return res if res > 0 else 9
def sum_ISBN(num, mul=3):
return sum(int(n) if i %2 == 0 else int(n)*mul for i, n in enumerate(str(num)))
def is_valid_ISBN(num):
if len(str(num)) != 13:
return False
return sum_ISBN(num) %10 == 0
def is_valid_ISBN_str(str_isbn):
return is_valid_ISBN(str_isbn.replace('-',''))
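# Worked check (added example, not in the original file): for the ISBN-13 of
# "The C Programming Language", 978-0131103627, the alternating 1/3-weighted sum
# is 9+21+8+0+1+9+1+3+0+9+6+6+7 = 80, a multiple of 10, so
# is_valid_ISBN_str('978-0-13-110362-7') returns True.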
def sum_digits_Luhn(num, mul=2):
total = 0
for i, digit in enumerate(str(num)):
if i%2 == 0:
total += int(digit)
else:
total += sum_digits(int(digit)*mul)
return total
#Luhn's algorithm
def checksum_Luhn(num):
return sum_digits_Luhn(num) % 10 == 0
def make_checksum_Luhn(num):
    # Reduce mod 10 so an already-valid sum appends check digit 0, not "10".
    append = (10 - sum_digits_Luhn(num) % 10) % 10
    return int(str(num) + str(append))
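# Worked example (added, not in the original file): for the classic Luhn test
# number 7992739871 the doubled-and-reduced sum is 67, so the appended check
# digit is (10 - 67 % 10) % 10 = 3; make_checksum_Luhn(7992739871) gives
# 79927398713, which checksum_Luhn accepts.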
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Checksum as used in banks')
parser.add_argument('number', type=int, help='checksum for number: 10 digits to create, 11 digits to check')
args = parser.parse_args()
number = str(args.number)
if(len(number) == 10):
res = make_checksum_Luhn(number)
print(res)
else:
print(checksum_Luhn(number))
# ===== joseph-mutu/JianZhiOfferCodePics :: /复杂链表的复制.py [no_license | 1,278 bytes] =====
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2019-08-26 09:10:31
# @Author : mutudeh ([email protected])
# @Link : ${link}
# @Version : $Id$
import os
class RandomListNode:
def __init__(self, x):
self.label = x
self.next = None
self.random = None
class Solution:
    # returns the head of the cloned RandomListNode list
def Clone(self, pHead):
if not pHead:
return None
# write code here
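        # Three passes, O(n) time and O(1) extra space:
        #   1. interleave a clone after each node: A -> A' -> B -> B' -> ...
        #   2. wire clone.random through original.random.next
        #   3. split the interleaved list back into original and copy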
self.CloneNodes(pHead)
self.CloneRanomPointer(pHead)
return self.SplitChain(pHead)
def CloneNodes(self,pHead):
while pHead:
temNode = RandomListNode(pHead.label)
temNode.next = pHead.next
pHead.next = temNode
pHead = temNode.next
    def CloneRanomPointer(self, pHead):
        # Visit every original node; its clone is pHead.next, and the clone's
        # random target is the node right after the original's random target.
        while pHead:
            if pHead.random:
                pHead.next.random = pHead.random.next
            pHead = pHead.next.next
def SplitChain(self,pHead):
newHead= pHead.next
while pHead.next:
tem = pHead.next
pHead.next = tem.next
pHead = tem
return newHead
a = RandomListNode(1)
a.next = RandomListNode(4)
a.next.next = RandomListNode(5)
a.next.next.next = RandomListNode(7)
a.next.next.next.next = RandomListNode(9)
a.random = a.next.next
a.next.random = a.next.next.next.next
s = Solution()
# while a:
# print(a.label)
# a = a.next
newHead = s.Clone(a)
print(newHead.random.label)
# ===== killvxk/etl-parser :: /etl/parsers/etw/Microsoft_Windows_Dwm_Udwm.py [Apache-2.0 | 19,608 bytes] =====
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-Dwm-Udwm
GUID : a2d1c713-093b-43a7-b445-d09370ec9f47
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=1, version=0)
class Microsoft_Windows_Dwm_Udwm_1_0(Etw):
pattern = Struct(
"flags" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=2, version=0)
class Microsoft_Windows_Dwm_Udwm_2_0(Etw):
pattern = Struct(
"flags" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5, version=0)
class Microsoft_Windows_Dwm_Udwm_5_0(Etw):
pattern = Struct(
"Height" / Int32ul,
"Width" / Int32ul,
"Depth" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5000, version=0)
class Microsoft_Windows_Dwm_Udwm_5000_0(Etw):
pattern = Struct(
"Schedules" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5002, version=0)
class Microsoft_Windows_Dwm_Udwm_5002_0(Etw):
pattern = Struct(
"AnimationType" / Int32ul,
"Hwnd" / Int64ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5003, version=0)
class Microsoft_Windows_Dwm_Udwm_5003_0(Etw):
pattern = Struct(
"AnimationType" / Int32ul,
"Hwnd" / Int64ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5005, version=0)
class Microsoft_Windows_Dwm_Udwm_5005_0(Etw):
pattern = Struct(
"Action" / Int32ul,
"Hwnd" / Int64ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5006, version=0)
class Microsoft_Windows_Dwm_Udwm_5006_0(Etw):
pattern = Struct(
"Action" / Int32ul,
"Hwnd" / Int64ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5007, version=0)
class Microsoft_Windows_Dwm_Udwm_5007_0(Etw):
pattern = Struct(
"hwnd" / Int64ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5008, version=0)
class Microsoft_Windows_Dwm_Udwm_5008_0(Etw):
pattern = Struct(
"hwnd" / Int64ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5009, version=0)
class Microsoft_Windows_Dwm_Udwm_5009_0(Etw):
pattern = Struct(
"hwnd" / Int64ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5010, version=0)
class Microsoft_Windows_Dwm_Udwm_5010_0(Etw):
pattern = Struct(
"hwnd" / Int64ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5014, version=0)
class Microsoft_Windows_Dwm_Udwm_5014_0(Etw):
pattern = Struct(
"hwnd" / Int64ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5015, version=0)
class Microsoft_Windows_Dwm_Udwm_5015_0(Etw):
pattern = Struct(
"hwnd" / Int64ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5016, version=0)
class Microsoft_Windows_Dwm_Udwm_5016_0(Etw):
pattern = Struct(
"hwnd" / Int64ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5017, version=0)
class Microsoft_Windows_Dwm_Udwm_5017_0(Etw):
pattern = Struct(
"hwnd" / Int64ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5020, version=0)
class Microsoft_Windows_Dwm_Udwm_5020_0(Etw):
pattern = Struct(
"Left" / Int32sl,
"Top" / Int32sl,
"Right" / Int32sl,
"Bottom" / Int32sl
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5027, version=0)
class Microsoft_Windows_Dwm_Udwm_5027_0(Etw):
pattern = Struct(
"PointerID" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5030, version=0)
class Microsoft_Windows_Dwm_Udwm_5030_0(Etw):
pattern = Struct(
"PointerID" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5031, version=0)
class Microsoft_Windows_Dwm_Udwm_5031_0(Etw):
pattern = Struct(
"Enum" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5032, version=0)
class Microsoft_Windows_Dwm_Udwm_5032_0(Etw):
pattern = Struct(
"Enum" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5051, version=0)
class Microsoft_Windows_Dwm_Udwm_5051_0(Etw):
pattern = Struct(
"flags" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5052, version=0)
class Microsoft_Windows_Dwm_Udwm_5052_0(Etw):
pattern = Struct(
"flags" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5053, version=0)
class Microsoft_Windows_Dwm_Udwm_5053_0(Etw):
pattern = Struct(
"Enum" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5054, version=0)
class Microsoft_Windows_Dwm_Udwm_5054_0(Etw):
pattern = Struct(
"Enum" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5055, version=0)
class Microsoft_Windows_Dwm_Udwm_5055_0(Etw):
pattern = Struct(
"PointerID" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5058, version=0)
class Microsoft_Windows_Dwm_Udwm_5058_0(Etw):
pattern = Struct(
"alignment" / Int32sl
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5059, version=0)
class Microsoft_Windows_Dwm_Udwm_5059_0(Etw):
pattern = Struct(
"AnimationID" / Int32ul,
"StoryboardID" / Int32sl,
"TickCount" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5060, version=0)
class Microsoft_Windows_Dwm_Udwm_5060_0(Etw):
pattern = Struct(
"AnimationID" / Int32ul,
"StoryboardID" / Int32sl,
"TickCount" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5063, version=0)
class Microsoft_Windows_Dwm_Udwm_5063_0(Etw):
pattern = Struct(
"StoryboardID" / Int32sl
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5064, version=0)
class Microsoft_Windows_Dwm_Udwm_5064_0(Etw):
pattern = Struct(
"StoryboardID" / Int32sl
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5065, version=0)
class Microsoft_Windows_Dwm_Udwm_5065_0(Etw):
pattern = Struct(
"StoryboardID" / Int32sl
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5070, version=0)
class Microsoft_Windows_Dwm_Udwm_5070_0(Etw):
pattern = Struct(
"EventId" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5071, version=0)
class Microsoft_Windows_Dwm_Udwm_5071_0(Etw):
pattern = Struct(
"AnimationID" / Int32ul,
"x0" / Float32l,
"y0" / Float32l,
"x1" / Float32l,
"y1" / Float32l
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5072, version=0)
class Microsoft_Windows_Dwm_Udwm_5072_0(Etw):
pattern = Struct(
"PointerID" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5073, version=0)
class Microsoft_Windows_Dwm_Udwm_5073_0(Etw):
pattern = Struct(
"PointerID" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5074, version=0)
class Microsoft_Windows_Dwm_Udwm_5074_0(Etw):
pattern = Struct(
"PointerID" / Int32ul,
"Enum" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5075, version=0)
class Microsoft_Windows_Dwm_Udwm_5075_0(Etw):
pattern = Struct(
"PointerID" / Int32ul,
"Enum" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5076, version=0)
class Microsoft_Windows_Dwm_Udwm_5076_0(Etw):
pattern = Struct(
"State" / Int32ul,
"AnimationID" / Int32sl
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5081, version=0)
class Microsoft_Windows_Dwm_Udwm_5081_0(Etw):
pattern = Struct(
"ResourceId" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5083, version=0)
class Microsoft_Windows_Dwm_Udwm_5083_0(Etw):
pattern = Struct(
"hwnd" / Int64ul,
"StoryboardId" / Int32sl,
"Target" / Int32sl
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5084, version=0)
class Microsoft_Windows_Dwm_Udwm_5084_0(Etw):
pattern = Struct(
"hwnd" / Int64ul,
"StoryboardId" / Int32sl,
"Target" / Int32sl
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5085, version=0)
class Microsoft_Windows_Dwm_Udwm_5085_0(Etw):
pattern = Struct(
"hwnd" / Int64ul,
"StoryboardId" / Int32sl,
"Target" / Int32sl
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5086, version=0)
class Microsoft_Windows_Dwm_Udwm_5086_0(Etw):
pattern = Struct(
"hwnd" / Int64ul,
"StoryboardId" / Int32sl,
"Target" / Int32sl
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5087, version=0)
class Microsoft_Windows_Dwm_Udwm_5087_0(Etw):
pattern = Struct(
"Enum" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5088, version=0)
class Microsoft_Windows_Dwm_Udwm_5088_0(Etw):
pattern = Struct(
"Enum" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5089, version=0)
class Microsoft_Windows_Dwm_Udwm_5089_0(Etw):
pattern = Struct(
"StoryboardId" / Int32sl,
"TargetId" / Int32sl,
"BeginLeft" / Int32sl,
"BeginTop" / Int32sl,
"BeginRight" / Int32sl,
"BeginBottom" / Int32sl,
"EndLeft" / Int32sl,
"EndTop" / Int32sl,
"EndRight" / Int32sl,
"EndBottom" / Int32sl,
"BeginOpacity" / Float32l,
"EndOpacity" / Float32l,
"BeginDepth" / Float32l,
"EndDepth" / Float32l,
"ResourceHandle" / Int32ul,
"StaggerOrder" / Int32ul,
"AnimationId" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5090, version=0)
class Microsoft_Windows_Dwm_Udwm_5090_0(Etw):
pattern = Struct(
"PointerID" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5091, version=0)
class Microsoft_Windows_Dwm_Udwm_5091_0(Etw):
pattern = Struct(
"StoryboardID" / Int32sl
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5092, version=0)
class Microsoft_Windows_Dwm_Udwm_5092_0(Etw):
pattern = Struct(
"StoryboardID" / Int32sl
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5093, version=0)
class Microsoft_Windows_Dwm_Udwm_5093_0(Etw):
pattern = Struct(
"hwnd" / Int64ul,
"Target" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5096, version=0)
class Microsoft_Windows_Dwm_Udwm_5096_0(Etw):
pattern = Struct(
"Title" / WString
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5115, version=0)
class Microsoft_Windows_Dwm_Udwm_5115_0(Etw):
pattern = Struct(
"EventId" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5117, version=0)
class Microsoft_Windows_Dwm_Udwm_5117_0(Etw):
pattern = Struct(
"hwnd" / Int64ul,
"Cloaked" / Int32ul,
"Tracked" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5118, version=0)
class Microsoft_Windows_Dwm_Udwm_5118_0(Etw):
pattern = Struct(
"hwnd" / Int64ul,
"Target" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5119, version=0)
class Microsoft_Windows_Dwm_Udwm_5119_0(Etw):
pattern = Struct(
"hwnd" / Int64ul,
"Show" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5120, version=0)
class Microsoft_Windows_Dwm_Udwm_5120_0(Etw):
pattern = Struct(
"StoryboardID" / Int32sl
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5121, version=0)
class Microsoft_Windows_Dwm_Udwm_5121_0(Etw):
pattern = Struct(
"StoryboardID" / Int32sl
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5122, version=0)
class Microsoft_Windows_Dwm_Udwm_5122_0(Etw):
pattern = Struct(
"StoryboardID" / Int32sl
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5123, version=0)
class Microsoft_Windows_Dwm_Udwm_5123_0(Etw):
pattern = Struct(
"hwndCloned" / Int64ul,
"hwndAfter" / Int64ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5124, version=0)
class Microsoft_Windows_Dwm_Udwm_5124_0(Etw):
pattern = Struct(
"hwnd" / Int64ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5125, version=0)
class Microsoft_Windows_Dwm_Udwm_5125_0(Etw):
pattern = Struct(
"hwnd" / Int64ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5126, version=0)
class Microsoft_Windows_Dwm_Udwm_5126_0(Etw):
pattern = Struct(
"hwnd" / Int64ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5127, version=0)
class Microsoft_Windows_Dwm_Udwm_5127_0(Etw):
pattern = Struct(
"clockId" / Guid,
"timespan" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5128, version=0)
class Microsoft_Windows_Dwm_Udwm_5128_0(Etw):
pattern = Struct(
"clockId" / Guid
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5129, version=0)
class Microsoft_Windows_Dwm_Udwm_5129_0(Etw):
pattern = Struct(
"clockId" / Guid,
"timespan" / Int32ul,
"count" / Int64sl
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5130, version=0)
class Microsoft_Windows_Dwm_Udwm_5130_0(Etw):
pattern = Struct(
"clockId" / Guid,
"count" / Int64sl
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5131, version=0)
class Microsoft_Windows_Dwm_Udwm_5131_0(Etw):
pattern = Struct(
"clockId" / Guid,
"time" / Int64sl
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5132, version=0)
class Microsoft_Windows_Dwm_Udwm_5132_0(Etw):
pattern = Struct(
"clockId" / Guid
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5133, version=0)
class Microsoft_Windows_Dwm_Udwm_5133_0(Etw):
pattern = Struct(
"clockId" / Guid,
"time" / Int64sl
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5134, version=0)
class Microsoft_Windows_Dwm_Udwm_5134_0(Etw):
pattern = Struct(
"clockId" / Guid,
"oldValue" / Int32sl,
"newValue" / Int32sl
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5150, version=0)
class Microsoft_Windows_Dwm_Udwm_5150_0(Etw):
pattern = Struct(
"Enum" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5151, version=0)
class Microsoft_Windows_Dwm_Udwm_5151_0(Etw):
pattern = Struct(
"StoryboardId" / Int32sl,
"TargetId" / Int32sl,
"VisualHandle" / Int32ul,
"EffectGroupHandle" / Int32ul,
"Transform3DGroupHandle" / Int32ul,
"TranslateTransform3DHandle" / Int32ul,
"ScaleTransform3DHandle" / Int32ul,
"RotateTransform3DHandle" / Int32ul,
"Channel" / Int64ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5152, version=0)
class Microsoft_Windows_Dwm_Udwm_5152_0(Etw):
pattern = Struct(
"AnimationHandle" / Int32ul,
"ResourceHandle" / Int32ul,
"PropertyID" / Int32ul,
"Channel" / Int64ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5153, version=0)
class Microsoft_Windows_Dwm_Udwm_5153_0(Etw):
pattern = Struct(
"hwnd" / Int64ul,
"DPI" / Int32sl,
"LogicalOriginX" / Int32sl,
"LogicalOriginY" / Int32sl,
"PhysicalOriginX" / Int32sl,
"PhysicalOriginY" / Int32sl
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5154, version=0)
class Microsoft_Windows_Dwm_Udwm_5154_0(Etw):
pattern = Struct(
"hwnd" / Int64ul,
"left" / Int32sl,
"top" / Int32sl,
"right" / Int32sl,
"bottom" / Int32sl,
"HRESULT" / Int32ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5155, version=0)
class Microsoft_Windows_Dwm_Udwm_5155_0(Etw):
pattern = Struct(
"hwnd" / Int64ul,
"StoryboardId" / Int32sl,
"Target" / Int32sl,
"CreationMethod" / Int32sl,
"Left" / Int32sl,
"Top" / Int32sl,
"Right" / Int32sl,
"Bottom" / Int32sl
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=5156, version=0)
class Microsoft_Windows_Dwm_Udwm_5156_0(Etw):
pattern = Struct(
"UseDelayStoryboard" / Int8ul,
"AbandonCrossfade" / Int8ul,
"FoundValidTarget" / Int8ul,
"IsResize" / Int8ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=9001, version=0)
class Microsoft_Windows_Dwm_Udwm_9001_0(Etw):
pattern = Struct(
"secondarywindowpointer" / Int64ul,
"hwnd" / Int64ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=9002, version=0)
class Microsoft_Windows_Dwm_Udwm_9002_0(Etw):
pattern = Struct(
"secondarywindowpointer" / Int64ul,
"hwnd" / Int64ul,
"representationType" / Int32sl
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=9003, version=0)
class Microsoft_Windows_Dwm_Udwm_9003_0(Etw):
pattern = Struct(
"secondarywindowpointer" / Int64ul,
"hwnd" / Int64ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=9004, version=0)
class Microsoft_Windows_Dwm_Udwm_9004_0(Etw):
pattern = Struct(
"CWindowData" / Int64ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=9005, version=0)
class Microsoft_Windows_Dwm_Udwm_9005_0(Etw):
pattern = Struct(
"hwndDestination" / Int64ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=9006, version=0)
class Microsoft_Windows_Dwm_Udwm_9006_0(Etw):
pattern = Struct(
"pwd" / Int64ul,
"hwnd" / Int64ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=9009, version=0)
class Microsoft_Windows_Dwm_Udwm_9009_0(Etw):
pattern = Struct(
"pSnapshot" / Int64ul
)
@declare(guid=guid("a2d1c713-093b-43a7-b445-d09370ec9f47"), event_id=10000, version=0)
class Microsoft_Windows_Dwm_Udwm_10000_0(Etw):
pattern = Struct(
"PerfTrackId" / Int32ul
)
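# Illustrative decode (added sketch, not part of the generated file): each event
# class wraps a `construct` Struct, so a raw little-endian payload can be parsed
# directly, e.g.:
#
#     parsed = Microsoft_Windows_Dwm_Udwm_1_0.pattern.parse(b"\x01\x00\x00\x00")
#     assert parsed.flags == 1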
# ===== Villux/golden_goal :: /simulation/predictor.py [no_license | 1,484 bytes] =====
import numpy as np
from scipy.stats import poisson
class OutcomePredictor():
def __init__(self, model):
self.model = model
def predict_outcome_probabilities(self, x):
return self.model.predict_proba(x)[0]
def predict(self, feature_vector):
outcome_proba = self.predict_outcome_probabilities(feature_vector)
outcome = np.argmax(outcome_proba) - 1
return np.flip(outcome_proba, axis=0), outcome
class ScorePredictor():
def __init__(self, model):
self.model = model
@staticmethod
def get_goal_matrix(home_mu, away_mu):
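        # goal_matrix[i, j] = P(home scores i) * P(away scores j) under
        # independent Poisson goal counts truncated at 10 per team; rows index
        # home goals, columns away goals, so the strict lower triangle (i > j)
        # holds the home-win probability mass used below.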
home_goal_prob, away_goal_prob = [[poisson.pmf(i, team_avg) for i in range(0, 11)] for team_avg in [home_mu, away_mu]]
return np.outer(home_goal_prob, away_goal_prob)
@staticmethod
def get_outcome_probabilities(goal_matrix):
home_win = np.sum(np.tril(goal_matrix, -1))
draw = np.sum(np.diag(goal_matrix))
away_win = np.sum(np.triu(goal_matrix, 1))
return [home_win, draw, away_win]
def predict_score(self, x):
mu_score = self.model.predict(x)[0]
return mu_score
def predict(self, home_fv, away_fv):
home_mu = self.predict_score(home_fv)
away_mu = self.predict_score(away_fv)
goal_matrix = self.get_goal_matrix(home_mu, away_mu)
outcome_proba = self.get_outcome_probabilities(goal_matrix)
outcome = np.argmax(outcome_proba) - 1
return outcome_proba, outcome
# ===== Aasthaengg/IBMdataset :: /Python_codes/p03240/s759213189.py [no_license | 710 bytes] =====
n = int(input())
xyh = [list(map(int, input().split())) for _ in range(n)]
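# Brute force: try every candidate center (i, j) on the 0..100 grid. Each
# observation with h > 0 pins the peak height as H = h + |x-i| + |y-j|; cells
# with h == 0 only bound H from above (H must not exceed their distance,
# tracked in `limit`).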
for i in range(101):
for j in range(101):
s = -1
flag = True
limit = float('inf')
for x, y, h in xyh:
if h != 0:
H = h + abs(x-i) + abs(y-j)
if s != -1:
if s != H:
flag = False
break
s = H
else:
limit = min(limit, abs(x-i) + abs(y-j))
if flag:
if s != -1 and s <= limit:
print(i, j, s)
exit()
elif s <= limit and limit == 1:
print(i, j, 1)
            exit()
# ===== madhavaramu/uSurvey :: /survey/features/page_objects/investigators.py [BSD-2-Clause | 5,651 bytes] =====
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
from time import sleep
from survey.features.page_objects.base import PageObject
from survey.investigator_configs import COUNTRY_PHONE_CODE
from rapidsms.contrib.locations.models import Location
from lettuce.django import django_url
from survey.models import EnumerationArea
class NewInvestigatorPage(PageObject):
url = "/investigators/new/"
def valid_page(self):
fields = ['name', 'mobile_number', 'confirm_mobile_number', 'male', 'age', 'backend']
for field in fields:
assert self.browser.is_element_present_by_name(field)
assert self.browser.find_by_css("span.add-on")[0].text == COUNTRY_PHONE_CODE
def get_investigator_values(self):
return self.values
def validate_detail_page_url(self):
assert self.browser.url == django_url(self.url)
def fill_valid_values(self, values, ea):
self.browser.find_by_id("location-value").value = Location.objects.create(name="Uganda").id
kampala = Location.objects.get(name="Kampala")
kampala_county = Location.objects.get(name="Kampala County")
kampala_subcounty = Location.objects.get(name="Subcounty")
kampala_parish = Location.objects.get(name="Parish")
kampala_village = Location.objects.get(name="Village")
ea = EnumerationArea.objects.get(name="EA")
self.fill_in_with_js('$("#location-district")', kampala.id)
self.fill_in_with_js('$("#location-county")', kampala_county.id)
self.fill_in_with_js('$("#location-subcounty")', kampala_subcounty.id)
self.fill_in_with_js('$("#location-parish")', kampala_parish.id)
self.fill_in_with_js('$("#location-village")', kampala_village.id)
self.fill_in_with_js('$("#widget_ea")', ea.id)
self.values = values
self.browser.fill_form(self.values)
class InvestigatorsListPage(PageObject):
url = '/investigators/'
def validate_fields(self):
self.validate_fields_present(["Investigators List", "Name", "Mobile Number", "Action"])
def validate_pagination(self):
self.browser.click_link_by_text("2")
def validate_presence_of_investigator(self, values):
assert self.browser.is_text_present(values['name'])
assert self.browser.is_text_present(values['mobile_number'])
def no_registered_invesitgators(self):
assert self.browser.is_text_present("There are no investigators currently registered for this location.")
def visit_investigator(self, investigator):
self.browser.click_link_by_text(investigator.name)
def see_confirm_block_message(self, confirmation_type, investigator):
self.is_text_present("Confirm: Are you sure you want to %s investigator %s" % (confirmation_type, investigator.name))
def validate_successful_edited_message(self):
self.is_text_present("Investigator successfully edited.")
def validate_page_url(self):
assert self.browser.url == django_url(self.url)
class FilteredInvestigatorsListPage(InvestigatorsListPage):
def __init__(self, browser, location_id):
self.browser = browser
self.url = '/investigators/?location=' + str(location_id)
def no_registered_invesitgators(self):
assert self.browser.is_text_present("There are no investigators currently registered for this county.")
class EditInvestigatorPage(PageObject):
def __init__(self, browser, investigator):
self.browser = browser
self.investigator = investigator
self.url = '/investigators/' + str(investigator.id) + '/edit/'
def validate_edit_investigator_url(self):
assert self.browser.url == django_url(self.url)
def change_name_of_investigator(self):
self.values = {
'name': 'Updated Name',
'mobile_number': self.investigator.mobile_number,
'confirm_mobile_number': self.investigator.mobile_number,
'male': self.investigator.male,
'age': self.investigator.age,
'level_of_education': self.investigator.level_of_education,
'language': self.investigator.language,
'location': self.investigator.location,
}
self.browser.fill_form(self.values)
def assert_user_saved_sucessfully(self):
self.is_text_present("User successfully edited.")
class InvestigatorDetailsPage(PageObject):
def __init__(self, browser, investigator):
self.browser = browser
self.investigator = investigator
self.url = '/investigators/' + str(investigator.id) + '/'
def validate_page_content(self):
details = {
'Name': self.investigator.name,
'Mobile Number': self.investigator.mobile_number,
'Age': str(self.investigator.age),
'Sex': 'Male' if self.investigator.male else 'Female',
'Highest Level of Education': self.investigator.level_of_education,
'Preferred Language of Communication': self.investigator.language,
'Country': 'Uganda',
'City': 'Kampala',
}
for label, text in details.items():
self.is_text_present(label)
self.is_text_present(text)
def validate_navigation_links(self):
assert self.browser.find_link_by_text(' Back')
def validate_back_link(self):
self.browser.find_link_by_href(django_url(InvestigatorsListPage.url))
def validate_detail_page_url(self):
assert self.browser.url == django_url(self.url)
def validate_successful_edited_message(self):
self.is_text_present("Investigator successfully edited.")
# ===== etraiger/raster-vision :: /rastervision2/core/data/label_source/object_detection_label_source_config.py [Apache-2.0, LicenseRef-scancode-generic-cla | 688 bytes] =====
from rastervision2.core.data.label_source import (LabelSourceConfig,
ObjectDetectionLabelSource)
from rastervision2.core.data.vector_source import (VectorSourceConfig)
from rastervision2.pipeline.config import (register_config)
@register_config('object_detection_label_source')
class ObjectDetectionLabelSourceConfig(LabelSourceConfig):
"""Config for a read-only label source for object detection."""
vector_source: VectorSourceConfig
def build(self, class_config, crs_transformer, extent, tmp_dir):
vs = self.vector_source.build(class_config, crs_transformer)
return ObjectDetectionLabelSource(vs, extent)
# ===== GoogleCloudPlatform/declarative-resource-client-library :: /python/services/firebase/beta/apple_app.py [Apache-2.0 | 5,908 bytes] =====
# Copyright 2023 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.firebase import apple_app_pb2
from google3.cloud.graphite.mmv2.services.google.firebase import apple_app_pb2_grpc
from typing import List
class AppleApp(object):
def __init__(
self,
name: str = None,
app_id: str = None,
display_name: str = None,
project_id: str = None,
bundle_id: str = None,
app_store_id: str = None,
team_id: str = None,
api_key_id: str = None,
project: str = None,
service_account_file: str = "",
):
channel.initialize()
self.name = name
self.display_name = display_name
self.bundle_id = bundle_id
self.app_store_id = app_store_id
self.team_id = team_id
self.api_key_id = api_key_id
self.project = project
self.service_account_file = service_account_file
def apply(self):
stub = apple_app_pb2_grpc.FirebaseBetaAppleAppServiceStub(channel.Channel())
request = apple_app_pb2.ApplyFirebaseBetaAppleAppRequest()
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.display_name):
request.resource.display_name = Primitive.to_proto(self.display_name)
if Primitive.to_proto(self.bundle_id):
request.resource.bundle_id = Primitive.to_proto(self.bundle_id)
if Primitive.to_proto(self.app_store_id):
request.resource.app_store_id = Primitive.to_proto(self.app_store_id)
if Primitive.to_proto(self.team_id):
request.resource.team_id = Primitive.to_proto(self.team_id)
if Primitive.to_proto(self.api_key_id):
request.resource.api_key_id = Primitive.to_proto(self.api_key_id)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
request.service_account_file = self.service_account_file
response = stub.ApplyFirebaseBetaAppleApp(request)
self.name = Primitive.from_proto(response.name)
self.app_id = Primitive.from_proto(response.app_id)
self.display_name = Primitive.from_proto(response.display_name)
self.project_id = Primitive.from_proto(response.project_id)
self.bundle_id = Primitive.from_proto(response.bundle_id)
self.app_store_id = Primitive.from_proto(response.app_store_id)
self.team_id = Primitive.from_proto(response.team_id)
self.api_key_id = Primitive.from_proto(response.api_key_id)
self.project = Primitive.from_proto(response.project)
def delete(self):
stub = apple_app_pb2_grpc.FirebaseBetaAppleAppServiceStub(channel.Channel())
request = apple_app_pb2.DeleteFirebaseBetaAppleAppRequest()
request.service_account_file = self.service_account_file
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.display_name):
request.resource.display_name = Primitive.to_proto(self.display_name)
if Primitive.to_proto(self.bundle_id):
request.resource.bundle_id = Primitive.to_proto(self.bundle_id)
if Primitive.to_proto(self.app_store_id):
request.resource.app_store_id = Primitive.to_proto(self.app_store_id)
if Primitive.to_proto(self.team_id):
request.resource.team_id = Primitive.to_proto(self.team_id)
if Primitive.to_proto(self.api_key_id):
request.resource.api_key_id = Primitive.to_proto(self.api_key_id)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
response = stub.DeleteFirebaseBetaAppleApp(request)
@classmethod
def list(self, project, service_account_file=""):
stub = apple_app_pb2_grpc.FirebaseBetaAppleAppServiceStub(channel.Channel())
request = apple_app_pb2.ListFirebaseBetaAppleAppRequest()
request.service_account_file = service_account_file
request.Project = project
return stub.ListFirebaseBetaAppleApp(request).items
def to_proto(self):
resource = apple_app_pb2.FirebaseBetaAppleApp()
if Primitive.to_proto(self.name):
resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.display_name):
resource.display_name = Primitive.to_proto(self.display_name)
if Primitive.to_proto(self.bundle_id):
resource.bundle_id = Primitive.to_proto(self.bundle_id)
if Primitive.to_proto(self.app_store_id):
resource.app_store_id = Primitive.to_proto(self.app_store_id)
if Primitive.to_proto(self.team_id):
resource.team_id = Primitive.to_proto(self.team_id)
if Primitive.to_proto(self.api_key_id):
resource.api_key_id = Primitive.to_proto(self.api_key_id)
if Primitive.to_proto(self.project):
resource.project = Primitive.to_proto(self.project)
return resource
class Primitive(object):
@classmethod
def to_proto(self, s):
if not s:
return ""
return s
@classmethod
def from_proto(self, s):
return s
# ===== G4te-Keep3r/HowdyHackers :: /langs/7/se1.py [no_license | 486 bytes] =====
import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'sE1':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
    main(sys.argv[1])
# ===== Sibinvarghese/PythonPrograms :: /object oriented programming/Polymorphism/Methodoverloading.py [no_license | 325 bytes] =====
from smtplib import LMTP
class Maths:
def add(self):
num1,num2=10,20
print(num1+num2)
def add(self,num1):
num2=50
print(num1+num2)
    def add(self, num1, num2):  # Only this most recent definition survives; Python keeps the last def of a name
        print(num1 + num2)
obj=Maths()
obj.add(10,20)
# obj.add(30)
# obj.add()
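# Pythonic alternative (added sketch, not in the original file): Python has no
# true method overloading, so a single method with default arguments usually
# replaces the three definitions above:
#
#     class Maths:
#         def add(self, num1=10, num2=20):
#             print(num1 + num2)
#
# obj.add(), obj.add(30) and obj.add(10, 20) then all dispatch to one method.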
# ===== YeahHuang/Kickstart :: /2019-RoundG/shifts.py [no_license | 1,169 bytes] =====
from bisect import bisect_left, bisect_right, insort_left, insort_right
from string import ascii_lowercase
from heapq import heappush, heappop, heapify
from collections import Counter, defaultdict
from itertools import product
global ans
global a,b,suma, sumb, n, h
def dfs(i, cur_a, cur_b):
global ans,a,b,suma, sumb, n, h
#print(i, cur_a, cur_b)
if cur_a>=h and cur_b>=h:
ans += 3**(n-i)
return
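    # Both quotas met: each of the remaining n - i shifts can be covered in any
    # of its 3 ways (A, B, or both), hence the 3**(n-i) added above; the
    # suffix-sum guard below prunes branches that can no longer reach h.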
if (i<n) and (suma[-1]-suma[i]+cur_a>=h) and (sumb[-1]-sumb[i]+cur_b>=h):
dfs(i+1, cur_a+a[i], cur_b)
dfs(i+1, cur_a, cur_b+b[i])
dfs(i+1, cur_a+a[i], cur_b+b[i])
T = int(input())
for it in range(T):
n, h = map(int, input().split())
aa = list(map(int, input().split()))
bb = list(map(int, input().split()))
tmp = []
for i in range(n):
tmp.append((aa[i],bb[i]))
tmp.sort(reverse=True)
a,b = [],[]
for i in range(n):
a.append(tmp[i][0])
b.append(tmp[i][1])
suma, sumb = [0] * (n+1), [0]*(n+1)
for i in range(n):
suma[i+1] = suma[i] + a[i]
sumb[i+1] = sumb[i] + b[i]
ans = 0
dfs(0, 0, 0)
print("Case #%d: %d"%(it+1, ans))
# ===== ausaki/data_structures_and_algorithms :: /leetcode/next-greater-element-i/282267514.py [no_license | 573 bytes] =====
# title: next-greater-element-i
# detail: https://leetcode.com/submissions/detail/282267514/
# datetime: Thu Nov 28 20:22:56 2019
# runtime: 44 ms
# memory: 12.8 MB
from typing import List
class Solution:
def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]:
res = [-1] * len(nums1)
pos = {n: i for i, n in enumerate(nums2)}
for i in range(len(nums1)):
for j in range(pos[nums1[i]] + 1, len(nums2)):
if nums2[j] > nums1[i]:
res[i] = nums2[j]
break
return res
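    # Alternative sketch (added, not part of the submission): the classic O(n)
    # version precomputes every next-greater value with a decreasing stack:
    #
    #     nge, stack = {}, []
    #     for n in nums2:
    #         while stack and stack[-1] < n:
    #             nge[stack.pop()] = n
    #         stack.append(n)
    #     res = [nge.get(n, -1) for n in nums1]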
# ===== KiaraRaynS/Catslist-23-06-16- :: /apicatslist/models.py [no_license | 338 bytes] =====
from rest_framework.authtoken.models import Token
from django.dispatch import receiver
from django.db.models.signals import post_save
@receiver(post_save, sender='auth.User')
def usertoken(**kwargs):
created = kwargs.get('created')
instance = kwargs.get('instance')
if created:
Token.objects.create(user=instance)
# ===== executablebooks/mistletoe-ebp :: /test/test_samples/test_samples.py [MIT | 412 bytes] =====
import os
from mistletoe import markdown
PATH = os.path.dirname(__file__)
def test_syntax(file_regression):
with open(os.path.join(PATH, "syntax.md")) as handle:
file_regression.check(markdown(handle.read()), extension=".html")
def test_jquery(file_regression):
with open(os.path.join(PATH, "jquery.md")) as handle:
file_regression.check(markdown(handle.read()), extension=".html")
# ===== zhaocheng1996/xfz_test :: /apps/xfzauth/forms.py [no_license | 1,848 bytes] =====
from django import forms
from apps.forms import FormMixin
from django.core.cache import cache
from .models import User
class LoginForm(forms.Form,FormMixin):
telephone = forms.CharField(max_length=11)
    password = forms.CharField(max_length=20, min_length=6, error_messages={"max_length": "Password cannot be longer than 20 characters!", "min_length": "Password cannot be shorter than 6 characters!"})
remember = forms.IntegerField(required=False)
class RegisterForm(forms.Form,FormMixin):
telephone = forms.CharField(max_length=11)
username =forms.CharField(max_length=20)
    password1 = forms.CharField(max_length=20, min_length=6,
                                error_messages={"max_length": "Password cannot be longer than 20 characters!", "min_length": "Password cannot be shorter than 6 characters!"})
    password2 = forms.CharField(max_length=20, min_length=6, error_messages={"max_length": "Password cannot be longer than 20 characters!", "min_length": "Password cannot be shorter than 6 characters!"})
img_captcha = forms.CharField(min_length=4,max_length=4)
def clean(self):
cleaned_data = super(RegisterForm, self).clean()
password1 = cleaned_data.get('password1')
password2 = cleaned_data.get('password2')
if password1 != password2:
            raise forms.ValidationError('The two password entries do not match!')
img_captcha = cleaned_data.get('img_captcha')
cached_img_captcha = cache.get(img_captcha.lower())
if not cached_img_captcha or cached_img_captcha.lower() != img_captcha.lower():
            raise forms.ValidationError("Incorrect image captcha!")
telephone = cleaned_data.get('telephone')
exists = User.objects.filter(telephone=telephone).exists()
        if exists:
            raise forms.ValidationError('This phone number is already registered!')
return cleaned_data
# ===== PeterL64/UCDDataAnalytics :: /4_Intermediate_Importing_Python/2_Interacting_With_APIs_To_Import_Data_From_The_Internet/4_Checking_The_Wikipedia_API.py [no_license | 609 bytes] =====
# Checking out the Wikipedia API
# Nested JSONs
# Import Packages
import requests
# Assign the URL to the variable: url
url = 'https://en.wikipedia.org/w/api.php?action=query&prop=extracts&format=json&exintro=&titles=pizza'
# Package the request, send the request and catch the response: r
r = requests.get(url)
# Decode the JSON data into a dictionary: json_data
json_data = r.json()
# The variable pizza_extract holds the HTML of an extract from Wikipedia's Pizza page as a string.
# Print the Wikipedia page extract
pizza_extract = json_data['query']['pages']['24768']['extract']
print(pizza_extract)
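# More general sketch (added, not in the original exercise): the page id '24768'
# is specific to this response, so title-agnostic code can take the first value
# of the 'pages' mapping instead of hard-coding the id:
#
#     page = next(iter(json_data['query']['pages'].values()))
#     print(page['extract'])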
# ===== Reactorcore21/BEE2.4 :: /src/connections.py [no_license | 58,480 bytes] =====
"""Manages PeTI item connections.
This allows checking which items are connected to what, and also regenerates
the outputs with optimisations and custom settings.
"""
from enum import Enum
from collections import defaultdict
from srctools import VMF, Entity, Output, Property, conv_bool, Vec
import comp_consts as const
import instanceLocs
import conditions
import instance_traits
import srctools.logger
import vbsp_options
import antlines
import packing
from typing import Optional, Iterable, Dict, List, Set, Tuple
COND_MOD_NAME = "Item Connections"
LOGGER = srctools.logger.get_logger(__name__)
ITEM_TYPES = {} # type: Dict[str, ItemType]
# Targetname -> item
ITEMS = {} # type: Dict[str, Item]
# Outputs we need to use to make a math_counter act like
# the specified logic gate.
COUNTER_AND_ON = 'OnHitMax'
COUNTER_AND_OFF = 'OnChangedFromMax'
COUNTER_OR_ON = 'OnChangedFromMin'
COUNTER_OR_OFF = 'OnHitMin'
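# Presumably the counter's max is set to the number of connected inputs: then
# OnHitMax fires once every input is on and OnChangedFromMax as soon as one
# turns off (AND), while OnChangedFromMin / OnHitMin give the OR equivalents.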
# We need different names for each kind of input type, so they don't
# interfere with each other. We use the 'inst_local' pattern not 'inst-local'
# deliberately so the actual item can't affect the IO input.
COUNTER_NAME = {
const.FixupVars.CONN_COUNT: '_counter',
const.FixupVars.CONN_COUNT_TBEAM: '_counter_polarity',
const.FixupVars.BEE_CONN_COUNT_A: '_counter_a',
const.FixupVars.BEE_CONN_COUNT_B: '_counter_b',
}
# A script to play timer sounds - avoids needing the ambient_generic.
TIMER_SOUND_SCRIPT = '''
function Precache() {{
self.PrecacheSoundScript("{snd}");
}}
function snd() {{
self.EmitSound("{snd}");
}}
'''
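# The doubled braces above are literal braces for str.format(); the script is
# presumably instantiated elsewhere via TIMER_SOUND_SCRIPT.format(snd=<sound>).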
class ConnType(Enum):
"""Kind of Input A/B type, or TBeam type."""
DEFAULT = 'default' # Normal / unconfigured input
# Becomes one of the others based on item preference.
PRIMARY = TBEAM_IO = 'primary' # A Type, 'normal'
SECONDARY = TBEAM_DIR = 'secondary' # B Type, 'alt'
BOTH = 'both' # Trigger both simultaneously.
CONN_TYPE_NAMES = {
'none': ConnType.DEFAULT,
'a': ConnType.PRIMARY,
'prim': ConnType.PRIMARY,
'b': ConnType.SECONDARY,
'sec': ConnType.SECONDARY,
'ab': ConnType.BOTH,
'a+b': ConnType.BOTH,
} # type: Dict[str, ConnType]
CONN_TYPE_NAMES.update(ConnType._value2member_map_)
class InputType(Enum):
"""Indicates the kind of input behaviour to use."""
# Normal PeTI, pass activate/deactivate via proxy.
# For this the IO command is the original counter style.
DEFAULT = 'default'
# Have A and B inputs - acts like AND for both.
DUAL = 'dual'
AND = 'and' # AND input for an item.
OR = 'or' # OR input for an item.
OR_LOGIC = 'or_logic' # Treat as an invisible OR gate, no instance.
AND_LOGIC = 'and_logic' # Treat as an invisible AND gate, no instance.
# An item 'chained' to the next. Inputs should be moved to the output
# item in addition to our own output.
DAISYCHAIN = 'daisychain'
@property
def is_logic(self):
"""Is this a logic gate?"""
return self.value in ('and_logic', 'or_logic')
class PanelSwitchingStyle(Enum):
"""How the panel instance does its switching."""
CUSTOM = 'custom' # Some logic, we don't do anything.
EXTERNAL = 'external' # Provide a toggle to the instance.
INTERNAL = 'internal' # The inst has a toggle or panel, so we can reuse it.
class OutNames(str, Enum):
"""Fake input/outputs used in generation of the real ones."""
# Needs to match gameMan.Game.build_instance_data().
IN_ACT = 'ACTIVATE'
IN_DEACT = 'DEACTIVATE'
IN_SEC_ACT = 'ACTIVATE_SECONDARY'
IN_SEC_DEACT = 'DEACTIVATE_SECONDARY'
OUT_ACT = 'ON_ACTIVATED'
OUT_DEACT = 'ON_DEACTIVATED'
class FeatureMode(Enum):
"""When to apply a feature."""
DYNAMIC = 'dynamic' # Only if dynamic (inputs)
ALWAYS = 'always'
NEVER = 'never'
def valid(self, item: 'Item') -> bool:
"""Check if this is valid for the item."""
if self.value == 'dynamic':
return len(item.inputs) > 0
else:
return self.value == 'always'
CONN_NAMES = {
ConnType.DEFAULT: 'DEF',
ConnType.PRIMARY: 'A',
ConnType.SECONDARY: 'B',
ConnType.BOTH: 'A+B',
}
# The order signs are used in maps.
SIGN_ORDER = [
const.Signage.SHAPE_DOT,
const.Signage.SHAPE_MOON,
const.Signage.SHAPE_TRIANGLE,
const.Signage.SHAPE_CROSS,
const.Signage.SHAPE_SQUARE,
const.Signage.SHAPE_CIRCLE,
const.Signage.SHAPE_SINE,
const.Signage.SHAPE_SLASH,
const.Signage.SHAPE_STAR,
const.Signage.SHAPE_WAVY
]
SIGN_ORDER_LOOKUP = {
sign: index
for index, sign in
enumerate(SIGN_ORDER)
}
class ShapeSignage:
"""Represents a pair of signage shapes."""
__slots__ = (
'overlays',
'name',
'index',
'repeat_group',
'overlay_frames',
)
def __init__(self, overlays: List[Entity]):
if not overlays:
raise ValueError('No overlays')
self.overlays = list(overlays)
self.name = self.overlays[0]['targetname']
# Index in SIGN_ORDER
mat = self.overlays[0]['material']
self.index = SIGN_ORDER_LOOKUP[mat]
# Not useful...
for overlay in self.overlays:
del overlay['targetname']
# Groups these into repeats of the shapes.
self.repeat_group = 0
self.overlay_frames = [] # type: List[Entity]
def __iter__(self):
return iter(self.overlays)
def __lt__(self, other: 'ShapeSignage'):
"""Allow sorting in a consistent order."""
return self.name < other.name
def __gt__(self, other: 'ShapeSignage'):
"""Allow sorting in a consistent order."""
return self.name > other.name
class ItemType:
"""Represents an item, with inputs and outputs."""
def __init__(
self,
id: str,
default_dual: ConnType=ConnType.DEFAULT,
input_type: InputType=InputType.DEFAULT,
spawn_fire: FeatureMode=FeatureMode.NEVER,
invert_var: str = '0',
enable_cmd: List[Output]=(),
disable_cmd: List[Output]=(),
sec_invert_var: str='0',
sec_enable_cmd: List[Output]=(),
sec_disable_cmd: List[Output]=(),
output_type: ConnType=ConnType.DEFAULT,
output_act: Optional[Tuple[Optional[str], str]]=None,
output_deact: Optional[Tuple[Optional[str], str]]=None,
lock_cmd: List[Output]=(),
unlock_cmd: List[Output]=(),
output_lock: Optional[Tuple[Optional[str], str]]=None,
output_unlock: Optional[Tuple[Optional[str], str]]=None,
inf_lock_only: bool=False,
timer_sound_pos: Optional[Vec]=None,
timer_done_cmd: List[Output]=(),
force_timer_sound: bool=False,
timer_start: Optional[List[Tuple[Optional[str], str]]]=None,
timer_stop: Optional[List[Tuple[Optional[str], str]]]=None,
):
self.id = id
# How this item uses their inputs.
self.input_type = input_type
# True/False for always, $var, !$var for lookup.
self.invert_var = invert_var
# Fire the enable/disable commands after spawning to initialise
# the entity.
self.spawn_fire = spawn_fire
# IO commands for enabling/disabling the item.
# These are copied to the item, so it can have modified ones.
# We use tuples so all can reuse the same object.
self.enable_cmd = tuple(enable_cmd)
self.disable_cmd = tuple(disable_cmd)
# If no A/B type is set on the input, use this type.
# Set to None to indicate no secondary is present.
self.default_dual = default_dual
# Same for secondary items.
self.sec_invert_var = sec_invert_var
self.sec_enable_cmd = tuple(sec_enable_cmd)
self.sec_disable_cmd = tuple(sec_disable_cmd)
# Sets the affinity used for outputs from this item - makes the
# Input A/B converter items work.
# If DEFAULT, we use the value on the target item.
self.output_type = output_type
# (inst_name, output) commands for outputs.
# If they are None, it's not used.
# Logic items have preset ones of these from the counter.
if input_type is InputType.AND_LOGIC:
self.output_act = (None, COUNTER_AND_ON)
self.output_deact = (None, COUNTER_AND_OFF)
elif input_type is InputType.OR_LOGIC:
self.output_act = (None, COUNTER_OR_ON)
self.output_deact = (None, COUNTER_OR_OFF)
else: # Other types use the specified ones.
# Allow passing in an output with a blank command, to indicate
# no outputs.
if output_act == (None, ''):
self.output_act = None
else:
self.output_act = output_act
if output_deact == (None, ''):
self.output_deact = None
else:
self.output_deact = output_deact
# If set, automatically play tick-tock sounds when output is on.
self.timer_sound_pos = timer_sound_pos
# These are fired when the time elapses.
self.timer_done_cmd = timer_done_cmd
# If True, always add tick-tock sounds. If false, only when we have
# a timer dial.
self.force_timer_sound = force_timer_sound
# If set, these allow alternate inputs for controlling timers.
# Multiple can be given. If None, we use the normal output.
self.timer_start = timer_start
self.timer_stop = timer_stop
# For locking buttons, this is the command to reactivate,
# and force-lock it.
# If both aren't present, erase both.
if lock_cmd and unlock_cmd:
self.lock_cmd = tuple(lock_cmd)
self.unlock_cmd = tuple(unlock_cmd)
else:
self.lock_cmd = self.unlock_cmd = ()
# If True, the locking button must be infinite to enable the behaviour.
self.inf_lock_only = inf_lock_only
# For the target, the commands to lock/unlock the attached button.
self.output_lock = output_lock
self.output_unlock = output_unlock
@staticmethod
def parse(item_id: str, conf: Property):
"""Read the item type info from the given config."""
def get_outputs(prop_name):
"""Parse all the outputs with this name."""
return [
Output.parse(prop)
for prop in
conf.find_all(prop_name)
# Allow blank to indicate no output.
if prop.value != ''
]
enable_cmd = get_outputs('enable_cmd')
disable_cmd = get_outputs('disable_cmd')
lock_cmd = get_outputs('lock_cmd')
unlock_cmd = get_outputs('unlock_cmd')
inf_lock_only = conf.bool('inf_lock_only')
timer_done_cmd = get_outputs('timer_done_cmd')
if 'timer_sound_pos' in conf:
timer_sound_pos = conf.vec('timer_sound_pos')
force_timer_sound = conf.bool('force_timer_sound')
else:
timer_sound_pos = None
force_timer_sound = False
try:
input_type = InputType(
conf['Type', 'default'].casefold()
)
except ValueError:
            raise ValueError('Invalid input type for "{}": {}'.format(
                item_id, conf['type'],
            )) from None
invert_var = conf['invertVar', '0']
try:
spawn_fire = FeatureMode(conf['spawnfire', 'never'])
except ValueError:
# Older config option - it was a bool for always/never.
spawn_fire_bool = conf.bool('spawnfire', None)
if spawn_fire_bool is None:
raise # Nope, not a bool.
spawn_fire = FeatureMode.DYNAMIC if spawn_fire_bool else FeatureMode.NEVER
if input_type is InputType.DUAL:
sec_enable_cmd = get_outputs('sec_enable_cmd')
sec_disable_cmd = get_outputs('sec_disable_cmd')
try:
default_dual = CONN_TYPE_NAMES[
conf['Default_Dual', 'primary'].casefold()
]
except KeyError:
raise ValueError('Invalid default type for "{}": {}'.format(
item_id, conf['Default_Dual'],
)) from None
# We need an affinity to use when nothing else specifies it.
if default_dual is ConnType.DEFAULT:
raise ValueError('Must specify a default type for "{}"!'.format(
item_id,
)) from None
sec_invert_var = conf['sec_invertVar', '0']
else:
sec_enable_cmd = []
sec_disable_cmd = []
default_dual = sec_invert_var = None
try:
output_type = CONN_TYPE_NAMES[
conf['DualType', 'default'].casefold()
]
except KeyError:
raise ValueError('Invalid output affinity for "{}": {}'.format(
item_id, conf['DualType'],
)) from None
def get_input(prop_name: str):
"""Parse an input command."""
try:
return Output.parse_name(conf[prop_name])
except IndexError:
return None
out_act = get_input('out_activate')
out_deact = get_input('out_deactivate')
out_lock = get_input('out_lock')
out_unlock = get_input('out_unlock')
timer_start = timer_stop = None
if 'out_timer_start' in conf:
timer_start = [
Output.parse_name(prop.value)
for prop in conf.find_all('out_timer_start')
if prop.value
]
if 'out_timer_stop' in conf:
timer_stop = [
Output.parse_name(prop.value)
for prop in conf.find_all('out_timer_stop')
if prop.value
]
return ItemType(
item_id, default_dual, input_type, spawn_fire,
invert_var, enable_cmd, disable_cmd,
sec_invert_var, sec_enable_cmd, sec_disable_cmd,
output_type, out_act, out_deact,
lock_cmd, unlock_cmd, out_lock, out_unlock, inf_lock_only,
timer_sound_pos, timer_done_cmd, force_timer_sound,
timer_start, timer_stop,
)
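# A hedged sketch of the kind of config block parse() above consumes - the
# item ID and commands here are invented, and the exact Output syntax is
# whatever Output.parse()/Output.parse_name() accept:
#
#   "Item_Example"
#       {
#       "type"           "AND"
#       "invertVar"      "$start_inverted"
#       "spawnfire"      "dynamic"
#       "enable_cmd"     "<output definition for enabling>"
#       "disable_cmd"    "<output definition for disabling>"
#       "out_activate"   "instance:out_relay;OnTrigger"
#       "out_deactivate" "instance:out_relay;OnFizzled"
#       }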
class Item:
"""Represents one item/instance with IO."""
__slots__ = [
'inst',
'ind_panels',
'antlines', 'shape_signs',
'ant_wall_style', 'ant_floor_style',
'timer',
'inputs', 'outputs',
'item_type', 'io_outputs',
'enable_cmd', 'disable_cmd',
'sec_enable_cmd', 'sec_disable_cmd',
'ant_toggle_var',
]
def __init__(
self,
inst: Entity,
item_type: Optional[ItemType],
ant_floor_style: antlines.AntType,
ant_wall_style: antlines.AntType,
panels: Iterable[Entity]=(),
antlines: Iterable[Entity]=(),
shape_signs: Iterable[ShapeSignage]=(),
        timer_count: Optional[int]=None,
ant_toggle_var: str='',
):
self.inst = inst
self.item_type = item_type
# Associated indicator panels
self.ind_panels = set(panels) # type: Set[Entity]
# Overlays
self.antlines = set(antlines) # type: Set[Entity]
self.shape_signs = list(shape_signs)
# And the style to use for the antlines.
self.ant_floor_style = ant_floor_style
self.ant_wall_style = ant_wall_style
# If set, the item has special antlines. This is a fixup var,
# which gets the antline name filled in for us.
self.ant_toggle_var = ant_toggle_var
# None = Infinite/normal.
self.timer = timer_count
# From this item
self.outputs = set() # type: Set[Connection]
# To this item
self.inputs = set() # type: Set[Connection]
self.enable_cmd = item_type.enable_cmd
self.disable_cmd = item_type.disable_cmd
self.sec_enable_cmd = item_type.sec_enable_cmd
self.sec_disable_cmd = item_type.sec_disable_cmd
assert self.name, 'Blank name!'
def __repr__(self):
if self.item_type is None:
return '<Item (NO IO): "{}">'.format(self.name)
else:
return '<Item {}: "{}">'.format(self.item_type.id, self.name)
@property
def traits(self):
"""Return the set of instance traits for the item."""
return instance_traits.get(self.inst)
@property
def is_logic(self) -> bool:
"""Check if the input type is a logic type."""
return self.item_type.input_type.is_logic
@property
def name(self) -> str:
"""Return the targetname of the item."""
return self.inst['targetname']
@name.setter
def name(self, name: str):
"""Set the targetname of the item."""
self.inst['targetname'] = name
def output_act(self) -> Optional[Tuple[Optional[str], str]]:
"""Return the output used when this is activated."""
if self.item_type.spawn_fire.valid(self) and self.is_logic:
return None, 'OnUser2'
if self.item_type.input_type is InputType.DAISYCHAIN:
if self.inputs:
return None, COUNTER_AND_ON
return self.item_type.output_act
def output_deact(self) -> Optional[Tuple[Optional[str], str]]:
"""Return the output to use when this is deactivated."""
if self.item_type.spawn_fire.valid(self) and self.is_logic:
return None, 'OnUser1'
if self.item_type.input_type is InputType.DAISYCHAIN:
if self.inputs:
return None, COUNTER_AND_OFF
return self.item_type.output_deact
def timer_output_start(self) -> List[Tuple[Optional[str], str]]:
"""Return the output to use for starting timers."""
if self.item_type.timer_start is None:
out = self.output_act()
return [] if out is None else [out]
return self.item_type.timer_start
def timer_output_stop(self) -> List[Tuple[Optional[str], str]]:
"""Return the output to use for stopping timers."""
if self.item_type.timer_stop is None:
out = self.output_deact()
return [] if out is None else [out]
return self.item_type.timer_stop
def delete_antlines(self):
"""Delete the antlines and checkmarks outputting from this item."""
for ent in self.antlines:
ent.remove()
for ent in self.ind_panels:
ent.remove()
for sign in self.shape_signs:
for ent in sign.overlays:
ent.remove()
self.antlines.clear()
self.ind_panels.clear()
self.shape_signs.clear()
def transfer_antlines(self, item: 'Item'):
"""Transfer the antlines and checkmarks from this item to another."""
item.antlines.update(self.antlines)
item.ind_panels.update(self.ind_panels)
item.shape_signs.extend(self.shape_signs)
self.antlines.clear()
self.ind_panels.clear()
self.shape_signs.clear()
class Connection:
"""Represents a connection between two items."""
__slots__ = [
'_to', '_from', 'type', 'outputs',
]
def __init__(
self,
to_item: Item, # Item this is triggering
from_item: Item, # Item this comes from
conn_type=ConnType.DEFAULT,
outputs: Iterable[Output]=(),
):
self._to = to_item
self._from = from_item
self.type = conn_type
self.outputs = list(outputs)
def __repr__(self):
return '<Connection {} {} -> {}>'.format(
CONN_NAMES[self.type],
self._from.name,
self._to.name,
)
def add(self):
"""Add this to the directories."""
self._from.outputs.add(self)
self._to.inputs.add(self)
def remove(self):
"""Remove this from the directories."""
self._from.outputs.discard(self)
self._to.inputs.discard(self)
@property
def to_item(self) -> Item:
"""The item this connection is going to."""
return self._to
@to_item.setter
def to_item(self, item: Item):
self._to.inputs.discard(self)
self._to = item
item.inputs.add(self)
@property
def from_item(self) -> Item:
"""The item this connection comes from."""
return self._from
@from_item.setter
def from_item(self, item: Item):
self._from.outputs.discard(self)
self._from = item
item.outputs.add(self)
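# Minimal usage sketch (item names hypothetical) - assigning the endpoint
# properties keeps both items' input/output sets consistent:
#
#   conn = Connection(to_item=door, from_item=button)
#   conn.add()                 # registers in button.outputs / door.inputs
#   conn.from_item = pedestal  # detaches from button, attaches to pedestal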
def collapse_item(item: Item):
"""Remove an item with a single input, transferring all IO."""
try:
[input_conn] = item.inputs # type: Connection
input_item = input_conn.from_item # type: Item
except ValueError:
        raise ValueError('Expected exactly one input for "{}"!'.format(item.name))
input_conn.remove()
input_item.antlines |= item.antlines
input_item.ind_panels |= item.ind_panels
input_item.shape_signs += item.shape_signs
item.antlines.clear()
item.ind_panels.clear()
item.shape_signs.clear()
for conn in list(item.outputs):
conn.from_item = input_item
del ITEMS[item.name]
item.inst.remove()
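# For example: given button -> gate -> door where the gate is a one-input
# logic item, collapse_item(gate) rewires this to button -> door and moves
# the gate's antlines/panels/signage onto the button.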
def read_configs(conf: Property):
"""Build our connection configuration from the config files."""
for prop in conf.find_children('Connections'):
if prop.name in ITEM_TYPES:
raise ValueError('Duplicate item type "{}"'.format(prop.real_name))
ITEM_TYPES[prop.name] = ItemType.parse(prop.real_name, prop)
if 'item_indicator_panel' not in ITEM_TYPES:
raise ValueError('No checkmark panel item type!')
if 'item_indicator_panel_timer' not in ITEM_TYPES:
raise ValueError('No timer panel item type!')
def calc_connections(
vmf: VMF,
shape_frame_tex: List[str],
enable_shape_frame: bool,
antline_wall: antlines.AntType,
antline_floor: antlines.AntType,
):
"""Compute item connections from the map file.
This also fixes cases where items have incorrect checkmark/timer signs.
Instance Traits must have been calculated.
It also applies frames to shape signage to distinguish repeats.
"""
# First we want to match targetnames to item types.
toggles = {} # type: Dict[str, Entity]
overlays = defaultdict(set) # type: Dict[str, Set[Entity]]
# Accumulate all the signs into groups, so the list should be 2-long:
# sign_shapes[name, material][0/1]
sign_shape_overlays = defaultdict(list) # type: Dict[Tuple[str, str], List[Entity]]
# Indicator panels
panels = {} # type: Dict[str, Entity]
    # We only need to pay attention to TBeams; for other items we can
    # just detect any output.
tbeam_polarity = {OutNames.IN_SEC_ACT, OutNames.IN_SEC_DEACT}
# Also applies to other items, but not needed for this analysis.
tbeam_io = {OutNames.IN_ACT, OutNames.IN_DEACT}
for inst in vmf.by_class['func_instance']:
inst_name = inst['targetname']
# No connections, so nothing to worry about.
if not inst_name:
continue
traits = instance_traits.get(inst)
if 'indicator_toggle' in traits:
toggles[inst['targetname']] = inst
# We do not use toggle instances.
inst.remove()
elif 'indicator_panel' in traits:
panels[inst['targetname']] = inst
else:
# Normal item.
try:
item_type = ITEM_TYPES[instance_traits.get_item_id(inst).casefold()]
except (KeyError, AttributeError):
# KeyError from no item type, AttributeError from None.casefold()
# These aren't made for non-io items. If it has outputs,
# that'll be a problem later.
pass
else:
# Pass in the defaults for antline styles.
ITEMS[inst_name] = Item(inst, item_type, antline_floor, antline_wall)
# Strip off the original connection count variables, these are
# invalid.
if item_type.input_type is InputType.DUAL:
del inst.fixup[const.FixupVars.CONN_COUNT]
for over in vmf.by_class['info_overlay']:
name = over['targetname']
mat = over['material']
if mat in SIGN_ORDER_LOOKUP:
sign_shape_overlays[name, mat.casefold()].append(over)
else:
# Antlines
overlays[name].add(over)
# Name -> signs pairs
sign_shapes = defaultdict(list) # type: Dict[str, List[ShapeSignage]]
# By material index, for group frames.
sign_shape_by_index = defaultdict(list) # type: Dict[int, List[ShapeSignage]]
for (name, mat), sign_pair in sign_shape_overlays.items():
        # It's possible - but rare - for more than 2 overlays to share a pair.
        # We just treat them all as one group - that's harmless, since they
        # all come from the same item anyway.
shape = ShapeSignage(sign_pair)
sign_shapes[name].append(shape)
sign_shape_by_index[shape.index].append(shape)
# Now build the connections and items.
for item in ITEMS.values():
input_items = [] # Instances we trigger
inputs = defaultdict(list) # type: Dict[str, List[Output]]
if item.inst.outputs and item.item_type is None:
raise ValueError(
'No connections for item "{}", '
'but outputs in the map!'.format(
instance_traits.get_item_id(item.inst)
)
)
for out in item.inst.outputs:
inputs[out.target].append(out)
# Remove the original outputs, we've consumed those already.
item.inst.outputs.clear()
# Pre-set the timer value, for items without antlines but with an output.
if const.FixupVars.TIM_DELAY in item.inst.fixup:
if item.item_type.output_act or item.item_type.output_deact:
item.timer = tim = item.inst.fixup.int(const.FixupVars.TIM_DELAY)
if not (1 <= tim <= 30):
# These would be infinite.
item.timer = None
for out_name in inputs:
# Fizzler base -> model/brush outputs, ignore these (discard).
# fizzler.py will regenerate as needed.
if out_name.endswith(('_modelStart', '_modelEnd', '_brush')):
continue
if out_name in toggles:
inst_toggle = toggles[out_name]
item.antlines |= overlays[inst_toggle.fixup['indicator_name']]
elif out_name in panels:
pan = panels[out_name]
item.ind_panels.add(pan)
if pan.fixup.bool(const.FixupVars.TIM_ENABLED):
item.timer = tim = pan.fixup.int(const.FixupVars.TIM_DELAY)
if not (1 <= tim <= 30):
# These would be infinite.
item.timer = None
else:
item.timer = None
else:
try:
inp_item = ITEMS[out_name]
except KeyError:
raise ValueError('"{}" is not a known instance!'.format(out_name))
else:
input_items.append(inp_item)
if inp_item.item_type is None:
raise ValueError(
'No connections for item "{}", '
'but inputs in the map!'.format(
instance_traits.get_item_id(inp_item.inst)
)
)
for inp_item in input_items: # type: Item
# Default A/B type.
conn_type = ConnType.DEFAULT
in_outputs = inputs[inp_item.name]
if inp_item.item_type.id == 'ITEM_TBEAM':
# It's a funnel - we need to figure out if this is polarity,
# or normal on/off.
for out in in_outputs:
if out.input in tbeam_polarity:
conn_type = ConnType.TBEAM_DIR
break
elif out.input in tbeam_io:
conn_type = ConnType.TBEAM_IO
break
else:
raise ValueError(
'Excursion Funnel "{}" has inputs, '
'but no valid types!'.format(inp_item.name)
)
conn = Connection(
inp_item,
item,
conn_type,
in_outputs,
)
conn.add()
# Make signage frames
shape_frame_tex = [mat for mat in shape_frame_tex if mat]
if shape_frame_tex and enable_shape_frame:
for shape_mat in sign_shape_by_index.values():
# Sort so which gets what frame is consistent.
shape_mat.sort()
for index, shape in enumerate(shape_mat):
shape.repeat_group = index
if index == 0:
                    continue # First shape gets no frame.
frame_mat = shape_frame_tex[(index-1) % len(shape_frame_tex)]
for overlay in shape:
frame = overlay.copy()
shape.overlay_frames.append(frame)
vmf.add_ent(frame)
frame['material'] = frame_mat
frame['renderorder'] = 1 # On top
@conditions.make_result_setup('ChangeIOType')
def res_change_io_type_parse(props: Property):
"""Pre-parse all item types into an anonymous block."""
return ItemType.parse('<ChangeIOType: {:X}>'.format(id(props)), props)
@conditions.make_result('ChangeIOType')
def res_change_io_type(inst: Entity, res: Property):
"""Switch an item to use different inputs or outputs.
Must be done before priority level -250.
The contents are the same as that allowed in the input BEE2 block in
editoritems.
"""
try:
item = ITEMS[inst['targetname']]
except KeyError:
raise ValueError('No item with name "{}"!'.format(inst['targetname']))
item.item_type = res.value
# Overwrite these as well.
item.enable_cmd = res.value.enable_cmd
item.disable_cmd = res.value.disable_cmd
item.sec_enable_cmd = res.value.sec_enable_cmd
item.sec_disable_cmd = res.value.sec_disable_cmd
def do_item_optimisation(vmf: VMF):
"""Optimise redundant logic items."""
needs_global_toggle = False
for item in list(ITEMS.values()):
# We can't remove items that have functionality, or don't have IO.
if item.item_type is None or not item.item_type.input_type.is_logic:
continue
prim_inverted = conv_bool(conditions.resolve_value(
item.inst,
item.item_type.invert_var,
))
sec_inverted = conv_bool(conditions.resolve_value(
item.inst,
item.item_type.sec_invert_var,
))
# Don't optimise if inverted.
if prim_inverted or sec_inverted:
continue
inp_count = len(item.inputs)
if inp_count == 0:
# Totally useless, remove.
# We just leave the panel entities, and tie all the antlines
# to the same toggle.
needs_global_toggle = True
for ent in item.antlines:
ent['targetname'] = '_static_ind'
del ITEMS[item.name]
item.inst.remove()
elif inp_count == 1:
# Only one input, so AND or OR are useless.
# Transfer input item to point to the output(s).
collapse_item(item)
# The antlines need a toggle entity, otherwise they'll copy random other
# overlays.
if needs_global_toggle:
vmf.create_ent(
classname='env_texturetoggle',
origin=vbsp_options.get(Vec, 'global_ents_loc'),
targetname='_static_ind_tog',
target='_static_ind',
)
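# Net effect: zero-input gates are deleted outright (their antlines are all
# renamed to the shared '_static_ind' toggle target above), and one-input
# AND/OR gates are spliced out via collapse_item().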
@conditions.meta_cond(-250, only_once=True)
def gen_item_outputs(vmf: VMF):
"""Create outputs for all items with connections.
This performs an optimization pass over items with outputs to remove
redundancy, then applies all the outputs to the instances. Before this,
connection count and inversion values are not valid. After this point,
items may not have connections altered.
"""
LOGGER.info('Generating item IO...')
pan_switching_check = vbsp_options.get(PanelSwitchingStyle, 'ind_pan_check_switching')
pan_switching_timer = vbsp_options.get(PanelSwitchingStyle, 'ind_pan_timer_switching')
pan_check_type = ITEM_TYPES['item_indicator_panel']
pan_timer_type = ITEM_TYPES['item_indicator_panel_timer']
logic_auto = vmf.create_ent(
'logic_auto',
origin=vbsp_options.get(Vec, 'global_ents_loc')
)
auto_logic = []
# Apply input A/B types to connections.
# After here, all connections are primary or secondary only.
for item in ITEMS.values():
for conn in item.outputs:
# If not a dual item, it's primary.
if conn.to_item.item_type.input_type is not InputType.DUAL:
conn.type = ConnType.PRIMARY
continue
# If already set, that is the priority.
if conn.type is not ConnType.DEFAULT:
continue
# Our item set the type of outputs.
if item.item_type.output_type is not ConnType.DEFAULT:
conn.type = item.item_type.output_type
else:
# Use the affinity of the target.
conn.type = conn.to_item.item_type.default_dual
do_item_optimisation(vmf)
has_timer_relay = False
# We go 'backwards', creating all the inputs for each item.
# That way we can change behaviour based on item counts.
for item in ITEMS.values():
if item.item_type is None:
continue
# Check we actually have timers, and that we want the relay.
if item.timer is not None and (
item.item_type.timer_sound_pos is not None or
item.item_type.timer_done_cmd
):
has_sound = item.item_type.force_timer_sound or len(item.ind_panels) > 0
add_timer_relay(item, has_sound)
has_timer_relay = has_timer_relay or has_sound
# Add outputs for antlines.
if item.antlines or item.ind_panels:
if item.timer is None:
add_item_indicators(item, pan_switching_check, pan_check_type)
else:
add_item_indicators(item, pan_switching_timer, pan_timer_type)
# Special case - inverted spawnfire items with no inputs need to fire
# off the activation outputs. There's no way to then deactivate those.
if not item.inputs and item.item_type.spawn_fire is FeatureMode.ALWAYS:
if item.is_logic:
# Logic gates need to trigger their outputs.
# Make a logic_auto temporarily for this to collect the
# outputs we need.
item.inst.clear_keys()
item.inst['classname'] = 'logic_auto'
auto_logic.append(item.inst)
else:
for cmd in item.enable_cmd:
logic_auto.add_out(
Output(
'OnMapSpawn',
conditions.local_name(
item.inst,
conditions.resolve_value(item.inst, cmd.target),
) or item.inst,
conditions.resolve_value(item.inst, cmd.input),
conditions.resolve_value(item.inst, cmd.params),
delay=cmd.delay,
only_once=True,
)
)
if item.item_type.input_type is InputType.DUAL:
prim_inputs = [
conn
for conn in item.inputs
if conn.type is ConnType.PRIMARY or conn.type is ConnType.BOTH
]
sec_inputs = [
conn
for conn in item.inputs
if conn.type is ConnType.SECONDARY or conn.type is ConnType.BOTH
]
add_item_inputs(
item,
InputType.AND,
prim_inputs,
const.FixupVars.BEE_CONN_COUNT_A,
item.enable_cmd,
item.disable_cmd,
item.item_type.invert_var,
)
add_item_inputs(
item,
InputType.AND,
sec_inputs,
const.FixupVars.BEE_CONN_COUNT_B,
item.sec_enable_cmd,
item.sec_disable_cmd,
item.item_type.sec_invert_var,
)
else:
# If we have commands defined, try to add locking.
if item.item_type.output_unlock is not None:
add_locking(item)
add_item_inputs(
item,
item.item_type.input_type,
list(item.inputs),
const.FixupVars.CONN_COUNT,
item.enable_cmd,
item.disable_cmd,
item.item_type.invert_var,
)
# Check/cross instances sometimes don't match the kind of timer delay.
# We also might want to swap them out.
panel_timer = instanceLocs.resolve_one('[indPanTimer]', error=True)
panel_check = instanceLocs.resolve_one('[indPanCheck]', error=True)
for item in ITEMS.values():
desired_panel_inst = panel_check if item.timer is None else panel_timer
for pan in item.ind_panels:
pan['file'] = desired_panel_inst
pan.fixup[const.FixupVars.TIM_ENABLED] = item.timer is not None
if has_timer_relay:
# Write this VScript out.
timer_sound = vbsp_options.get(str, 'timer_sound')
with open('bee2/inject/timer_sound.nut', 'w') as f:
f.write(TIMER_SOUND_SCRIPT.format(snd=timer_sound))
# Make sure this is packed, since parsing the VScript isn't trivial.
packing.pack_files(vmf, timer_sound, file_type='sound')
for ent in auto_logic:
# Condense all these together now.
# User2 is the one that enables the target.
ent.remove()
for out in ent.outputs:
if out.output == 'OnUser2':
out.output = 'OnMapSpawn'
logic_auto.add_out(out)
out.only_once = True
LOGGER.info('Item IO generated.')
def add_locking(item: Item):
"""Create IO to control buttons from the target item.
This allows items to customise how buttons behave.
"""
# If more than one, it's not logical to lock the button.
try:
[lock_conn] = item.inputs # type: Connection
except ValueError:
return
lock_button = lock_conn.from_item
if item.item_type.inf_lock_only and lock_button.timer is not None:
return
# Check the button doesn't also activate other things -
# we need exclusive control.
# Also the button actually needs to be lockable.
if len(lock_button.outputs) != 1 or not lock_button.item_type.lock_cmd:
return
instance_traits.get(item.inst).add('locking_targ')
instance_traits.get(lock_button.inst).add('locking_btn')
for output, input_cmds in [
(item.item_type.output_lock, lock_button.item_type.lock_cmd),
(item.item_type.output_unlock, lock_button.item_type.unlock_cmd)
]:
if not output:
continue
out_name, out_cmd = output
for cmd in input_cmds:
if cmd.target:
target = conditions.local_name(lock_button.inst, cmd.target)
else:
target = lock_button.inst
item.inst.add_out(
Output(
out_cmd,
target,
cmd.input,
cmd.params,
delay=cmd.delay,
times=cmd.times,
inst_out=out_name,
)
)
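# Sketch of the result (hypothetical names): for button -> door, where the
# door's config defines out_lock/out_unlock and the button defines
# lock_cmd/unlock_cmd, the door instance gains outputs roughly like:
#   <out_lock>   -> button instance -> each lock_cmd input
#   <out_unlock> -> button instance -> each unlock_cmd input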
def add_timer_relay(item: Item, has_sounds: bool):
"""Make a relay to play timer sounds, or fire once the outputs are done."""
rl_name = item.name + '_timer_rl'
relay = item.inst.map.create_ent(
'logic_relay',
targetname=rl_name,
startDisabled=0,
spawnflags=0,
)
if has_sounds:
relay['vscripts'] = 'bee2/timer_sound.nut'
if item.item_type.timer_sound_pos:
relay_loc = item.item_type.timer_sound_pos.copy()
relay_loc.localise(
Vec.from_str(item.inst['origin']),
Vec.from_str(item.inst['angles']),
)
relay['origin'] = relay_loc
else:
relay['origin'] = item.inst['origin']
for cmd in item.item_type.timer_done_cmd:
if cmd:
relay.add_out(Output(
'OnTrigger',
conditions.local_name(item.inst, cmd.target) or item.inst,
conditions.resolve_value(item.inst, cmd.input),
conditions.resolve_value(item.inst, cmd.params),
inst_in=cmd.inst_in,
delay=item.timer + cmd.delay,
times=cmd.times,
))
if item.item_type.timer_sound_pos is not None and has_sounds:
timer_sound = vbsp_options.get(str, 'timer_sound')
timer_cc = vbsp_options.get(str, 'timer_sound_cc')
# The default sound has 'ticking' closed captions.
# So reuse that if the style doesn't specify a different noise.
# If explicitly set to '', we don't use this at all!
if timer_cc is None and timer_sound != 'Portal.room1_TickTock':
timer_cc = 'Portal.room1_TickTock'
if timer_cc:
timer_cc = 'cc_emit ' + timer_cc
for delay in range(item.timer):
relay.add_out(Output(
'OnTrigger',
'!self',
'CallScriptFunction',
'snd',
delay=delay,
))
if timer_cc:
relay.add_out(Output(
'OnTrigger',
'@command',
'Command',
timer_cc,
delay=delay,
))
for outputs, cmd in [
(item.timer_output_start(), 'Trigger'),
(item.timer_output_stop(), 'CancelPending')
]:
for out_name, out_cmd in outputs:
item.inst.add_out(Output(out_cmd, rl_name, cmd, inst_out=out_name))
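# For example, a 3-second timer with sounds ends up with a relay roughly like:
#   OnTrigger !self CallScriptFunction snd      (delay 0, 1, 2)
#   OnTrigger @command Command "cc_emit ..."    (same delays, if captioned)
#   plus each timer_done_cmd output at delay = timer + cmd.delay,
# with the item firing Trigger/CancelPending at it via its timer outputs.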
def add_item_inputs(
item: Item,
logic_type: InputType,
inputs: List[Connection],
count_var: str,
enable_cmd: Iterable[Output],
disable_cmd: Iterable[Output],
invert_var: str,
):
"""Handle either the primary or secondary inputs to an item."""
item.inst.fixup[count_var] = len(inputs)
if len(inputs) == 0:
return # The rest of this function requires at least one input.
if logic_type is InputType.DEFAULT:
# 'Original' PeTI proxies.
for conn in inputs:
inp_item = conn.from_item
for output, input_cmds in [
(inp_item.output_act(), enable_cmd),
(inp_item.output_deact(), disable_cmd)
]:
if not output or not input_cmds:
continue
out_name, out_cmd = output
for cmd in input_cmds:
inp_item.inst.add_out(
Output(
out_cmd,
item.inst,
conditions.resolve_value(item.inst, cmd.input),
conditions.resolve_value(item.inst, cmd.params),
inst_out=out_name,
inst_in=cmd.inst_in,
delay=cmd.delay,
)
)
return
elif logic_type is InputType.DAISYCHAIN:
# Another special case, these items AND themselves with their inputs.
# We aren't called if we have no inputs, so we don't need to handle that.
# We transform the instance into a counter, but first duplicate the
# instance as a new entity. This way references to the instance add
# outputs to the counter instead.
orig_inst = item.inst.copy()
orig_inst.map.add_ent(orig_inst)
orig_inst.outputs.clear()
counter = item.inst
counter.clear_keys()
counter['origin'] = orig_inst['origin']
counter['targetname'] = orig_inst['targetname'] + COUNTER_NAME[count_var]
counter['classname'] = 'math_counter'
counter['min'] = 0
counter['max'] = len(inputs) + 1
for output, input_name in [
(item.item_type.output_act, 'Add'),
(item.item_type.output_deact, 'Subtract')
]:
if not output:
continue
out_name, out_cmd = output
orig_inst.add_out(
Output(
out_cmd,
counter,
input_name,
'1',
inst_out=out_name,
)
)
for conn in inputs:
inp_item = conn.from_item
for output, input_name in [
(inp_item.output_act(), 'Add'),
(inp_item.output_deact(), 'Subtract')
]:
if not output:
continue
out_name, out_cmd = output
inp_item.inst.add_out(
Output(
out_cmd,
counter,
input_name,
'1',
inst_out=out_name,
)
)
return
is_inverted = conv_bool(conditions.resolve_value(
item.inst,
invert_var,
))
if is_inverted:
enable_cmd, disable_cmd = disable_cmd, enable_cmd
# Inverted logic items get a short amount of lag, so loops will propagate
# over several frames so we don't lock up.
if item.inputs and item.outputs:
enable_cmd = [
Output(
'',
out.target,
out.input,
out.params,
out.delay + 0.01,
times=out.times,
inst_in=out.inst_in,
)
for out in enable_cmd
]
disable_cmd = [
Output(
'',
out.target,
out.input,
out.params,
out.delay + 0.01,
times=out.times,
inst_in=out.inst_in,
)
for out in disable_cmd
]
needs_counter = len(inputs) > 1
# If this option is enabled, generate additional logic to fire the disable
# output after spawn (but only if it's not triggered normally.)
# We just use a relay to do this.
# User2 is the real enable input, User1 is the real disable input.
# The relay allows cancelling the 'disable' output that fires shortly after
# spawning.
if item.item_type.spawn_fire is not FeatureMode.NEVER:
if logic_type.is_logic:
            # We have to handle gates specially: turn the item's own
            # instance into the relay, so future evaluation applies to it.
origin = item.inst['origin']
name = item.name
spawn_relay = item.inst
spawn_relay.clear_keys()
spawn_relay['origin'] = origin
spawn_relay['targetname'] = name
spawn_relay['classname'] = 'logic_relay'
# This needs to be blank so it'll be substituted by the instance
# name in enable/disable_cmd.
relay_cmd_name = ''
else:
relay_cmd_name = '@' + item.name + '_inv_rl'
spawn_relay = item.inst.map.create_ent(
classname='logic_relay',
targetname=relay_cmd_name,
origin=item.inst['origin'],
)
if is_inverted:
enable_user = 'User1'
disable_user = 'User2'
else:
enable_user = 'User2'
disable_user = 'User1'
spawn_relay['spawnflags'] = '0'
spawn_relay['startdisabled'] = '0'
spawn_relay.add_out(
Output('OnTrigger', '!self', 'Fire' + disable_user, only_once=True),
Output('OnSpawn', '!self', 'Trigger', delay=0.1),
)
for output_name, input_cmds in [
('On' + enable_user, enable_cmd),
('On' + disable_user, disable_cmd)
]:
if not input_cmds:
continue
for cmd in input_cmds:
spawn_relay.add_out(
Output(
output_name,
conditions.local_name(
item.inst,
conditions.resolve_value(item.inst, cmd.target),
) or item.inst,
conditions.resolve_value(item.inst, cmd.input),
conditions.resolve_value(item.inst, cmd.params),
delay=cmd.delay,
times=cmd.times,
)
)
# Now overwrite input commands to redirect to the relay.
enable_cmd = [
Output('', relay_cmd_name, 'Fire' + enable_user),
Output('', relay_cmd_name, 'Disable', only_once=True),
]
disable_cmd = [
Output('', relay_cmd_name, 'Fire' + disable_user),
Output('', relay_cmd_name, 'Disable', only_once=True),
]
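        # Net effect: real inputs now drive Fire<enable/disable user> on the
        # relay, OnSpawn auto-fires the disable side once after 0.1s, and
        # the first real input permanently disables that spawn behaviour.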
# For counters, swap out the input type.
if logic_type is InputType.AND_LOGIC:
logic_type = InputType.AND
elif logic_type is InputType.OR_LOGIC:
logic_type = InputType.OR
if needs_counter:
if logic_type.is_logic:
# Logic items are the counter. We convert the instance to that
# so we keep outputs added by items evaluated earlier.
origin = item.inst['origin']
name = item.name
counter = item.inst
counter.clear_keys()
counter['origin'] = origin
counter['targetname'] = name
counter['classname'] = 'math_counter'
else:
counter = item.inst.map.create_ent(
classname='math_counter',
targetname=item.name + COUNTER_NAME[count_var],
origin=item.inst['origin'],
)
counter['min'] = counter['startvalue'] = counter['StartDisabled'] = 0
counter['max'] = len(inputs)
for conn in inputs:
inp_item = conn.from_item
for output, input_name in [
(inp_item.output_act(), 'Add'),
(inp_item.output_deact(), 'Subtract')
]:
if not output:
continue
out_name, out_cmd = output
inp_item.inst.add_out(
Output(
out_cmd,
counter,
input_name,
'1',
inst_out=out_name,
)
)
if logic_type is InputType.AND:
count_on = COUNTER_AND_ON
count_off = COUNTER_AND_OFF
elif logic_type is InputType.OR:
count_on = COUNTER_OR_ON
count_off = COUNTER_OR_OFF
elif logic_type.is_logic:
            # We don't add outputs here - the items outputting to us do that,
            # and counter is item.inst, so they're added to it directly.
LOGGER.info('LOGIC counter: {}', counter['targetname'])
return
else:
        # Should never happen - there are no other types.
raise ValueError('Unknown counter logic type: ' + repr(logic_type))
for output_name, input_cmds in [
(count_on, enable_cmd),
(count_off, disable_cmd)
]:
if not input_cmds:
continue
for cmd in input_cmds:
counter.add_out(
Output(
output_name,
conditions.local_name(
item.inst,
conditions.resolve_value(item.inst, cmd.target),
) or item.inst,
conditions.resolve_value(item.inst, cmd.input),
conditions.resolve_value(item.inst, cmd.params),
delay=cmd.delay,
times=cmd.times,
)
)
else: # No counter - fire directly.
for conn in inputs:
inp_item = conn.from_item
for output, input_cmds in [
(inp_item.output_act(), enable_cmd),
(inp_item.output_deact(), disable_cmd)
]:
if not output or not input_cmds:
continue
out_name, out_cmd = output
for cmd in input_cmds:
inp_item.inst.add_out(
Output(
out_cmd,
conditions.local_name(
item.inst,
conditions.resolve_value(item.inst, cmd.target),
) or item.inst,
conditions.resolve_value(item.inst, cmd.input),
conditions.resolve_value(item.inst, cmd.params),
inst_out=out_name,
delay=cmd.delay,
times=cmd.times,
)
)
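# Summary of the wiring strategies above:
#   DEFAULT    - each input instance fires enable/disable at the item directly.
#   DAISYCHAIN - the item becomes a math_counter, ANDed with its own output.
#   AND/OR     - multiple inputs Add/Subtract on a math_counter, whose
#                COUNTER_*_ON/OFF outputs fire the commands.
#   one input  - no counter needed; the input fires the commands directly.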
def add_item_indicators(
item: Item,
inst_type: PanelSwitchingStyle,
pan_item: ItemType,
):
"""Generate the commands for antlines, and restyle them."""
ant_name = '@{}_overlay'.format(item.name)
has_sign = len(item.ind_panels) > 0
for ind in item.antlines:
ind['targetname'] = ant_name
antlines.style_antline(ind, item.ant_wall_style, item.ant_floor_style)
# If the antline material doesn't toggle, the name is removed by
# style_antline(). So check if the overlay actually exists still, to
# see if we need to add the toggle.
has_ant = len(item.inst.map.by_target[ant_name]) > 0
# Special case - the item wants full control over its antlines.
if has_ant and item.ant_toggle_var:
item.inst.fixup[item.ant_toggle_var] = ant_name
# We don't have antlines to control.
has_ant = False
if inst_type is PanelSwitchingStyle.CUSTOM:
needs_toggle = has_ant
elif inst_type is PanelSwitchingStyle.EXTERNAL:
needs_toggle = has_ant or has_sign
elif inst_type is PanelSwitchingStyle.INTERNAL:
if (
item.item_type.timer_start is not None or
item.item_type.timer_stop is not None
):
# The item is doing custom control over the timer, so
# don't tie antline control to the timer.
needs_toggle = has_ant
inst_type = PanelSwitchingStyle.CUSTOM
else:
needs_toggle = has_ant and not has_sign
else:
raise ValueError('Bad switch style ' + repr(inst_type))
first_inst = True
for pan in item.ind_panels:
if inst_type is PanelSwitchingStyle.EXTERNAL:
pan.fixup[const.FixupVars.TOGGLE_OVERLAY] = ant_name
# Ensure only one gets the indicator name.
elif first_inst and inst_type is PanelSwitchingStyle.INTERNAL:
pan.fixup[const.FixupVars.TOGGLE_OVERLAY] = ant_name if has_ant else ' '
first_inst = False
else:
            # VBSP and/or Hammer seem to get confused by a totally empty
            # instance var, so give it a dummy placeholder name instead.
pan.fixup[const.FixupVars.TOGGLE_OVERLAY] = '-'
for outputs, input_cmds in [
(item.timer_output_start(), pan_item.enable_cmd),
(item.timer_output_stop(), pan_item.disable_cmd)
]:
if not input_cmds:
continue
for out_name, out in outputs:
for cmd in input_cmds:
item.inst.add_out(
Output(
out,
conditions.local_name(
pan,
conditions.resolve_value(item.inst, cmd.target),
) or pan,
conditions.resolve_value(item.inst, cmd.input),
conditions.resolve_value(item.inst, cmd.params),
inst_out=out_name,
inst_in=cmd.inst_in,
times=cmd.times,
)
)
if needs_toggle:
toggle = item.inst.map.create_ent(
classname='env_texturetoggle',
origin=Vec.from_str(item.inst['origin']) + (0, 0, 16),
targetname='toggle_' + item.name,
target=ant_name,
)
# Don't use the configurable inputs - if they want that, use custAntline.
for output, skin in [
(item.output_act(), '1'),
(item.output_deact(), '0')
]:
if not output:
continue
out_name, out = output
item.inst.add_out(
Output(
out,
toggle,
'SetTextureIndex',
skin,
inst_out=out_name,
)
)
| [
"[email protected]"
] | |
3693db274c621fd859666a09c9dfe5cf3ed1be5f | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/TRUNK-MIB.py | 7d86eb2a23af73441d1dbf90d947cf7900dc865b | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 20,621 | py | #
# PySNMP MIB module TRUNK-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/TRUNK-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:27:43 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint", "ConstraintsIntersection", "ValueSizeConstraint")
MacAddress, = mibBuilder.importSymbols("BRIDGE-MIB", "MacAddress")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibIdentifier, Counter32, enterprises, Bits, TimeTicks, Unsigned32, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, Counter64, iso, Gauge32, NotificationType, IpAddress, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Counter32", "enterprises", "Bits", "TimeTicks", "Unsigned32", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "Counter64", "iso", "Gauge32", "NotificationType", "IpAddress", "Integer32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
class PortList(OctetString):
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(4, 4)
fixedLength = 4
marconi = MibIdentifier((1, 3, 6, 1, 4, 1, 326))
systems = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2))
external = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20))
dlink = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1))
dlinkcommon = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 1))
golf = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2))
golfproducts = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1))
golfcommon = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2))
marconi_mgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2)).setLabel("marconi-mgmt")
es1000Series = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24))
swPortTrunkPackage = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 6))
swSnoopPackage = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 7))
swIGMPPackage = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 8))
endOfMIB = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 9999), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: endOfMIB.setStatus('optional')
swPortTrunkTable = MibTable((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 6, 1), )
if mibBuilder.loadTexts: swPortTrunkTable.setStatus('mandatory')
if mibBuilder.loadTexts: swPortTrunkTable.setDescription('This table specifies which ports group a set of ports (up to 8) into a single logical link.')
swPortTrunkEntry = MibTableRow((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 6, 1, 1), ).setIndexNames((0, "TRUNK-MIB", "swPortTrunkIndex"))
if mibBuilder.loadTexts: swPortTrunkEntry.setStatus('mandatory')
if mibBuilder.loadTexts: swPortTrunkEntry.setDescription('A list of information specifies which ports group a set of ports(up to 8) into a single logical link.')
swPortTrunkIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 6, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortTrunkIndex.setStatus('mandatory')
if mibBuilder.loadTexts: swPortTrunkIndex.setDescription('The index of logical port trunk.The device max support 3 trunk groups. The trunk group number depend on the existence of module.')
swPortTrunkName = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 6, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swPortTrunkName.setStatus('mandatory')
if mibBuilder.loadTexts: swPortTrunkName.setDescription('The name of logical port trunk.')
swPortTrunkModule = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 6, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortTrunkModule.setStatus('mandatory')
if mibBuilder.loadTexts: swPortTrunkModule.setDescription('Indicate which modules include in this Trunk. The value is up to 2.')
swPortTrunkMasterPort = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 6, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortTrunkMasterPort.setStatus('mandatory')
if mibBuilder.loadTexts: swPortTrunkMasterPort.setDescription('The object indicates the master port number of the port trunk entry.The first port of the trunk is implicitly configured to be the master logical port.When using Port Trunk, you can not configure the other ports of the group except the master port. Their configuration must be same as the master port (e.g. speed, duplex, enabled/disabled, flow control, and so on).')
swPortTrunkMemberNum = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 6, 1, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swPortTrunkMemberNum.setStatus('mandatory')
if mibBuilder.loadTexts: swPortTrunkMemberNum.setDescription('Indicates how many ports are included in this Trunk. If the trunk is located at an expansion module (i.e. es400LinkAggrIndex equals 3) and the module is 100-TX or FX-MTRJ, the maximum number of ports in the trunk is 2. The maximum number of ports is 8 for other trunks.')
swPortTrunkState = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 6, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swPortTrunkState.setStatus('mandatory')
if mibBuilder.loadTexts: swPortTrunkState.setDescription('This object decide the port trunk enabled or disabled.')
swSnoopCtrlTable = MibTable((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 7, 1), )
if mibBuilder.loadTexts: swSnoopCtrlTable.setStatus('mandatory')
if mibBuilder.loadTexts: swSnoopCtrlTable.setDescription("A list of port snooping entries.Port snooping function provide an easy way to monitor traffic on any port. In this way any good packets appears on the source mirror port also shows up on the target mirror port and doesn't to reconstruct the LAN.")
swSnoopCtrlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 7, 1, 1), ).setIndexNames((0, "TRUNK-MIB", "swSnoopIndex"))
if mibBuilder.loadTexts: swSnoopCtrlEntry.setStatus('mandatory')
if mibBuilder.loadTexts: swSnoopCtrlEntry.setDescription('A list of information provide an easy way to monitor traffic on any port. The use can bring a fancy network monitor attaching to any target mirror port and set the port to be monitored as source mirror port. ')
swSnoopIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 7, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swSnoopIndex.setStatus('mandatory')
if mibBuilder.loadTexts: swSnoopIndex.setDescription('This object indicates the port snooping entry number.There is just only one now.')
swSnoopLogicSourcePort = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 7, 1, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swSnoopLogicSourcePort.setStatus('mandatory')
if mibBuilder.loadTexts: swSnoopLogicSourcePort.setDescription("This object indicates the number of port to be sniffed. The port number is the sequential (logical) number which is also applied to bridge MIB, etc. For instance, logical port 1~22 is mapped to module 1's 10/100Base-T TP ports, port 23~24 is mapped to module 2's 100FX/TX ports, port 25 indicates for module 3's Gigabit port, and so on.")
swSnoopLogicTargetPort = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 7, 1, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swSnoopLogicTargetPort.setStatus('mandatory')
if mibBuilder.loadTexts: swSnoopLogicTargetPort.setDescription("This object indicates switch which port will sniff another port. A trunk port member cannot be configured as a target Snooping port. The port number is the sequential (logical) number which is also applied to bridge MIB, etc. For instance, logical port 1~22 is mapped to module 1's 10/100Base-T TP ports, port 23~24 is mapped to module 2's 100FX/TX ports, port 25 indicates for module 3's Gigabit port, and so on.")
swSnoopState = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 7, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disable", 2), ("enable", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swSnoopState.setStatus('mandatory')
if mibBuilder.loadTexts: swSnoopState.setDescription('This object indicates the status of this entry. other(1) - this entry is currently in use but the conditions under which it will remain so are different from each of the following values. disable(2) - Snoop function disabled. enable(3) - Snoop function enabled; packets received or transmitted by the snoop source port are mirrored.')
swIGMPCtrlTable = MibTable((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 8, 1), )
if mibBuilder.loadTexts: swIGMPCtrlTable.setStatus('mandatory')
if mibBuilder.loadTexts: swIGMPCtrlTable.setDescription("The table controls the Vlan's IGMP function. Its scale depends on current VLAN state (swVlanInfoStatus). If VLAN is disabled or in Mac-Base mode, there is only one entry in the table, with index 1. If VLAN is in Port-Base or 802.1q mode, the number of entries can be up to 12, with index range from 1 to 12.")
swIGMPCtrlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 8, 1, 1), ).setIndexNames((0, "TRUNK-MIB", "swIGMPCtrlIndex"))
if mibBuilder.loadTexts: swIGMPCtrlEntry.setStatus('mandatory')
if mibBuilder.loadTexts: swIGMPCtrlEntry.setDescription('The entry in IGMP control table (swIGMPCtrlTable). The entry is effective only when IGMP capture switch (swDevIGMPCaptureState) is enabled.')
swIGMPCtrlIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 8, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 12))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swIGMPCtrlIndex.setStatus('mandatory')
if mibBuilder.loadTexts: swIGMPCtrlIndex.setDescription('This object indicates the IGMP control entry number.Its scale depends on current VLAN state (es400VlanInfoStatus). If VLAN is disabled or in Mac-Base mode, there is only one entry in the table, with index 1. If VLAN is in Port-Base or 802.1q mode, the number of entries is 12, with index range from 1 to 12.')
swIGMPCtrlVid = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 8, 1, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swIGMPCtrlVid.setStatus('mandatory')
if mibBuilder.loadTexts: swIGMPCtrlVid.setDescription("This object indicates the IGMP control entry's VLAN id. If VLAN is disabled or in Mac-Base mode, the Vid is always 0 and cannot be changed by management users. If VLAN is in Port-Base mode, the Vid is arranged from 1 to 12 , fixed form. If VLAN is in 802.1q mode, the Vid setting can vary from 1 to 4094 by management user, and the Vid in each entry must be unique in the IGMP Control Table.")
swIGMPCtrlTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 8, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(30, 9999)).clone(300)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swIGMPCtrlTimer.setStatus('mandatory')
if mibBuilder.loadTexts: swIGMPCtrlTimer.setDescription('The timer value for sending IGMP query packet when none was sent by the multicast router in the LAN. The timer works in per-VLAN basis. Our device will be activated to send the query message if the timer is expired. Please reference RFC2236-1997. And it recommends a default of 125 seconds. The timeout value must be at least 30 seconds.')
swIGMPCtrlState = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 8, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disable", 2), ("enable", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swIGMPCtrlState.setStatus('mandatory')
if mibBuilder.loadTexts: swIGMPCtrlState.setDescription('This object indicates the status of this entry. other(1) - this entry is currently in use but the conditions under which it will remain so are different from each of the following values. disable(2) - IGMP function is disabled for this entry. enable(3) - IGMP function is enabled for this entry.')
swIGMPInfoTable = MibTable((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 8, 2), )
if mibBuilder.loadTexts: swIGMPInfoTable.setStatus('mandatory')
if mibBuilder.loadTexts: swIGMPInfoTable.setDescription('The table contains the number current IGMP query packets which is captured by this device, as well as the IGMP query packets sent by the device.')
swIGMPInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 8, 2, 1), ).setIndexNames((0, "TRUNK-MIB", "swIGMPInfoIndex"))
if mibBuilder.loadTexts: swIGMPInfoEntry.setStatus('mandatory')
if mibBuilder.loadTexts: swIGMPInfoEntry.setDescription('Information about current IGMP query information, provided that swDevIGMPCaptureState and swIGMPCtrlState of associated VLAN entry are all enabled.')
swIGMPInfoIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 8, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 12))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swIGMPInfoIndex.setStatus('mandatory')
if mibBuilder.loadTexts: swIGMPInfoIndex.setDescription('This object indicates the IGMP query information entry number. It could be up to 12 entries, depending on current number of VLAN entries.')
swIGMPInfoVid = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 8, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swIGMPInfoVid.setStatus('mandatory')
if mibBuilder.loadTexts: swIGMPInfoVid.setDescription('This object indicates the Vid of associated IGMP info table entry. It follows swIGMPCtrlVid in the associated entry of IGMP control table (swIGMPCtrlTable).')
swIGMPInfoQueryCount = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 8, 2, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swIGMPInfoQueryCount.setStatus('mandatory')
if mibBuilder.loadTexts: swIGMPInfoQueryCount.setDescription('This object indicates the number of query packets received since the IGMP function enabled, in per-VLAN basis.')
swIGMPInfoTxQueryCount = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 8, 2, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swIGMPInfoTxQueryCount.setStatus('mandatory')
if mibBuilder.loadTexts: swIGMPInfoTxQueryCount.setDescription('This object indicates the send count of IGMP query messages, in per-VLAN basis. In case of IGMP timer expiration, the switch sends IGMP query packets to related VLAN member ports and increment this object by 1.')
swIGMPTable = MibTable((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 8, 3), )
if mibBuilder.loadTexts: swIGMPTable.setStatus('mandatory')
if mibBuilder.loadTexts: swIGMPTable.setDescription('The table containing current IGMP information which captured by this device, provided that swDevIGMPCaptureState and swIGMPCtrlState of associated VLAN entry are all enabled. Note that the priority of IGMP table entries is lower than Filtering Table, i.e. if there is a table hash collision between the entries of IGMP Table and Filtering Table inside the switch H/W address table, then Filtering Table entry overwrite the colliding entry of IGMP Table. See swFdbFilterTable description also.')
swIGMPEntry = MibTableRow((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 8, 3, 1), ).setIndexNames((0, "TRUNK-MIB", "swIGMPVid"), (0, "TRUNK-MIB", "swIGMPGroupIpAddr"))
if mibBuilder.loadTexts: swIGMPEntry.setStatus('mandatory')
if mibBuilder.loadTexts: swIGMPEntry.setDescription('Information about current IGMP information which captured by this device , provided that swDevIGMPCaptureState and swIGMPCtrlState of associated VLAN entry are all enabled.')
swIGMPVid = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 8, 3, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swIGMPVid.setStatus('mandatory')
if mibBuilder.loadTexts: swIGMPVid.setDescription('This object indicates the Vid of individual IGMP table entry. It shows the Vid of IGMP report information captured on network.')
swIGMPGroupIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 8, 3, 1, 2), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swIGMPGroupIpAddr.setStatus('mandatory')
if mibBuilder.loadTexts: swIGMPGroupIpAddr.setDescription('This object identifies the group IP address captured from IGMP packets, on a per-VLAN basis.')
swIGMPMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 8, 3, 1, 3), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swIGMPMacAddr.setStatus('mandatory')
if mibBuilder.loadTexts: swIGMPMacAddr.setDescription('This object identifies the MAC address corresponding to swIGMPGroupIpAddr, on a per-VLAN basis.')
swIGMPPortMap = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 8, 3, 1, 4), PortList()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swIGMPPortMap.setStatus('mandatory')
if mibBuilder.loadTexts: swIGMPPortMap.setDescription("This object indicates which ports are belong to the same multicast group, in per-Vlan basis. Each multicast group has a octect string to indicate with port map. The most significant bit represents the lowest numbered port, and the least significant bit represents the highest numbered port. In module 1 (base module), there are 22 100M twisted-pair ports (port 1..22) which is mapped to the PortMap's port 1 to 22 respectively. In module 2 (slot 1 module), there are 2 100M FX/100 TX (or a single port 100M FX) ports which is mapped to the PortMap's port 23,24 respectively (if the module is a single port 100M FX, it is just mapped to port 23 and port 24 is ignored). Module 3 (slot 2 module) is a single-port Gigabit Ethernet and it is mapped to the PortMap's port 25.")
swIGMPIpGroupReportCount = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 24, 8, 3, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swIGMPIpGroupReportCount.setStatus('mandatory')
if mibBuilder.loadTexts: swIGMPIpGroupReportCount.setDescription('This object indicates how many report packets have been received by the device for this entry since the IGMP function was enabled, on a per-VLAN basis.')
mibBuilder.exportSymbols("TRUNK-MIB", swPortTrunkIndex=swPortTrunkIndex, swSnoopCtrlEntry=swSnoopCtrlEntry, swIGMPPortMap=swIGMPPortMap, swIGMPVid=swIGMPVid, swPortTrunkMemberNum=swPortTrunkMemberNum, swIGMPInfoEntry=swIGMPInfoEntry, dlink=dlink, swSnoopCtrlTable=swSnoopCtrlTable, swIGMPCtrlIndex=swIGMPCtrlIndex, swIGMPCtrlState=swIGMPCtrlState, swIGMPEntry=swIGMPEntry, swIGMPTable=swIGMPTable, swPortTrunkName=swPortTrunkName, endOfMIB=endOfMIB, swSnoopIndex=swSnoopIndex, swSnoopLogicSourcePort=swSnoopLogicSourcePort, marconi=marconi, swPortTrunkTable=swPortTrunkTable, swIGMPCtrlVid=swIGMPCtrlVid, swIGMPInfoTable=swIGMPInfoTable, golfcommon=golfcommon, swIGMPPackage=swIGMPPackage, golfproducts=golfproducts, swIGMPInfoVid=swIGMPInfoVid, swIGMPGroupIpAddr=swIGMPGroupIpAddr, swIGMPInfoQueryCount=swIGMPInfoQueryCount, dlinkcommon=dlinkcommon, external=external, swPortTrunkMasterPort=swPortTrunkMasterPort, swIGMPCtrlTable=swIGMPCtrlTable, swSnoopLogicTargetPort=swSnoopLogicTargetPort, swIGMPInfoTxQueryCount=swIGMPInfoTxQueryCount, swPortTrunkEntry=swPortTrunkEntry, swIGMPCtrlTimer=swIGMPCtrlTimer, swPortTrunkModule=swPortTrunkModule, swSnoopState=swSnoopState, es1000Series=es1000Series, PortList=PortList, swIGMPIpGroupReportCount=swIGMPIpGroupReportCount, systems=systems, swPortTrunkState=swPortTrunkState, swSnoopPackage=swSnoopPackage, swPortTrunkPackage=swPortTrunkPackage, swIGMPCtrlEntry=swIGMPCtrlEntry, swIGMPMacAddr=swIGMPMacAddr, marconi_mgmt=marconi_mgmt, golf=golf, swIGMPInfoIndex=swIGMPInfoIndex)
| [
"[email protected]"
] | |
4c801b6dbe162b6d05d2b0f9077b249bed8522be | dad12d5cfdae207fd5391eca45c86ef1bd6447bd | /zmq_plugin_bridge/monitor.py | b76e1cbc0f7e6c0935494b55ebcc6f65e0750259 | [] | no_license | JozzOfLyfe/Threejs | 08f9282bba37d2a15b3d9d585930e0293f3b138f | 50f585b3afa0dcaacced7bec7727a75fc40c0f99 | refs/heads/master | 2021-01-20T19:39:40.302550 | 2016-07-22T16:39:12 | 2016-07-22T16:39:12 | 62,829,783 | 0 | 1 | null | 2016-07-22T16:39:12 | 2016-07-07T18:33:59 | JavaScript | UTF-8 | Python | false | false | 3,995 | py | # coding: utf-8
import json
import logging
import sys
from zmq import green as zmq
from zmq_plugin.plugin import Plugin
from zmq_plugin.schema import (validate, PandasJsonEncoder,
encode_content_data,
decode_content_data)
import IPython
import arrow
import gevent
import jsonschema
logger = logging.getLogger(__name__)
def run_plugin(sio, plugin, log_level=None, namespace=None):
if log_level is not None:
logging.basicConfig(level=log_level)
plugin.reset()
def get_message():
msg_frames = plugin.subscribe_socket.recv_multipart(zmq.NOBLOCK)
message_str = msg_frames[-1]
try:
# Decode message from first (and only expected) frame.
message = json.loads(message_str)
# Validate message against schema.
validate(message)
except jsonschema.ValidationError:
logger.error('Unexpected message', exc_info=True)
raise
else:
return message
start = arrow.now()
while True:
try:
try:
message = get_message()
except zmq.Again:
gevent.sleep(.01)
continue
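            # Stamp each message with wall-clock time; gaps longer than
            # 250 ms since the previous message get a "+delta" suffix.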
msg_timestamp = arrow.get(message['header']['date'])
delta_time = (msg_timestamp - start).total_seconds()
time_info = msg_timestamp.strftime('%H:%M:%S')
if delta_time > .25:
time_info += (' +%-5.1f' % delta_time)
print 72 * '-'
if message['header']['msg_type'] == 'execute_reply':
msg_info = (time_info +
' [{header[target]}<-{header[source]}] '
'{content[command]}'.format(**message))
print msg_info
data = decode_content_data(message)
try:
json_data = json.dumps(data, cls=PandasJsonEncoder)
                except Exception:
                    logger.error('Could not JSON-encode reply data',
                                 exc_info=True)
                    raise
content = encode_content_data(json_data, mime_type=
'application/json')
message['content'].update(content)
elif 'content' in message:
msg_info = (time_info +
' [{header[source]}->{header[target]}] '
'{content[command]}'.format(**message))
data = decode_content_data(message)
try:
json_data = json.dumps(data, cls=PandasJsonEncoder)
                except Exception:
                    logger.error('Could not JSON-encode message data',
                                 exc_info=True)
                    raise
content = encode_content_data(json_data, mime_type=
'application/json')
message['content'].update(content)
else:
msg_info = (time_info +
' [{header[source]}->{header[target]}] '
'<{header[msg_type]}>'.format(**message))
print msg_info
            if sio is not None:
                sio.emit('zmq', message, namespace=namespace)
start = arrow.now()
except KeyboardInterrupt:
IPython.embed()
def parse_args(args=None):
"""Parses arguments, returns (options, args)."""
from argparse import ArgumentParser
    if args is None:
        args = sys.argv[1:]
parser = ArgumentParser(description='ZeroMQ Plugin process.')
log_levels = ('critical', 'error', 'warning', 'info', 'debug', 'notset')
parser.add_argument('-l', '--log-level', type=str, choices=log_levels,
default='info')
parser.add_argument('hub_uri')
parser.add_argument('name', type=str)
    args = parser.parse_args(args)
args.log_level = getattr(logging, args.log_level.upper())
return args
if __name__ == '__main__':
args = parse_args()
plugin = Plugin(args.name, args.hub_uri, {zmq.SUBSCRIBE: ''})
    # Standalone mode has no Socket.IO server to relay to; pass None for sio.
    run_plugin(None, plugin, args.log_level)
| [
"[email protected]"
] | |
48653f7afa85e42005da9783c9c96c4c43582d04 | a8c0867109974ff7586597fe2c58521277ab9d4d | /LC88.py | b314c31d431d89596bf96bcee8a1b853a8a3b00a | [] | no_license | Qiao-Liang/LeetCode | 1491b01d2ddf11495fbc23a65bb6ecb74ac1cee2 | dbdb227e12f329e4ca064b338f1fbdca42f3a848 | refs/heads/master | 2023-05-06T15:00:58.939626 | 2021-04-21T06:30:33 | 2021-04-21T06:30:33 | 82,885,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,320 | py | class Solution(object):
def merge(self, nums1, m, nums2, n):
"""
:type nums1: List[int]
:type m: int
:type nums2: List[int]
:type n: int
:rtype: void Do not return anything, modify nums1 in-place instead.
"""
curr = 0
for num in nums2:
while curr < m and nums1[curr] <= num:
curr += 1
temp_curr = m
m += 1
while temp_curr > curr:
nums1[temp_curr] = nums1[temp_curr - 1]
temp_curr -= 1
nums1[curr] = num
        print(nums1)
# if nums2:
# for m_idx in range(m):
# if nums1[m_idx] > nums2[0]:
# nums1[m_idx], nums2[0] = nums2[0], nums1[m_idx]
# for n_idx in range(n - 1):
# if nums2[n_idx] > nums2[n_idx + 1]:
# nums2[n_idx], nums2[n_idx + 1] = nums2[n_idx + 1], nums2[n_idx]
# else:
# break
# nums1 = nums1[:m] + nums2[:n]
# print(nums1)
nums1 = [1,2,3,0,0,0]
m = 3
nums2 = [2,5,6]
n = 3
# nums1 = [-1,0,0,3,3,3,0,0,0]
# m = 6
# nums2 = [1,2,2]
# n = 3
sol = Solution()
sol.merge(nums1, m, nums2, n)
| [
"[email protected]"
] | |
c8dc51f72ee1d978a9c54c4bb3c56eb9723a0326 | d3210868266ce3f0c17d0777c157da82402d3ed7 | /horizon/openstack_dashboard/settings.py | 6eb84bf96d8d5465df333da4b507662f95fb5a62 | [
"Apache-2.0"
] | permissive | cauberong099/openstack | 4f0bb1671bf3f2421a756c8b3bfcd7b344e07096 | 4fc261d37d84126d364de50fbc6ca98b8dc8dd39 | refs/heads/master | 2021-01-10T19:44:22.108399 | 2015-03-28T02:46:21 | 2015-03-28T02:46:21 | 33,003,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,429 | py | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import sys
import warnings
from django.utils.translation import ugettext_lazy as _
from openstack_dashboard import exceptions
from openstack_dashboard.static_settings import STATICFILES_DIRS # noqa
warnings.formatwarning = lambda message, category, *args, **kwargs: \
'%s: %s' % (category.__name__, message)
ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
BIN_DIR = os.path.abspath(os.path.join(ROOT_PATH, '..', 'bin'))
if ROOT_PATH not in sys.path:
sys.path.append(ROOT_PATH)
DEBUG = False
TEMPLATE_DEBUG = DEBUG
SITE_BRANDING = 'OpenStack Dashboard'
WEBROOT = '/'
LOGIN_URL = None
LOGOUT_URL = None
LOGIN_REDIRECT_URL = None
MEDIA_ROOT = os.path.abspath(os.path.join(ROOT_PATH, '..', 'media'))
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.abspath(os.path.join(ROOT_PATH, '..', 'static'))
STATIC_URL = '/static/'
ROOT_URLCONF = 'openstack_dashboard.urls'
HORIZON_CONFIG = {
'user_home': 'openstack_dashboard.views.get_user_home',
'ajax_queue_limit': 10,
'auto_fade_alerts': {
'delay': 3000,
'fade_duration': 1500,
'types': ['alert-success', 'alert-info']
},
'help_url': "http://docs.openstack.org",
'exceptions': {'recoverable': exceptions.RECOVERABLE,
'not_found': exceptions.NOT_FOUND,
'unauthorized': exceptions.UNAUTHORIZED},
'angular_modules': [],
'js_files': [],
'js_spec_files': [],
}
DEFAULT_FROM_EMAIL = '[email protected]'
SERVER_EMAIL = '[email protected]'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = 'saigonnhungngaymua'
EMAIL_PORT = 587
# Set to True to allow users to upload images to glance via Horizon server.
# When enabled, a file form field will appear on the create image form.
# See documentation for deployment considerations.
HORIZON_IMAGES_ALLOW_UPLOAD = True
# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features
# in the OpenStack Dashboard related to the Image service, such as the list
# of supported image formats.
OPENSTACK_IMAGE_BACKEND = {
'image_formats': [
('', _('Select format')),
('aki', _('AKI - Amazon Kernel Image')),
('ami', _('AMI - Amazon Machine Image')),
('ari', _('ARI - Amazon Ramdisk Image')),
('iso', _('ISO - Optical Disk Image')),
('ova', _('OVA - Open Virtual Appliance')),
('qcow2', _('QCOW2 - QEMU Emulator')),
('raw', _('Raw')),
('vdi', _('VDI - Virtual Disk Image')),
('vhd', _('VHD - Virtual Hard Disk')),
('vmdk', _('VMDK - Virtual Machine Disk')),
]
}
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'horizon.middleware.HorizonMiddleware',
'django.middleware.doc.XViewMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.request',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages',
'horizon.context_processors.horizon',
'openstack_dashboard.context_processors.openstack',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'horizon.loaders.TemplateLoader',
)
TEMPLATE_DIRS = (
os.path.join(ROOT_PATH, 'templates'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
COMPRESS_PRECOMPILERS = (
('text/scss', 'django_pyscss.compressor.DjangoScssFilter'),
)
COMPRESS_CSS_FILTERS = (
'compressor.filters.css_default.CssAbsoluteFilter',
)
COMPRESS_ENABLED = True
COMPRESS_OUTPUT_DIR = 'dashboard'
COMPRESS_CSS_HASHING_METHOD = 'hash'
COMPRESS_PARSER = 'compressor.parser.HtmlParser'
INSTALLED_APPS = [
'openstack_dashboard',
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'django_pyscss',
'openstack_dashboard.django_pyscss_fix',
'compressor',
'horizon',
'openstack_auth',
'bootstrapform',
'registration',
#'openstack_dashboard.dashboards.cloudcustom',
]
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
AUTHENTICATION_BACKENDS = ('openstack_auth.backend.KeystoneBackend',)
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'
SESSION_COOKIE_HTTPONLY = True
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SESSION_COOKIE_SECURE = False
SESSION_TIMEOUT = 1800
# A token can be near the end of its validity when a page starts loading and
# become invalid during rendering, which can cause errors while the page
# loads. TOKEN_TIMEOUT_MARGIN defines a time in seconds subtracted from the
# token validity to avoid this issue. You can adjust this time depending on
# the performance of the infrastructure.
TOKEN_TIMEOUT_MARGIN = 10
# When using cookie-based sessions, log error when the session cookie exceeds
# the following size (common browsers drop cookies above a certain size):
SESSION_COOKIE_MAX_SIZE = 4093
# when doing upgrades, it may be wise to stick to PickleSerializer
# NOTE(berendt): Check during the K-cycle if this variable can be removed.
# https://bugs.launchpad.net/horizon/+bug/1349463
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
LANGUAGES = (
('de', 'German'),
('en', 'English'),
('en-au', 'Australian English'),
('en-gb', 'British English'),
('es', 'Spanish'),
('fr', 'French'),
('hi', 'Hindi'),
('ja', 'Japanese'),
('ko', 'Korean (Korea)'),
('nl', 'Dutch (Netherlands)'),
('pl', 'Polish'),
('pt-br', 'Portuguese (Brazil)'),
('sr', 'Serbian'),
('zh-cn', 'Simplified Chinese'),
('zh-tw', 'Chinese (Taiwan)'),
)
LANGUAGE_CODE = 'en'
LANGUAGE_COOKIE_NAME = 'horizon_language'
USE_I18N = True
USE_L10N = True
USE_TZ = True
OPENSTACK_KEYSTONE_DEFAULT_ROLE = '_member_'
DEFAULT_EXCEPTION_REPORTER_FILTER = 'horizon.exceptions.HorizonReporterFilter'
POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf")
# Map of local copy of service policy files
POLICY_FILES = {
'identity': 'keystone_policy.json',
'compute': 'nova_policy.json',
'volume': 'cinder_policy.json',
'image': 'glance_policy.json',
'orchestration': 'heat_policy.json',
'network': 'neutron_policy.json',
'telemetry': 'ceilometer_policy.json',
}
SECRET_KEY = None
LOCAL_PATH = None
SECURITY_GROUP_RULES = {
'all_tcp': {
'name': _('All TCP'),
'ip_protocol': 'tcp',
'from_port': '1',
'to_port': '65535',
},
'all_udp': {
'name': _('All UDP'),
'ip_protocol': 'udp',
'from_port': '1',
'to_port': '65535',
},
'all_icmp': {
'name': _('All ICMP'),
'ip_protocol': 'icmp',
'from_port': '-1',
'to_port': '-1',
},
}
ADD_INSTALLED_APPS = []
try:
from local.local_settings import * # noqa
except ImportError:
logging.warning("No local_settings file found.")
if not WEBROOT.endswith('/'):
WEBROOT += '/'
if LOGIN_URL is None:
LOGIN_URL = WEBROOT + 'auth/login/'
if LOGOUT_URL is None:
LOGOUT_URL = WEBROOT + 'auth/logout/'
if LOGIN_REDIRECT_URL is None:
LOGIN_REDIRECT_URL = WEBROOT
# Load the pluggable dashboard settings
import openstack_dashboard.enabled
import openstack_dashboard.local.enabled
from openstack_dashboard.utils import settings
INSTALLED_APPS = list(INSTALLED_APPS) # Make sure it's mutable
settings.update_dashboards(
[
openstack_dashboard.enabled,
openstack_dashboard.local.enabled,
],
HORIZON_CONFIG,
INSTALLED_APPS,
)
INSTALLED_APPS[0:0] = ADD_INSTALLED_APPS
# Ensure that we always have a SECRET_KEY set, even when no local_settings.py
# file is present. See local_settings.py.example for full documentation on the
# horizon.utils.secret_key module and its use.
if not SECRET_KEY:
if not LOCAL_PATH:
LOCAL_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'local')
from horizon.utils import secret_key
SECRET_KEY = secret_key.generate_or_read_from_file(os.path.join(LOCAL_PATH,
'.secret_key_store'))
from openstack_dashboard import policy_backend
POLICY_CHECK_FUNCTION = policy_backend.check
# Add HORIZON_CONFIG to the context information for offline compression
COMPRESS_OFFLINE_CONTEXT = {
'STATIC_URL': STATIC_URL,
'HORIZON_CONFIG': HORIZON_CONFIG,
}
SITE_URL = 'http://127.0.0.1'
PORT_BASE = '9999'
if DEBUG:
logging.basicConfig(level=logging.DEBUG)
# During Django reloads, when an active user is logged in, the monkey patch
# below would not otherwise be applied in time, resulting in developers
# appearing to be logged out. In typical production deployments this section
# may be omitted, though it should not be harmful.
from openstack_auth import utils as auth_utils
auth_utils.patch_middleware_get_user()
| [
"[email protected]"
] | |
94a551407625272ce72f502bb937aa316c9dff30 | 9abc2f4fbf1b31b5a56507437b4a8d9c3f3db7e6 | /movies/urls.py | 5b2dfb674ea70f3227f015265650fdd1faabd2f2 | [] | no_license | odbalogun/ticketr | e9fe8461d66dabe395f0e1af8fbecc67dbb16e97 | 94f24c82f407f861f1614a151feb3fdd62b283e5 | refs/heads/master | 2022-11-30T22:40:30.931160 | 2019-08-09T14:34:38 | 2019-08-09T14:34:38 | 188,833,600 | 0 | 0 | null | 2022-11-22T03:50:30 | 2019-05-27T11:50:07 | Python | UTF-8 | Python | false | false | 232 | py | from django.urls import path
from .views import MovieDetailView, MovieListView
app_name = 'movies'
urlpatterns = [
path('', MovieListView.as_view(), name='list'),
path('<pk>/', MovieDetailView.as_view(), name='detail'),
]
| [
"[email protected]"
] | |
ece1320d825400cb0e92383a86db4b36ec2f815f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03845/s420639446.py | 2bb83b6030c44ccd6ef52dd5ec54eab56c4cdf2c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | from sys import stdin
n = int(input())
t = list(map(int, input().split()))
m = int(input())
sum_t = sum(t)
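# Each query asks: what is the total time if the p-th part took x instead of
# t[p-1]?  Answer: sum(t) - t[p-1] + x.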
ans = []
for _ in range(m):
p, x = map(int, stdin.readline().strip().split())
ans.append(sum_t - t[p-1] + x)
for i in ans:
print(i) | [
"[email protected]"
] | |
af0c74728ea18068852ff28b99f97799037e4d9f | c43913a3e0d8d838a29dec4f886251c189577ec0 | /tomviz/python/tomviz/web.py | 00da60559f216faa2e6398ba69a9495189998126 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | tjcorona/tomviz | 82dd30e3374f08cfb3021da739b9915f925677eb | 43e4382fa2027758652395c2d9df90ce84c4e487 | refs/heads/master | 2021-01-20T13:36:48.092818 | 2017-02-17T20:20:23 | 2017-02-17T20:20:23 | 82,701,638 | 0 | 0 | null | 2017-02-21T16:25:17 | 2017-02-21T16:25:17 | null | UTF-8 | Python | false | false | 10,137 | py | import base64
import os
import shutil
import zipfile
from paraview import simple
from paraview.web.dataset_builder import ImageDataSetBuilder
from paraview.web.dataset_builder import CompositeDataSetBuilder
DATA_DIRECTORY = 'data'
DATA_FILENAME = 'data.tomviz'
BASE64_DATA_FILENAME = 'data.tomviz.base64'
HTML_FILENAME = 'tomviz.html'
HTML_WITH_DATA_FILENAME = 'tomviz_data.html'
def web_export(executionPath, destPath, exportType, nbPhi, nbTheta):
# Destination directory for data
dest = '%s/data' % destPath
# Extract initial setting for view
view = simple.GetRenderView()
viewState = {}
for prop in ['CameraViewUp', 'CameraPosition']:
viewState[prop] = tuple(view.GetProperty(prop).GetData())
# Camera handling
deltaPhi = 360 / nbPhi
deltaTheta = int(180 / nbTheta)
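    # Grow the polar sweep in deltaTheta steps, stopping short of +/-90 deg.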
thetaMax = deltaTheta
while thetaMax + deltaTheta < 90:
thetaMax += deltaTheta
camera = {
'type': 'spherical',
'phi': range(0, 360, deltaPhi),
'theta': range(-thetaMax, thetaMax + 1, deltaTheta)
}
# Choose export mode:
if exportType == 0:
export_images(dest, camera)
if exportType == 1:
export_volume_exploration_images(dest, camera)
if exportType == 2:
export_contour_exploration_images(dest, camera)
if exportType == 3:
export_layers(dest, camera)
# Zip data directory
zipData(destPath)
# Copy application
copy_viewer(destPath, executionPath)
create_standalone_html(destPath)
# Restore initial parameters
for prop in viewState:
view.GetProperty(prop).SetData(viewState[prop])
# -----------------------------------------------------------------------------
# Helpers
# -----------------------------------------------------------------------------
def zipData(destinationPath):
dstFile = os.path.join(destinationPath, DATA_FILENAME)
dataDir = os.path.join(destinationPath, DATA_DIRECTORY)
if os.path.exists(dataDir):
with zipfile.ZipFile(dstFile, mode='w') as zf:
for dirName, subdirList, fileList in os.walk(dataDir):
for fname in fileList:
fullPath = os.path.join(dirName, fname)
filePath = os.path.relpath(fullPath, dataDir)
relPath = '%s/%s' % (DATA_DIRECTORY, filePath)
zf.write(fullPath, arcname=relPath,
compress_type=zipfile.ZIP_STORED)
shutil.rmtree(dataDir)
def get_proxy(id):
session = simple.servermanager.ActiveConnection.Session
remoteObj = session.GetRemoteObject(int(id))
return simple.servermanager._getPyProxy(remoteObj)
def copy_viewer(destinationPath, executionPath):
searchPath = executionPath
for upDirTry in range(4):
searchPath = os.path.normpath(os.path.join(searchPath, '..'))
for root, dirs, files in os.walk(searchPath):
if HTML_FILENAME in files and root != destinationPath:
srcFile = os.path.join(root, HTML_FILENAME)
shutil.copy(srcFile, destinationPath)
return
def create_standalone_html(destinationPath):
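    # Inline the zipped dataset as base64 inside the viewer HTML, producing a
    # single self-contained file; <input> elements are dropped because the
    # data no longer needs to be loaded interactively.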
dataPath = os.path.join(destinationPath, DATA_FILENAME)
tmpData = os.path.join(destinationPath, BASE64_DATA_FILENAME)
if not os.path.exists(dataPath):
return
with file(tmpData, mode='w') as dataOut:
with file(dataPath) as dataIn:
base64.encode(dataIn, dataOut)
srcHtmlPath = os.path.join(destinationPath, HTML_FILENAME)
dstHtmlPath = os.path.join(destinationPath, HTML_WITH_DATA_FILENAME)
with file(tmpData) as data:
with file(srcHtmlPath) as srcHtml:
with file(dstHtmlPath, mode='w') as dstHtml:
for line in srcHtml:
if '<script type="text/javascript">' in line:
dstHtml.write(line)
dstHtml.write('var data = "')
for dl in data:
dstHtml.write(dl[:-1])
dstHtml.write('";\n')
elif '<input' in line:
pass
else:
dstHtml.write(line)
os.remove(tmpData)
def add_scene_item(scene, name, proxy, view):
hasNormal = False
hasColor = False
colors = {}
representation = {}
rep = simple.GetRepresentation(proxy, view)
# Skip hidden object or volume
if not rep.Visibility or rep.Representation == 'Volume':
return
for prop in ['Representation']:
representation[prop] = rep.GetProperty(prop).GetData()
pdInfo = proxy.GetPointDataInformation()
numberOfPointArrays = pdInfo.GetNumberOfArrays()
for idx in range(numberOfPointArrays):
array = pdInfo.GetArray(idx)
rangeValues = array.GetRange(-1)
if array.Name == 'Normals':
hasNormal = True
if array.Name not in ['vtkValidPointMask', 'Normals']:
hasColor = True
if rangeValues[0] == rangeValues[1]:
colors[array.Name] = {'constant': rangeValues[0]}
else:
colors[array.Name] = {
'location': 'POINT_DATA',
'range': [i for i in rangeValues]
}
# Get information about cell data arrays
cdInfo = proxy.GetCellDataInformation()
numberOfCellArrays = cdInfo.GetNumberOfArrays()
for idx in range(numberOfCellArrays):
array = cdInfo.GetArray(idx)
hasColor = True
colors[array.Name] = {
'location': 'CELL_DATA',
'range': array.GetRange(-1)
}
# Make sure Normals are available if lighting by normals
source = proxy
if not hasColor or rep.Representation == 'Outline':
colors = {'solid': {'constant': 0}}
elif 'normal' in scene['light'] and not hasNormal:
rep.Visibility = 0
surface = simple.ExtractSurface(Input=proxy)
surfaceWithNormals = simple.GenerateSurfaceNormals(Input=surface)
source = surfaceWithNormals
scene['scene'].append({
'name': name,
'source': source,
'colors': colors,
'representation': representation
})
def get_volume_piecewise(view):
renderer = view.GetClientSideObject().GetRenderer()
for volume in renderer.GetVolumes():
if volume.GetClassName() == 'vtkVolume':
return volume.GetProperty().GetScalarOpacity()
return None
def get_contour():
for key, value in simple.GetSources().iteritems():
if 'FlyingEdges' in key[0]:
return value
return None
# -----------------------------------------------------------------------------
# Image based exporter
# -----------------------------------------------------------------------------
def export_images(destinationPath, camera):
view = simple.GetRenderView()
idb = ImageDataSetBuilder(destinationPath, 'image/jpg', camera)
idb.start(view)
idb.writeImages()
idb.stop()
# -----------------------------------------------------------------------------
# Image based Volume exploration
# -----------------------------------------------------------------------------
def export_volume_exploration_images(destinationPath, camera):
view = simple.GetRenderView()
pvw = get_volume_piecewise(view)
maxOpacity = 0.5
nbSteps = 10
step = 250.0 / float(nbSteps)
span = step * 0.4
values = [float(v + 1) * step for v in range(0, nbSteps)]
if pvw:
idb = ImageDataSetBuilder(destinationPath, 'image/jpg', camera)
idb.getDataHandler().registerArgument(priority=1, name='volume',
values=values, ui='slider',
loop='reverse')
idb.start(view)
for volume in idb.getDataHandler().volume:
pvw.RemoveAllPoints()
pvw.AddPoint(float(volume) - span, 0)
pvw.AddPoint(float(volume), maxOpacity)
pvw.AddPoint(float(volume) + span, 0)
pvw.AddPoint(255, 0)
idb.writeImages()
idb.stop()
else:
print('No Volume module available')
# -----------------------------------------------------------------------------
# Image based Contour exploration
# -----------------------------------------------------------------------------
def export_contour_exploration_images(destinationPath, camera):
view = simple.GetRenderView()
contour = get_contour()
nbSteps = 10
step = 250.0 / float(nbSteps)
values = [float(v + 1) * step for v in range(0, nbSteps)]
if contour:
idb = ImageDataSetBuilder(destinationPath, 'image/jpg', camera)
idb.getDataHandler().registerArgument(priority=1, name='contour',
values=values, ui='slider',
loop='reverse')
idb.start(view)
for contourValue in idb.getDataHandler().contour:
contour.Value = [contourValue]
idb.writeImages()
idb.stop()
else:
print('No contour module available')
# -----------------------------------------------------------------------------
# Composite exporter
# -----------------------------------------------------------------------------
def export_layers(destinationPath, camera):
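    # Snapshot the camera state, describe every visible source, then write
    # the layered composite dataset used by the web viewer.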
view = simple.GetRenderView()
fp = tuple(view.CameraFocalPoint)
cp = tuple(view.CameraPosition)
vu = tuple(view.CameraViewUp)
sceneDescription = {
'size': tuple(view.ViewSize),
'light': ['intensity'], # 'normal', intensity
'camera': {
'CameraViewUp': vu,
'CameraPosition': cp,
'CameraFocalPoint': fp
},
'scene': []
}
for key, value in simple.GetSources().iteritems():
add_scene_item(sceneDescription, key[0], value, view)
# Generate export
dsb = CompositeDataSetBuilder(
destinationPath, sceneDescription, camera, {}, {}, view)
dsb.start()
dsb.writeData()
dsb.stop(compress=False)
| [
"[email protected]"
] | |
a85b28586d5142d231dad051aaec6bbade136a1b | b501a5eae1018c1c26caa96793c6ee17865ebb2d | /data_compression_and_archiving/gzip/gzip_seek.py | a927d0dc0cf73da48f9b2242a96373f98fa54184 | [] | no_license | jincurry/standard_Library_Learn | 12b02f9e86d31ca574bb6863aefc95d63cc558fc | 6c7197f12747456e0f1f3efd09667682a2d1a567 | refs/heads/master | 2022-10-26T07:28:36.545847 | 2018-05-04T12:54:50 | 2018-05-04T12:54:50 | 125,447,397 | 0 | 1 | null | 2022-10-02T17:21:50 | 2018-03-16T01:32:50 | Python | UTF-8 | Python | false | false | 371 | py | import gzip
with gzip.open('example.txt.gz', 'rb') as input_file:
print('Entire file:')
all_data = input_file.read()
print(all_data)
expected = all_data[5:15]
input_file.seek(0)
input_file.seek(5)
print('Starting at position 5 for 10 bytes:')
partial = input_file.read(10)
print(partial)
print()
print(expected == partial)
| [
"[email protected]"
] | |
f0aefe5cd151f1d2f53ec1a7689f18e5fbc73561 | 36957a9ce540846d08f151b6a2c2d582cff1df47 | /VR/Python/Python36/Lib/test/test_tcl.py | ba93edb736e64213e90fc8006166fdef79cc82e4 | [] | no_license | aqp1234/gitVR | 60fc952307ef413e396d31e0d136faffe087ed2b | e70bd82c451943c2966b8ad1bee620a0ee1080d2 | refs/heads/master | 2022-12-29T15:30:12.540947 | 2020-10-07T15:26:32 | 2020-10-07T15:26:32 | 290,163,043 | 0 | 1 | null | 2020-08-25T09:15:40 | 2020-08-25T08:47:36 | C# | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:5d2b96fe46f7dcde01e44c07777df5125ffa70a5ff4a41c8f3d9ac5b08420a16
size 30385
| [
"[email protected]"
] | |
fa8fee3c0516297125b7949177ef554e11e8ddc6 | e9524305812608ae488884e5af133655f385a08a | /VueDjangoFrameWorkShop/settings.py | c2a37f3d41801705d17a8950fab2b5b60c610331 | [
"MIT"
] | permissive | GryffindorMuggle/python3.6-django2.1-django-rest-framework | 66c43ba70477c1d099309c6a80d0b788d2636de3 | 4e21db7ce9eff77d030deb74de33189352010765 | refs/heads/master | 2020-03-26T13:48:52.823676 | 2018-08-16T08:33:24 | 2018-08-16T08:33:24 | 144,958,050 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,848 | py | """
Django settings for VueDjangoFrameWorkShop project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import datetime
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
from past.builtins import execfile
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# sys.path.insert(0,BASE_DIR)
sys.path.insert(0,os.path.join(BASE_DIR, 'apps'))
sys.path.insert(0,os.path.join(BASE_DIR, 'extra_apps'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'y5yew=o5yey*9ydgt74-st11qkt$3n_i9r-c+aw$lt0%x3%a^)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Allow login with email, username, or mobile number alike
AUTHENTICATION_BACKENDS = (
'users.views.CustomBackend',
'social_core.backends.weibo.WeiboOAuth2',
'social_core.backends.qq.QQOAuth2',
'social_core.backends.weixin.WeixinOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
# Overridden here so that our custom UserProfile model takes effect
AUTH_USER_MODEL = "users.UserProfile"
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'users.apps.UsersConfig',
'goods.apps.GoodsConfig',
'trade.apps.TradeConfig',
'user_operation.apps.UserOperationConfig',
'xadmin',
'crispy_forms',
'DjangoUeditor',
'rest_framework',
'django_filters',
'corsheaders',
'rest_framework.authtoken',
'social_django',
'raven.contrib.django.raven_compat',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ORIGIN_ALLOW_ALL = True
CORS_ORIGIN_WHITELIST = (
'127.0.0.1:3000'
)
ROOT_URLCONF = 'VueDjangoFrameWorkShop.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
],
},
},
]
WSGI_APPLICATION = 'VueDjangoFrameWorkShop.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'vue_shop',
'USER': 'root',
'PASSWORD': '123456',
'HOST': '127.0.0.1',
"OPTIONS": {"init_command": "SET default_storage_engine=INNODB;"}
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
# Set the language to Chinese
LANGUAGE_CODE = 'zh-hans'
# Set the time zone to Shanghai
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
# Controls database time storage; when True, times are stored as UTC
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
# Paths for uploaded files and image access
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
# All DRF-related settings live in here
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_THROTTLE_CLASSES': (
'rest_framework.throttling.AnonRateThrottle',
'rest_framework.throttling.UserRateThrottle'
),
'DEFAULT_THROTTLE_RATES': {
'anon': '100/day',
'user': '1000/day'
}
}
# Settings related to DRF's JWT
JWT_AUTH = {
'JWT_EXPIRATION_DELTA': datetime.timedelta(seconds=3600),
'JWT_AUTH_HEADER_PREFIX': 'Bearer',
}
# Mobile phone number regular expression
REGEX_MOBILE = "^1[358]\d{9}$|^147\d{8}$|^176\d{8}$"
# Yunpian (SMS gateway) settings
APIKEY = ''
# Cache expiry time
REST_FRAMEWORK_EXTENSIONS = {
'DEFAULT_CACHE_RESPONSE_TIMEOUT': 60 * 15
}
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
}
}
# Paths to the Alipay-related keys
private_key_path = os.path.join(BASE_DIR, 'apps/trade/keys/private_2048.txt')
ali_pub_key_path = os.path.join(BASE_DIR, 'apps/trade/keys/alipay_key_2048.txt')
# Third-party (social) login settings
SOCIAL_AUTH_WEIBO_KEY = 'foobar'
SOCIAL_AUTH_WEIBO_SECRET = 'bazqux'
SOCIAL_AUTH_QQ_KEY = 'foobar'
SOCIAL_AUTH_QQ_SECRET = 'bazqux'
SOCIAL_AUTH_WEIXIN_KEY = 'foobar'
SOCIAL_AUTH_WEIXIN_SECRET = 'bazqux'
# Sentry settings
import raven
RAVEN_CONFIG = {
'dsn': 'https://<key>:<secret>@sentry.io/<project>',
}
REMOTE_DEBUG = True
PROJECT_ROOT = os.path.join(BASE_DIR, 'VueDjangoFrameWorkShop')
if DEBUG and REMOTE_DEBUG:
try:
execfile(os.path.join(PROJECT_ROOT, 'dev_settings.py'))
except IOError:
pass
elif DEBUG:
try:
execfile(os.path.join(PROJECT_ROOT, 'local_settings.py'))
except IOError:
pass
else:
try:
execfile(os.path.join(PROJECT_ROOT, 'dev_settings.py'))
except IOError:
pass
| [
"[email protected]"
] | |
c1c531d2e16c942466ed4ed651adf50c6a1f6ed3 | 9d64a438cdfe4f3feb54f2f0dc7431139c4b9fb9 | /type_converter/icon_type_converter/actions/string_to_boolean/action.py | fe85b8b0d3c9bb94d56678f388240e518fe856a5 | [
"MIT"
] | permissive | PhilippBehmer/insightconnect-plugins | 5ad86faaccc86f2f4ed98f7e5d518e74dddb7b91 | 9195ddffc575bbca758180473d2eb392e7db517c | refs/heads/master | 2021-07-25T02:13:08.184301 | 2021-01-19T22:51:35 | 2021-01-19T22:51:35 | 239,746,770 | 0 | 0 | MIT | 2020-02-11T11:34:52 | 2020-02-11T11:34:51 | null | UTF-8 | Python | false | false | 864 | py | import insightconnect_plugin_runtime
from .schema import StringToBooleanInput, StringToBooleanOutput, Input, Output, Component
# Custom imports below
from insightconnect_plugin_runtime.exceptions import PluginException
class StringToBoolean(insightconnect_plugin_runtime.Action):
def __init__(self):
super(self.__class__, self).__init__(
name='string_to_boolean',
description=Component.DESCRIPTION,
input=StringToBooleanInput(),
output=StringToBooleanOutput())
def run(self, params={}):
try:
return {
Output.OUTPUT: params.get(Input.INPUT).lower() == "true"
}
except Exception as e:
raise PluginException(
cause="Converting error.",
assistance="Check input",
data=e
)
| [
"[email protected]"
] | |
b5f2f15ab28f76cf471a10e139fe25ecda72997b | 489574745e7823d1dc22bda0676d6fa1b42ef547 | /src/django_wools/templatetags/wools_for_wt_images.py | 77493569dd7609d8a85ed03caa5f1fcbb2ef3fd6 | [] | no_license | juliengueperoux/django-wools | 9419239b27170fc701708817f1c3e19c57edcf7c | d7a0dd98a873cb2d41a3b26d18ddd243fe6d22b6 | refs/heads/master | 2023-09-05T07:31:34.831561 | 2021-05-17T13:45:41 | 2021-05-17T13:45:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,162 | py | import re
from enum import Enum
from typing import Iterator, NamedTuple, Optional
from django import template
from wagtail.images.models import AbstractImage
from ..settings import wool_settings
register = template.Library()
class WidthInfo(NamedTuple):
"""
Computed width from generate_widths()
"""
width: int
pixel_ratio: float
def generate_widths(spec_width) -> Iterator[WidthInfo]:
"""
Generates width and device pixel ratios based on settings bounds and the
initial spec width of the image.
Parameters
----------
spec_width
On-screen width of the image
"""
width = spec_width
max_width = spec_width * wool_settings.MAX_PIXEL_RATIO
for _ in range(0, 1000):
yield WidthInfo(width, round(float(width) / float(spec_width), 4))
if width > max_width:
return
width *= 1 + wool_settings.INCREMENT_STEP_PERCENT / 100.0
class WagtailSizeOperation(Enum):
"""
Allowed Wagtail operations
"""
max = "max"
min = "min"
width = "width"
fill = "fill"
class WagtailSizeSpec(NamedTuple):
"""
Parsed Wagtail size specification
"""
operation: WagtailSizeOperation
width: int
height: Optional[int]
zoom: int = 0
def __str__(self):
"""
Un-parses the string
"""
out = f"{self.operation.value}-{self.width}"
if self.height:
out += f"x{self.height}"
if self.zoom:
out += f"-c{self.zoom}"
return out
@classmethod
def parse(cls, spec) -> "WagtailSizeSpec":
"""
Parses a spec and returns the parsed tuple
"""
ops = "|".join(WagtailSizeOperation._member_names_) # noqa
exp = re.compile(
rf"(?P<op>{ops})-(?P<width>\d+)x(?P<height>\d+)?(-c(?P<zoom>\d+))?"
)
if not (m := exp.match(spec)):
raise ValueError(
f'Provided spec "{spec}" cannot be parsed. Please bear in '
f'mind that "scale" and "height" operations are not permitted '
f"since they do not have any width constraint."
)
return cls(
operation=WagtailSizeOperation(m.group("op")),
width=int(m.group("width")),
height=(int(m.group("height")) if m.group("height") else None),
zoom=(int(m.group("zoom")) if m.group("zoom") else 0),
)
def at_width(self, width: int) -> "WagtailSizeSpec":
"""
Returns a scaled version of this spec to fit the new width
"""
ratio = float(width) / float(self.width)
if self.height:
new_height = ratio * self.height
else:
new_height = None
return self._replace(height=round(new_height), width=round(width))
@register.inclusion_tag("wools/images/fixed_size.html")
def image_fixed_size(
image: AbstractImage,
spec: str,
css_class: str = "",
fallback_format: str = "png",
lossless: bool = False,
):
"""
This tag manages images whose size on screen stay the same and simply
needs larger images for larger pixel ratios.
Image will be encoded in WebP with a fallback of the choosing of the
caller, by default PNG to make sure to lose nothing (neither quality
neither alpha channel).
Parameters
----------
image
Original Wagtail image
spec
Wagtail size spec
css_class
CSS class that will be added to the root <picture> element
fallback_format
The format to use for browsers that do not support WebP
lossless
Enables lossless compression for WebP. If you want the fallback to also
be lossless, you need to use "png" as fallback_format.
"""
parsed_spec = WagtailSizeSpec.parse(spec)
if fallback_format not in {"png", "jpeg"}:
raise ValueError('Only "png" and "jpeg" are allowed as fallbacks')
if not isinstance(image, AbstractImage):
return {}
base_rendition = image.get_rendition(f"{spec}|format-{fallback_format}")
sources = {}
if lossless:
webp_format = "webp-lossless"
else:
webp_format = "webp"
for fmt in [webp_format, fallback_format]:
sources[fmt] = dict(set=[], base=image.get_rendition(f"{spec}|format-{fmt}"))
for width, density in generate_widths(parsed_spec.width):
if int(density) == density:
density = int(density)
rendition = image.get_rendition(
f"{parsed_spec.at_width(width)}|format-{fmt}"
)
sources[fmt]["set"].append(
dict(
rendition=rendition,
density=density,
string=f"{rendition.url} {density}x",
)
)
sources[fmt]["srcset"] = ", ".join(x["string"] for x in sources[fmt]["set"])
return dict(
base_url=base_rendition.url,
size=dict(width=base_rendition.width, height=base_rendition.height),
alt=image.default_alt_text,
sources=sources,
css_class=css_class,
)
| [
"[email protected]"
] | |
6edf5f48c2d80e7fa0fef3d643563c3a07612cb8 | 512b388a53022f561e2375b4621f78572d3b4f04 | /catalogues/migrations/0021_identifications_campox.py | dd681d8898ea94145bf51e32bebc8937817497db | [] | no_license | Madoka09/Worker15 | 006d5ac44dc55c3ae7f72d3b8300f3567395cdff | 181012d309052b2df3d4ef99a197e8acef73a185 | refs/heads/master | 2023-03-24T05:29:02.060796 | 2021-03-16T21:56:21 | 2021-03-16T21:56:21 | 336,394,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | # Generated by Django 3.0.4 on 2020-11-23 20:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('catalogues', '0020_auto_20201123_1401'),
]
operations = [
migrations.AddField(
model_name='identifications',
name='campox',
field=models.CharField(default='X', max_length=1),
),
]
| [
"[email protected]"
] | |
0e87d7a6cea6e6444077e686b495635459e2db8c | 33febf8b617ef66d7086765f1c0bf6523667a959 | /test/automatic/density.py | a584970a7d4dc681c0c7d31e59502b15bc8a5b87 | [] | no_license | JonasRSV/probpy | 857201c7f122461463b75d63e5c688e011615292 | 5203063db612b2b2bc0434a7f2a02c9d2e27ed6a | refs/heads/master | 2022-07-07T06:17:44.504570 | 2020-04-15T14:52:20 | 2020-04-15T14:52:20 | 245,820,195 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,184 | py | import unittest
import time
import numpy as np
from probpy.distributions import normal
import numba
from probpy.density import UCKD, RCKD, URBK
from probpy.sampling import fast_metropolis_hastings
from probpy.search import search_posterior_estimation
from probpy.distributions import normal, exponential, jit
from probpy.learn.posterior.common import jit_log_probabilities
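# Target density: an equal-weight mixture of three Gaussians (multimodal).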
def distribution(x):
return 0.3333 * normal.p(x, -2, 1) + 0.3333 * normal.p(x, 2, 0.2) + 0.3333 * normal.p(x, 4, 0.2)
def log_distribution(x):
return np.log(0.3333 * normal.p(x, -2, 1) + 0.3333 * normal.p(x, 2, 0.2) + 0.3333 * normal.p(x, 4, 0.2))
class AutomaticDensityTest(unittest.TestCase):
def test_running_uckd(self):
timestamp = time.time()
samples = fast_metropolis_hastings(5000, distribution, initial=np.random.rand(10, 1), energy=1.0)
print("making samples", time.time() - timestamp)
density = UCKD(variance=5.0)
density.fit(samples)
lb, ub = -6, 6
n = 2000
x = np.linspace(lb, ub, n)
y = density.p(x)
y = y / (y.sum() / (n / (ub - lb)))
delta = (n / (ub - lb))
self.assertAlmostEqual(y.sum() / delta, 1, delta=0.1)
fast_p = density.get_fast_p()
        fast_p(x)  # slower than plain p here, but numba-compiled callers need numba functions
def test_running_rckd(self):
timestamp = time.time()
samples = fast_metropolis_hastings(5000, distribution, initial=np.random.rand(50, 1), energy=1.0)
print("making samples", time.time() - timestamp)
density = RCKD(variance=5.0, error=0.001, verbose=True)
timestamp = time.time()
density.fit(samples)
print("fitting samples", time.time() - timestamp)
lb, ub = -6, 6
n = 2000
x = np.linspace(lb, ub, n)
print("x", len(x))
y = density.p(x)
delta = (n / (ub - lb))
self.assertAlmostEqual(y.sum() / delta, 1, delta=0.5)
fast_p = density.get_fast_p()
        fast_p(x)  # slower than plain p here, but numba-compiled callers need numba functions
def test_running_urbk(self):
prior_rv = normal.med(mu=0.5, sigma=1.0)
n = normal.fast_p
prior = jit.jit_probability(prior_rv)
@numba.jit(fastmath=True, nopython=True, forceobj=False)
def likelihood(y, w):
return n(y - w, mu=0.0, sigma=1.0)
data = normal.sample(mu=3.0, sigma=1.0, size=100)
log_likelihood, log_prior = jit_log_probabilities((data,), likelihood, prior)
samples, densities = search_posterior_estimation(
size=300, log_likelihood=log_likelihood,
log_prior=log_prior,
initial=prior_rv.sample(size=10),
energy=0.1,
volume=100
)
density = URBK(variance=5.0, verbose=True)
density.fit(samples, densities)
lb, ub = -6, 6
n = 2000
x = np.linspace(lb, ub, n)
y = density.p(x)
self.assertEqual(y.size, 2000)
fast_p = density.get_fast_p()
        fast_p(x)  # slower than plain p here, but numba-compiled callers need numba functions
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
162735502472a321677a2df6fbcd5c3543c436a1 | 868e94b7121b1f4c27fdf1d5ff88cb2fa6786e47 | /polling_stations/apps/data_finder/urls.py | 189b4d82cd5de12b0d179d75ff039766677fb31d | [] | no_license | JoeMitchell/UK-Polling-Stations | df0ebc2343a9b18928263a60be5718f08588782e | 861157b431f14eb24fdd17fb7380ac5e90d03a65 | refs/heads/master | 2021-01-24T04:35:49.749879 | 2016-02-29T11:55:46 | 2016-02-29T11:55:46 | 52,805,675 | 1 | 0 | null | 2016-02-29T16:27:05 | 2016-02-29T16:27:05 | null | UTF-8 | Python | false | false | 564 | py | from django.conf.urls import patterns, include, url
from django.conf import settings
from django.views.decorators.cache import cache_page
from constituencies.views import ConstituencyList, ConstituencyView
urlpatterns = patterns(
'',
url(r'^/$', cache_page(60*60)(ConstituencyList.as_view()), name='constituencies'),
# url(r'^/notspots/', view_not_spots, name='constituency_notspots'),
url(r'^/(?P<pk>[^/]+)(?:/(?P<ignored_slug>.*))?$',
cache_page(60 * 60 * 24)(ConstituencyView.as_view()),
name='constituency-view'),
)
| [
"[email protected]"
] | |
97c2aafef20a75e84303a743f770eef89dd13081 | 7d35dfa8933ad636585f8d0e6260811fef384d5d | /test/dialect/mysql/test_types.py | 7b7cf36679d503ba52c3e1f2b6e697da91665438 | [
"MIT"
] | permissive | gourneau/sqlalchemy | a3d330bbaade146458feb8ce11f72c0833e4c590 | 888f296540faf0e23d9b2b267a1932d712e17b01 | refs/heads/master | 2021-01-18T08:30:21.260742 | 2016-07-13T18:10:00 | 2016-07-13T18:10:00 | 63,383,368 | 1 | 0 | null | 2016-07-15T02:03:27 | 2016-07-15T02:03:25 | null | UTF-8 | Python | false | false | 38,610 | py | # coding: utf-8
from sqlalchemy.testing import eq_, assert_raises, assert_raises_message, is_
from sqlalchemy import *
from sqlalchemy import sql, exc, schema
from sqlalchemy.util import u
from sqlalchemy import util
from sqlalchemy.dialects.mysql import base as mysql
from sqlalchemy.testing import fixtures, AssertsCompiledSQL, AssertsExecutionResults
from sqlalchemy import testing
import datetime
import decimal
from sqlalchemy import types as sqltypes
class TypesTest(fixtures.TestBase, AssertsExecutionResults, AssertsCompiledSQL):
"Test MySQL column types"
__dialect__ = mysql.dialect()
__only_on__ = 'mysql'
__backend__ = True
def test_numeric(self):
"Exercise type specification and options for numeric types."
columns = [
# column type, args, kwargs, expected ddl
# e.g. Column(Integer(10, unsigned=True)) ==
# 'INTEGER(10) UNSIGNED'
(mysql.MSNumeric, [], {},
'NUMERIC'),
(mysql.MSNumeric, [None], {},
'NUMERIC'),
(mysql.MSNumeric, [12], {},
'NUMERIC(12)'),
(mysql.MSNumeric, [12, 4], {'unsigned':True},
'NUMERIC(12, 4) UNSIGNED'),
(mysql.MSNumeric, [12, 4], {'zerofill':True},
'NUMERIC(12, 4) ZEROFILL'),
(mysql.MSNumeric, [12, 4], {'zerofill':True, 'unsigned':True},
'NUMERIC(12, 4) UNSIGNED ZEROFILL'),
(mysql.MSDecimal, [], {},
'DECIMAL'),
(mysql.MSDecimal, [None], {},
'DECIMAL'),
(mysql.MSDecimal, [12], {},
'DECIMAL(12)'),
(mysql.MSDecimal, [12, None], {},
'DECIMAL(12)'),
(mysql.MSDecimal, [12, 4], {'unsigned':True},
'DECIMAL(12, 4) UNSIGNED'),
(mysql.MSDecimal, [12, 4], {'zerofill':True},
'DECIMAL(12, 4) ZEROFILL'),
(mysql.MSDecimal, [12, 4], {'zerofill':True, 'unsigned':True},
'DECIMAL(12, 4) UNSIGNED ZEROFILL'),
(mysql.MSDouble, [None, None], {},
'DOUBLE'),
(mysql.MSDouble, [12, 4], {'unsigned':True},
'DOUBLE(12, 4) UNSIGNED'),
(mysql.MSDouble, [12, 4], {'zerofill':True},
'DOUBLE(12, 4) ZEROFILL'),
(mysql.MSDouble, [12, 4], {'zerofill':True, 'unsigned':True},
'DOUBLE(12, 4) UNSIGNED ZEROFILL'),
(mysql.MSReal, [None, None], {},
'REAL'),
(mysql.MSReal, [12, 4], {'unsigned':True},
'REAL(12, 4) UNSIGNED'),
(mysql.MSReal, [12, 4], {'zerofill':True},
'REAL(12, 4) ZEROFILL'),
(mysql.MSReal, [12, 4], {'zerofill':True, 'unsigned':True},
'REAL(12, 4) UNSIGNED ZEROFILL'),
(mysql.MSFloat, [], {},
'FLOAT'),
(mysql.MSFloat, [None], {},
'FLOAT'),
(mysql.MSFloat, [12], {},
'FLOAT(12)'),
(mysql.MSFloat, [12, 4], {},
'FLOAT(12, 4)'),
(mysql.MSFloat, [12, 4], {'unsigned':True},
'FLOAT(12, 4) UNSIGNED'),
(mysql.MSFloat, [12, 4], {'zerofill':True},
'FLOAT(12, 4) ZEROFILL'),
(mysql.MSFloat, [12, 4], {'zerofill':True, 'unsigned':True},
'FLOAT(12, 4) UNSIGNED ZEROFILL'),
(mysql.MSInteger, [], {},
'INTEGER'),
(mysql.MSInteger, [4], {},
'INTEGER(4)'),
(mysql.MSInteger, [4], {'unsigned':True},
'INTEGER(4) UNSIGNED'),
(mysql.MSInteger, [4], {'zerofill':True},
'INTEGER(4) ZEROFILL'),
(mysql.MSInteger, [4], {'zerofill':True, 'unsigned':True},
'INTEGER(4) UNSIGNED ZEROFILL'),
(mysql.MSBigInteger, [], {},
'BIGINT'),
(mysql.MSBigInteger, [4], {},
'BIGINT(4)'),
(mysql.MSBigInteger, [4], {'unsigned':True},
'BIGINT(4) UNSIGNED'),
(mysql.MSBigInteger, [4], {'zerofill':True},
'BIGINT(4) ZEROFILL'),
(mysql.MSBigInteger, [4], {'zerofill':True, 'unsigned':True},
'BIGINT(4) UNSIGNED ZEROFILL'),
(mysql.MSMediumInteger, [], {},
'MEDIUMINT'),
(mysql.MSMediumInteger, [4], {},
'MEDIUMINT(4)'),
(mysql.MSMediumInteger, [4], {'unsigned':True},
'MEDIUMINT(4) UNSIGNED'),
(mysql.MSMediumInteger, [4], {'zerofill':True},
'MEDIUMINT(4) ZEROFILL'),
(mysql.MSMediumInteger, [4], {'zerofill':True, 'unsigned':True},
'MEDIUMINT(4) UNSIGNED ZEROFILL'),
(mysql.MSTinyInteger, [], {},
'TINYINT'),
(mysql.MSTinyInteger, [1], {},
'TINYINT(1)'),
(mysql.MSTinyInteger, [1], {'unsigned':True},
'TINYINT(1) UNSIGNED'),
(mysql.MSTinyInteger, [1], {'zerofill':True},
'TINYINT(1) ZEROFILL'),
(mysql.MSTinyInteger, [1], {'zerofill':True, 'unsigned':True},
'TINYINT(1) UNSIGNED ZEROFILL'),
(mysql.MSSmallInteger, [], {},
'SMALLINT'),
(mysql.MSSmallInteger, [4], {},
'SMALLINT(4)'),
(mysql.MSSmallInteger, [4], {'unsigned':True},
'SMALLINT(4) UNSIGNED'),
(mysql.MSSmallInteger, [4], {'zerofill':True},
'SMALLINT(4) ZEROFILL'),
(mysql.MSSmallInteger, [4], {'zerofill':True, 'unsigned':True},
'SMALLINT(4) UNSIGNED ZEROFILL'),
]
for type_, args, kw, res in columns:
type_inst = type_(*args, **kw)
self.assert_compile(
type_inst,
res
)
# test that repr() copies out all arguments
self.assert_compile(
eval("mysql.%r" % type_inst),
res
)
# fixed in mysql-connector as of 2.0.1,
# see http://bugs.mysql.com/bug.php?id=73266
@testing.provide_metadata
def test_precision_float_roundtrip(self):
t = Table('t', self.metadata,
Column('scale_value', mysql.DOUBLE(
precision=15, scale=12, asdecimal=True)),
Column('unscale_value', mysql.DOUBLE(
decimal_return_scale=12, asdecimal=True))
)
t.create(testing.db)
testing.db.execute(
t.insert(), scale_value=45.768392065789,
unscale_value=45.768392065789
)
result = testing.db.scalar(select([t.c.scale_value]))
eq_(result, decimal.Decimal("45.768392065789"))
result = testing.db.scalar(select([t.c.unscale_value]))
eq_(result, decimal.Decimal("45.768392065789"))
@testing.exclude('mysql', '<', (4, 1, 1), 'no charset support')
def test_charset(self):
"""Exercise CHARACTER SET and COLLATE-ish options on string types."""
columns = [
(mysql.MSChar, [1], {},
'CHAR(1)'),
(mysql.NCHAR, [1], {},
'NATIONAL CHAR(1)'),
(mysql.MSChar, [1], {'binary':True},
'CHAR(1) BINARY'),
(mysql.MSChar, [1], {'ascii':True},
'CHAR(1) ASCII'),
(mysql.MSChar, [1], {'unicode':True},
'CHAR(1) UNICODE'),
(mysql.MSChar, [1], {'ascii':True, 'binary':True},
'CHAR(1) ASCII BINARY'),
(mysql.MSChar, [1], {'unicode':True, 'binary':True},
'CHAR(1) UNICODE BINARY'),
(mysql.MSChar, [1], {'charset':'utf8'},
'CHAR(1) CHARACTER SET utf8'),
(mysql.MSChar, [1], {'charset':'utf8', 'binary':True},
'CHAR(1) CHARACTER SET utf8 BINARY'),
(mysql.MSChar, [1], {'charset':'utf8', 'unicode':True},
'CHAR(1) CHARACTER SET utf8'),
(mysql.MSChar, [1], {'charset':'utf8', 'ascii':True},
'CHAR(1) CHARACTER SET utf8'),
(mysql.MSChar, [1], {'collation': 'utf8_bin'},
'CHAR(1) COLLATE utf8_bin'),
(mysql.MSChar, [1], {'charset': 'utf8', 'collation': 'utf8_bin'},
'CHAR(1) CHARACTER SET utf8 COLLATE utf8_bin'),
(mysql.MSChar, [1], {'charset': 'utf8', 'binary': True},
'CHAR(1) CHARACTER SET utf8 BINARY'),
(mysql.MSChar, [1], {'charset': 'utf8', 'collation': 'utf8_bin',
'binary': True},
'CHAR(1) CHARACTER SET utf8 COLLATE utf8_bin'),
(mysql.MSChar, [1], {'national':True},
'NATIONAL CHAR(1)'),
(mysql.MSChar, [1], {'national':True, 'charset':'utf8'},
'NATIONAL CHAR(1)'),
(mysql.MSChar, [1], {'national':True, 'charset':'utf8',
'binary':True},
'NATIONAL CHAR(1) BINARY'),
(mysql.MSChar, [1], {'national':True, 'binary':True,
'unicode':True},
'NATIONAL CHAR(1) BINARY'),
(mysql.MSChar, [1], {'national':True, 'collation':'utf8_bin'},
'NATIONAL CHAR(1) COLLATE utf8_bin'),
(mysql.MSString, [1], {'charset':'utf8', 'collation':'utf8_bin'},
'VARCHAR(1) CHARACTER SET utf8 COLLATE utf8_bin'),
(mysql.MSString, [1], {'national':True, 'collation':'utf8_bin'},
'NATIONAL VARCHAR(1) COLLATE utf8_bin'),
(mysql.MSTinyText, [], {'charset':'utf8', 'collation':'utf8_bin'},
'TINYTEXT CHARACTER SET utf8 COLLATE utf8_bin'),
(mysql.MSMediumText, [], {'charset':'utf8', 'binary':True},
'MEDIUMTEXT CHARACTER SET utf8 BINARY'),
(mysql.MSLongText, [], {'ascii':True},
'LONGTEXT ASCII'),
(mysql.ENUM, ["foo", "bar"], {'unicode':True},
'''ENUM('foo','bar') UNICODE'''),
(String, [20], {"collation": "utf8"}, 'VARCHAR(20) COLLATE utf8')
]
for type_, args, kw, res in columns:
type_inst = type_(*args, **kw)
self.assert_compile(
type_inst,
res
)
# test that repr() copies out all arguments
self.assert_compile(
eval("mysql.%r" % type_inst)
if type_ is not String
else eval("%r" % type_inst),
res
)
@testing.only_if('mysql')
@testing.fails_on('mysql+mysqlconnector', "different unicode behavior")
@testing.exclude('mysql', '<', (5, 0, 5), 'a 5.0+ feature')
@testing.provide_metadata
def test_charset_collate_table(self):
t = Table('foo', self.metadata,
Column('id', Integer),
Column('data', UnicodeText),
mysql_default_charset='utf8',
mysql_collate='utf8_bin'
)
t.create()
m2 = MetaData(testing.db)
t2 = Table('foo', m2, autoload=True)
eq_(t2.kwargs['mysql_collate'], 'utf8_bin')
eq_(t2.kwargs['mysql_default charset'], 'utf8')
# test [ticket:2906]
# in order to test the condition here, need to use
# MySQLdb 1.2.3 and also need to pass either use_unicode=1
# or charset=utf8 to the URL.
t.insert().execute(id=1, data=u('some text'))
assert isinstance(testing.db.scalar(select([t.c.data])), util.text_type)
def test_bit_50(self):
"""Exercise BIT types on 5.0+ (not valid for all engine types)"""
for type_, expected in [
(mysql.MSBit(), "BIT"),
(mysql.MSBit(1), "BIT(1)"),
(mysql.MSBit(63), "BIT(63)"),
]:
self.assert_compile(type_, expected)
@testing.exclude('mysql', '<', (5, 0, 5), 'a 5.0+ feature')
@testing.provide_metadata
def test_bit_50_roundtrip(self):
bit_table = Table('mysql_bits', self.metadata,
Column('b1', mysql.MSBit),
Column('b2', mysql.MSBit()),
Column('b3', mysql.MSBit(), nullable=False),
Column('b4', mysql.MSBit(1)),
Column('b5', mysql.MSBit(8)),
Column('b6', mysql.MSBit(32)),
Column('b7', mysql.MSBit(63)),
Column('b8', mysql.MSBit(64)))
self.metadata.create_all()
meta2 = MetaData(testing.db)
reflected = Table('mysql_bits', meta2, autoload=True)
for table in bit_table, reflected:
def roundtrip(store, expected=None):
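                # Insert the row, read it back, compare against the expected
                # values (printing diagnostics on mismatch), then clear.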
expected = expected or store
table.insert(store).execute()
row = table.select().execute().first()
try:
self.assert_(list(row) == expected)
except:
print("Storing %s" % store)
print("Expected %s" % expected)
print("Found %s" % list(row))
raise
table.delete().execute().close()
roundtrip([0] * 8)
roundtrip([None, None, 0, None, None, None, None, None])
roundtrip([1] * 8)
roundtrip([sql.text("b'1'")] * 8, [1] * 8)
i = 255
roundtrip([0, 0, 0, 0, i, i, i, i])
i = 2 ** 32 - 1
roundtrip([0, 0, 0, 0, 0, i, i, i])
i = 2 ** 63 - 1
roundtrip([0, 0, 0, 0, 0, 0, i, i])
i = 2 ** 64 - 1
roundtrip([0, 0, 0, 0, 0, 0, 0, i])
def test_boolean(self):
for type_, expected in [
(BOOLEAN(), "BOOL"),
(Boolean(), "BOOL"),
(mysql.TINYINT(1), "TINYINT(1)"),
(mysql.TINYINT(1, unsigned=True), "TINYINT(1) UNSIGNED")
]:
self.assert_compile(type_, expected)
@testing.provide_metadata
def test_boolean_roundtrip(self):
bool_table = Table(
'mysql_bool',
self.metadata,
Column('b1', BOOLEAN),
Column('b2', Boolean),
Column('b3', mysql.MSTinyInteger(1)),
Column('b4', mysql.MSTinyInteger(1, unsigned=True)),
Column('b5', mysql.MSTinyInteger),
)
self.metadata.create_all()
table = bool_table
def roundtrip(store, expected=None):
expected = expected or store
table.insert(store).execute()
row = table.select().execute().first()
self.assert_(list(row) == expected)
for i, val in enumerate(expected):
if isinstance(val, bool):
self.assert_(val is row[i])
table.delete().execute()
roundtrip([None, None, None, None, None])
roundtrip([True, True, 1, 1, 1])
roundtrip([False, False, 0, 0, 0])
roundtrip([True, True, True, True, True], [True, True, 1,
1, 1])
roundtrip([False, False, 0, 0, 0], [False, False, 0, 0, 0])
meta2 = MetaData(testing.db)
table = Table('mysql_bool', meta2, autoload=True)
eq_(colspec(table.c.b3), 'b3 TINYINT(1)')
eq_(colspec(table.c.b4), 'b4 TINYINT(1) UNSIGNED')
meta2 = MetaData(testing.db)
table = Table(
'mysql_bool',
meta2,
Column('b1', BOOLEAN),
Column('b2', Boolean),
Column('b3', BOOLEAN),
Column('b4', BOOLEAN),
autoload=True,
)
eq_(colspec(table.c.b3), 'b3 BOOL')
eq_(colspec(table.c.b4), 'b4 BOOL')
roundtrip([None, None, None, None, None])
roundtrip([True, True, 1, 1, 1], [True, True, True, True,
1])
roundtrip([False, False, 0, 0, 0], [False, False, False,
False, 0])
roundtrip([True, True, True, True, True], [True, True,
True, True, 1])
roundtrip([False, False, 0, 0, 0], [False, False, False,
False, 0])
def test_timestamp_fsp(self):
self.assert_compile(
mysql.TIMESTAMP(fsp=5),
"TIMESTAMP(5)"
)
def test_timestamp_defaults(self):
"""Exercise funky TIMESTAMP default syntax when used in columns."""
columns = [
([TIMESTAMP], {},
'TIMESTAMP NULL'),
([mysql.MSTimeStamp], {},
'TIMESTAMP NULL'),
([mysql.MSTimeStamp(),
DefaultClause(sql.text('CURRENT_TIMESTAMP'))],
{},
"TIMESTAMP NULL DEFAULT CURRENT_TIMESTAMP"),
([mysql.MSTimeStamp,
DefaultClause(sql.text('CURRENT_TIMESTAMP'))],
{'nullable': False},
"TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP"),
([mysql.MSTimeStamp,
DefaultClause(sql.text("'1999-09-09 09:09:09'"))],
{'nullable': False},
"TIMESTAMP NOT NULL DEFAULT '1999-09-09 09:09:09'"),
([mysql.MSTimeStamp(),
DefaultClause(sql.text("'1999-09-09 09:09:09'"))],
{},
"TIMESTAMP NULL DEFAULT '1999-09-09 09:09:09'"),
([mysql.MSTimeStamp(),
DefaultClause(sql.text(
"'1999-09-09 09:09:09' "
"ON UPDATE CURRENT_TIMESTAMP"))],
{},
"TIMESTAMP NULL DEFAULT '1999-09-09 09:09:09' "
"ON UPDATE CURRENT_TIMESTAMP"),
([mysql.MSTimeStamp,
DefaultClause(sql.text(
"'1999-09-09 09:09:09' "
"ON UPDATE CURRENT_TIMESTAMP"))],
{'nullable': False},
"TIMESTAMP NOT NULL DEFAULT '1999-09-09 09:09:09' "
"ON UPDATE CURRENT_TIMESTAMP"),
([mysql.MSTimeStamp(),
DefaultClause(sql.text(
"CURRENT_TIMESTAMP "
"ON UPDATE CURRENT_TIMESTAMP"))],
{},
"TIMESTAMP NULL DEFAULT CURRENT_TIMESTAMP "
"ON UPDATE CURRENT_TIMESTAMP"),
([mysql.MSTimeStamp,
DefaultClause(sql.text(
"CURRENT_TIMESTAMP "
"ON UPDATE CURRENT_TIMESTAMP"))],
{'nullable': False},
"TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP "
"ON UPDATE CURRENT_TIMESTAMP"),
]
for spec, kw, expected in columns:
c = Column('t', *spec, **kw)
Table('t', MetaData(), c)
self.assert_compile(
schema.CreateColumn(c),
"t %s" % expected
)
@testing.provide_metadata
def test_timestamp_nullable(self):
ts_table = Table(
'mysql_timestamp', self.metadata,
Column('t1', TIMESTAMP),
Column('t2', TIMESTAMP, nullable=False),
mysql_engine='InnoDB'
)
self.metadata.create_all()
# TIMESTAMP without NULL inserts current time when passed
# NULL. when not passed, generates 0000-00-00 quite
# annoyingly.
# the flag http://dev.mysql.com/doc/refman/5.6/en/\
# server-system-variables.html#sysvar_explicit_defaults_for_timestamp
# changes this for 5.6 if set.
# normalize dates that are over the second boundary
def normalize(dt):
if dt is None:
return None
elif (dt - now).seconds < 5:
return now
else:
return dt
with testing.db.begin() as conn:
now = conn.scalar("select now()")
conn.execute(
ts_table.insert(), {'t1': now, 't2': None})
conn.execute(
ts_table.insert(), {'t1': None, 't2': None})
conn.execute(
ts_table.insert(), {'t2': None})
eq_(
[tuple([normalize(dt) for dt in row])
for row in conn.execute(ts_table.select())],
[
(now, now),
(None, now),
(None, now)
]
)
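    # For reference: per the compile tests earlier in this class, a plain
    # TIMESTAMP column renders as "TIMESTAMP NULL" while a nullable=False one
    # renders as "TIMESTAMP NOT NULL", which is why inserting NULL into t2
    # above stores the current time rather than NULL.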
def test_datetime_generic(self):
self.assert_compile(
mysql.DATETIME(),
"DATETIME"
)
def test_datetime_fsp(self):
self.assert_compile(
mysql.DATETIME(fsp=4),
"DATETIME(4)"
)
def test_time_generic(self):
""""Exercise TIME."""
self.assert_compile(
mysql.TIME(),
"TIME"
)
def test_time_fsp(self):
self.assert_compile(
mysql.TIME(fsp=5),
"TIME(5)"
)
def test_time_result_processor(self):
eq_(
mysql.TIME().result_processor(None, None)(
datetime.timedelta(seconds=35, minutes=517,
microseconds=450
)),
datetime.time(8, 37, 35, 450)
)
@testing.fails_on("mysql+oursql", "TODO: probable OurSQL bug")
@testing.provide_metadata
def test_time_roundtrip(self):
t = Table('mysql_time', self.metadata,
Column('t1', mysql.TIME())
)
t.create()
t.insert().values(t1=datetime.time(8, 37, 35)).execute()
eq_(select([t.c.t1]).scalar(), datetime.time(8, 37, 35))
@testing.provide_metadata
def test_year(self):
"""Exercise YEAR."""
year_table = Table('mysql_year', self.metadata,
Column('y1', mysql.MSYear),
Column('y2', mysql.MSYear),
Column('y3', mysql.MSYear),
Column('y5', mysql.MSYear(4)))
for col in year_table.c:
self.assert_(repr(col))
year_table.create()
reflected = Table('mysql_year', MetaData(testing.db),
autoload=True)
for table in year_table, reflected:
table.insert(['1950', '50', None, 1950]).execute()
row = table.select().execute().first()
eq_(list(row), [1950, 2050, None, 1950])
table.delete().execute()
self.assert_(colspec(table.c.y1).startswith('y1 YEAR'))
eq_(colspec(table.c.y5), 'y5 YEAR(4)')
class JSONTest(fixtures.TestBase):
__requires__ = ('json_type', )
__only_on__ = 'mysql'
__backend__ = True
@testing.provide_metadata
def test_reflection(self):
Table(
'mysql_json', self.metadata,
Column('foo', mysql.JSON)
)
self.metadata.create_all()
reflected = Table('mysql_json', MetaData(), autoload_with=testing.db)
is_(reflected.c.foo.type._type_affinity, sqltypes.JSON)
assert isinstance(reflected.c.foo.type, mysql.JSON)
@testing.provide_metadata
def test_rudimental_round_trip(self):
# note that test_suite has many more JSON round trip tests
# using the backend-agnostic JSON type
mysql_json = Table(
'mysql_json', self.metadata,
Column('foo', mysql.JSON)
)
self.metadata.create_all()
value = {
'json': {'foo': 'bar'},
'recs': ['one', 'two']
}
with testing.db.connect() as conn:
conn.execute(mysql_json.insert(), foo=value)
eq_(
conn.scalar(select([mysql_json.c.foo])),
value
)
class EnumSetTest(
fixtures.TestBase, AssertsExecutionResults, AssertsCompiledSQL):
__only_on__ = 'mysql'
__dialect__ = mysql.dialect()
__backend__ = True
@testing.provide_metadata
def test_enum(self):
"""Exercise the ENUM type."""
with testing.expect_deprecated('Manually quoting ENUM value literals'):
e1, e2 = mysql.ENUM("'a'", "'b'"), mysql.ENUM("'a'", "'b'")
e3 = mysql.ENUM("'a'", "'b'", strict=True)
e4 = mysql.ENUM("'a'", "'b'", strict=True)
enum_table = Table(
'mysql_enum', self.metadata,
Column('e1', e1),
Column('e2', e2, nullable=False),
Column(
'e2generic',
Enum("a", "b", validate_strings=True), nullable=False),
Column('e3', e3),
Column('e4', e4,
nullable=False),
Column('e5', mysql.ENUM("a", "b")),
Column('e5generic', Enum("a", "b")),
Column('e6', mysql.ENUM("'a'", "b")),
)
eq_(
colspec(enum_table.c.e1),
"e1 ENUM('a','b')")
eq_(
colspec(enum_table.c.e2),
"e2 ENUM('a','b') NOT NULL")
eq_(
colspec(enum_table.c.e2generic),
"e2generic ENUM('a','b') NOT NULL")
eq_(
colspec(enum_table.c.e3),
"e3 ENUM('a','b')")
eq_(
colspec(enum_table.c.e4),
"e4 ENUM('a','b') NOT NULL")
eq_(
colspec(enum_table.c.e5),
"e5 ENUM('a','b')")
eq_(
colspec(enum_table.c.e5generic),
"e5generic ENUM('a','b')")
eq_(
colspec(enum_table.c.e6),
"e6 ENUM('''a''','b')")
enum_table.create()
assert_raises(
exc.DBAPIError, enum_table.insert().execute,
e1=None, e2=None, e3=None, e4=None)
assert_raises(
exc.StatementError,
enum_table.insert().execute,
e1='c', e2='c', e2generic='c', e3='c',
e4='c', e5='c', e5generic='c', e6='c')
enum_table.insert().execute()
enum_table.insert().execute(e1='a', e2='a', e2generic='a', e3='a',
e4='a', e5='a', e5generic='a', e6="'a'")
enum_table.insert().execute(e1='b', e2='b', e2generic='b', e3='b',
e4='b', e5='b', e5generic='b', e6='b')
res = enum_table.select().execute().fetchall()
expected = [(None, 'a', 'a', None, 'a', None, None, None),
('a', 'a', 'a', 'a', 'a', 'a', 'a', "'a'"),
('b', 'b', 'b', 'b', 'b', 'b', 'b', 'b')]
eq_(res, expected)
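    # Quoting sketch (exercised above): plain mysql.ENUM("a", "b") renders as
    # ENUM('a','b'), while mixing a pre-quoted literal with a plain value, as
    # in mysql.ENUM("'a'", "b"), keeps the embedded quotes and renders as
    # ENUM('''a''','b').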
def _set_fixture_one(self):
with testing.expect_deprecated('Manually quoting SET value literals'):
e1, e2 = mysql.SET("'a'", "'b'"), mysql.SET("'a'", "'b'")
e4 = mysql.SET("'a'", "b")
e5 = mysql.SET("'a'", "'b'", quoting="quoted")
set_table = Table(
'mysql_set', self.metadata,
Column('e1', e1),
Column('e2', e2, nullable=False),
Column('e3', mysql.SET("a", "b")),
Column('e4', e4),
Column('e5', e5)
)
return set_table
def test_set_colspec(self):
self.metadata = MetaData()
set_table = self._set_fixture_one()
eq_(
colspec(set_table.c.e1),
"e1 SET('a','b')")
eq_(colspec(
set_table.c.e2),
"e2 SET('a','b') NOT NULL")
eq_(
colspec(set_table.c.e3),
"e3 SET('a','b')")
eq_(
colspec(set_table.c.e4),
"e4 SET('''a''','b')")
eq_(
colspec(set_table.c.e5),
"e5 SET('a','b')")
@testing.provide_metadata
def test_no_null(self):
set_table = self._set_fixture_one()
set_table.create()
assert_raises(
exc.DBAPIError, set_table.insert().execute,
e1=None, e2=None, e3=None, e4=None)
@testing.only_on('+oursql')
@testing.provide_metadata
def test_oursql_error_one(self):
set_table = self._set_fixture_one()
set_table.create()
assert_raises(
exc.StatementError, set_table.insert().execute,
e1='c', e2='c', e3='c', e4='c')
@testing.fails_on("+oursql", "oursql raises on the truncate warning")
@testing.provide_metadata
def test_empty_set_no_empty_string(self):
t = Table(
't', self.metadata,
Column('id', Integer),
Column('data', mysql.SET("a", "b"))
)
t.create()
with testing.db.begin() as conn:
conn.execute(
t.insert(),
{'id': 1, 'data': set()},
{'id': 2, 'data': set([''])},
{'id': 3, 'data': set(['a', ''])},
{'id': 4, 'data': set(['b'])},
)
eq_(
conn.execute(t.select().order_by(t.c.id)).fetchall(),
[
(1, set()),
(2, set()),
(3, set(['a'])),
(4, set(['b'])),
]
)
def test_bitwise_required_for_empty(self):
assert_raises_message(
exc.ArgumentError,
"Can't use the blank value '' in a SET without setting "
"retrieve_as_bitwise=True",
mysql.SET, "a", "b", ''
)
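    # In other words, mysql.SET("a", "b", '', retrieve_as_bitwise=True) is the
    # accepted spelling when '' must be a member; test_empty_set_empty_string
    # below exercises exactly that form.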
@testing.provide_metadata
def test_empty_set_empty_string(self):
t = Table(
't', self.metadata,
Column('id', Integer),
Column('data', mysql.SET("a", "b", '', retrieve_as_bitwise=True))
)
t.create()
with testing.db.begin() as conn:
conn.execute(
t.insert(),
{'id': 1, 'data': set()},
{'id': 2, 'data': set([''])},
{'id': 3, 'data': set(['a', ''])},
{'id': 4, 'data': set(['b'])},
)
eq_(
conn.execute(t.select().order_by(t.c.id)).fetchall(),
[
(1, set()),
(2, set([''])),
(3, set(['a', ''])),
(4, set(['b'])),
]
)
@testing.provide_metadata
def test_string_roundtrip(self):
set_table = self._set_fixture_one()
set_table.create()
with testing.db.begin() as conn:
conn.execute(
set_table.insert(),
dict(e1='a', e2='a', e3='a', e4="'a'", e5="a,b"))
conn.execute(
set_table.insert(),
dict(e1='b', e2='b', e3='b', e4='b', e5="a,b"))
expected = [
(set(['a']), set(['a']), set(['a']),
set(["'a'"]), set(['a', 'b'])),
(set(['b']), set(['b']), set(['b']),
set(['b']), set(['a', 'b']))
]
res = conn.execute(
set_table.select()
).fetchall()
eq_(res, expected)
@testing.provide_metadata
def test_unicode_roundtrip(self):
set_table = Table(
't', self.metadata,
Column('id', Integer, primary_key=True),
Column('data', mysql.SET(
u('réveillé'), u('drôle'), u('S’il'), convert_unicode=True)),
)
set_table.create()
with testing.db.begin() as conn:
conn.execute(
set_table.insert(),
{"data": set([u('réveillé'), u('drôle')])})
row = conn.execute(
set_table.select()
).first()
eq_(
row,
(1, set([u('réveillé'), u('drôle')]))
)
@testing.provide_metadata
def test_int_roundtrip(self):
set_table = self._set_fixture_one()
set_table.create()
with testing.db.begin() as conn:
conn.execute(
set_table.insert(),
dict(e1=1, e2=2, e3=3, e4=3, e5=0)
)
res = conn.execute(set_table.select()).first()
eq_(
res,
(
set(['a']), set(['b']), set(['a', 'b']),
set(["'a'", 'b']), set([]))
)
@testing.provide_metadata
def test_set_roundtrip_plus_reflection(self):
set_table = Table(
'mysql_set', self.metadata,
Column('s1', mysql.SET("dq", "sq")),
Column('s2', mysql.SET("a")),
Column('s3', mysql.SET("5", "7", "9")))
eq_(colspec(set_table.c.s1), "s1 SET('dq','sq')")
eq_(colspec(set_table.c.s2), "s2 SET('a')")
eq_(colspec(set_table.c.s3), "s3 SET('5','7','9')")
set_table.create()
reflected = Table('mysql_set', MetaData(testing.db),
autoload=True)
for table in set_table, reflected:
def roundtrip(store, expected=None):
expected = expected or store
table.insert(store).execute()
row = table.select().execute().first()
eq_(row, tuple(expected))
table.delete().execute()
roundtrip([None, None, None], [None] * 3)
roundtrip(['', '', ''], [set([])] * 3)
roundtrip([set(['dq']), set(['a']), set(['5'])])
roundtrip(['dq', 'a', '5'], [set(['dq']), set(['a']),
set(['5'])])
roundtrip([1, 1, 1], [set(['dq']), set(['a']), set(['5'])])
roundtrip([set(['dq', 'sq']), None, set(['9', '5', '7'])])
set_table.insert().execute(
{'s3': set(['5'])},
{'s3': set(['5', '7'])},
{'s3': set(['5', '7', '9'])},
{'s3': set(['7', '9'])})
rows = select(
[set_table.c.s3],
set_table.c.s3.in_([set(['5']), ['5', '7']])
).execute().fetchall()
found = set([frozenset(row[0]) for row in rows])
eq_(found, set([frozenset(['5']), frozenset(['5', '7'])]))
@testing.provide_metadata
def test_unicode_enum(self):
metadata = self.metadata
t1 = Table(
'table', metadata,
Column('id', Integer, primary_key=True),
Column('value', Enum(u('réveillé'), u('drôle'), u('S’il'))),
Column('value2', mysql.ENUM(u('réveillé'), u('drôle'), u('S’il')))
)
metadata.create_all()
t1.insert().execute(value=u('drôle'), value2=u('drôle'))
t1.insert().execute(value=u('réveillé'), value2=u('réveillé'))
t1.insert().execute(value=u('S’il'), value2=u('S’il'))
eq_(t1.select().order_by(t1.c.id).execute().fetchall(),
[
(1, u('drôle'), u('drôle')),
(2, u('réveillé'), u('réveillé')),
(3, u('S’il'), u('S’il'))
])
# test reflection of the enum labels
m2 = MetaData(testing.db)
t2 = Table('table', m2, autoload=True)
# TODO: what's wrong with the last element ? is there
# latin-1 stuff forcing its way in ?
eq_(
t2.c.value.type.enums[0:2],
[u('réveillé'), u('drôle')] # u'S’il') # eh ?
)
eq_(
t2.c.value2.type.enums[0:2],
[u('réveillé'), u('drôle')] # u'S’il') # eh ?
)
def test_enum_compile(self):
e1 = Enum('x', 'y', 'z', name='somename')
t1 = Table('sometable', MetaData(), Column('somecolumn', e1))
self.assert_compile(schema.CreateTable(t1),
"CREATE TABLE sometable (somecolumn "
"ENUM('x','y','z'))")
t1 = Table('sometable', MetaData(), Column('somecolumn',
Enum('x', 'y', 'z', native_enum=False)))
self.assert_compile(schema.CreateTable(t1),
"CREATE TABLE sometable (somecolumn "
"VARCHAR(1), CHECK (somecolumn IN ('x', "
"'y', 'z')))")
@testing.provide_metadata
@testing.exclude('mysql', '<', (4,), "3.23 can't handle an ENUM of ''")
def test_enum_parse(self):
with testing.expect_deprecated('Manually quoting ENUM value literals'):
enum_table = Table(
'mysql_enum', self.metadata,
Column('e1', mysql.ENUM("'a'")),
Column('e2', mysql.ENUM("''")),
Column('e3', mysql.ENUM('a')),
Column('e4', mysql.ENUM('')),
Column('e5', mysql.ENUM("'a'", "''")),
Column('e6', mysql.ENUM("''", "'a'")),
Column('e7', mysql.ENUM("''", "'''a'''", "'b''b'", "''''")))
for col in enum_table.c:
self.assert_(repr(col))
enum_table.create()
reflected = Table('mysql_enum', MetaData(testing.db),
autoload=True)
for t in enum_table, reflected:
eq_(t.c.e1.type.enums, ["a"])
eq_(t.c.e2.type.enums, [""])
eq_(t.c.e3.type.enums, ["a"])
eq_(t.c.e4.type.enums, [""])
eq_(t.c.e5.type.enums, ["a", ""])
eq_(t.c.e6.type.enums, ["", "a"])
eq_(t.c.e7.type.enums, ["", "'a'", "b'b", "'"])
@testing.provide_metadata
@testing.exclude('mysql', '<', (5,))
def test_set_parse(self):
with testing.expect_deprecated('Manually quoting SET value literals'):
set_table = Table(
'mysql_set', self.metadata,
Column('e1', mysql.SET("'a'")),
Column('e2', mysql.SET("''", retrieve_as_bitwise=True)),
Column('e3', mysql.SET('a')),
Column('e4', mysql.SET('', retrieve_as_bitwise=True)),
Column('e5', mysql.SET("'a'", "''", retrieve_as_bitwise=True)),
Column('e6', mysql.SET("''", "'a'", retrieve_as_bitwise=True)),
Column('e7', mysql.SET(
"''", "'''a'''", "'b''b'", "''''",
retrieve_as_bitwise=True)))
for col in set_table.c:
self.assert_(repr(col))
set_table.create()
# don't want any warnings on reflection
reflected = Table('mysql_set', MetaData(testing.db),
autoload=True)
for t in set_table, reflected:
eq_(t.c.e1.type.values, ("a",))
eq_(t.c.e2.type.values, ("",))
eq_(t.c.e3.type.values, ("a",))
eq_(t.c.e4.type.values, ("",))
eq_(t.c.e5.type.values, ("a", ""))
eq_(t.c.e6.type.values, ("", "a"))
eq_(t.c.e7.type.values, ("", "'a'", "b'b", "'"))
def colspec(c):
return testing.db.dialect.ddl_compiler(
testing.db.dialect, None).get_column_specification(c)
| [
"[email protected]"
] | |
3c3d95dff7b579e3d543083c1d578d7d2cb875e9 | edd8ad3dcb6ee9b019c999b712f8ee0c468e2b81 | /Python 300/06. Dictionary/085.py | 312117acb86d80c3c56dabd741a80f0428f061e9 | [] | no_license | narinn-star/Python | 575cba200de35b9edf3832c4e41ccce657075751 | 14eba211cd3a9e9708a30073ba5b31d21d39eeef | refs/heads/master | 2023-05-25T22:57:26.079294 | 2021-06-07T15:29:39 | 2021-06-07T15:29:39 | 331,647,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | #딕셔너리 _ 바인딩
ice_cream = {'메로나':1000, '폴라포':1200, '빵빠레':1800}
print(ice_cream) | [
"[email protected]"
] | |
5e34b4c222f783ed89445515b8e02676f8214e69 | fc629dba07e98bfd44a671112f47091ad8935631 | /others/paging.py | 74599271584ddce1c2d8a45117c02262d51cfb41 | [] | no_license | lostsquirrel/python_test | c990e0c29bdf2eecae9411983b68d1f984afac84 | eb171b45bbf2f29cd1307aefd8e4609b683773d8 | refs/heads/master | 2022-09-01T11:30:16.847626 | 2022-05-18T07:43:49 | 2022-05-18T07:43:49 | 9,890,003 | 0 | 1 | null | 2022-05-18T07:43:49 | 2013-05-06T15:35:24 | Python | UTF-8 | Python | false | false | 67 | py | for x in range(50):
print x
print (x + 5 - 1) / 5
print | [
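# worked example: x = 12 items at a page size of 5 -> (12 + 5 - 1) / 5 = 3
# pages; adding (page size - 1) before the integer division rounds up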
"[email protected]"
] | |
ea4e5b13a18d090736df358341d6202a061d1db4 | 612325535126eaddebc230d8c27af095c8e5cc2f | /src/build/android/gyp/touch.py | d3a8735504614a4f317a231b2d10a4808cf31e0c | [
"BSD-3-Clause"
] | permissive | TrellixVulnTeam/proto-quic_1V94 | 1a3a03ac7a08a494b3d4e9857b24bb8f2c2cd673 | feee14d96ee95313f236e0f0e3ff7719246c84f7 | refs/heads/master | 2023-04-01T14:36:53.888576 | 2019-10-17T02:23:04 | 2019-10-17T02:23:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | #!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
from util import build_utils
def main(argv):
for f in argv[1:]:
build_utils.Touch(f)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| [
"[email protected]"
] | |
aefb6400427a8ca905416089a51c9b81d336cb16 | 7701773efa258510951bc7d45325b4cca26b3a7d | /tkinter_explore/positionAppWindow.py | f479edd5f1b498b414abe22911f53753c94429d3 | [] | no_license | Archanciel/explore | c170b2c8b5eed0c1220d5e7c2ac326228f6b2485 | 0576369ded0e54ce7ff9596ec4df076e69067e0c | refs/heads/master | 2022-06-17T19:15:03.647074 | 2022-06-01T20:07:04 | 2022-06-01T20:07:04 | 105,314,051 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | from tkinter import *
import tkinter.messagebox as msgb
root = Tk() # create a Tk root window
w = 600 # width for the Tk root
h = 300 # height for the Tk root
# get screen width and height
ws = root.winfo_screenwidth() # width of the screen
hs = root.winfo_screenheight() # height of the screen
# calculate x and y coordinates for the Tk root window
x = (ws - w) / 2
y = (hs - h) / 2
# set the dimensions of the screen
# and where it is placed
root.geometry('%dx%d+%d+%d' % (w, h, x, y))
root.title('Order medicaments')
def on_sendMail():
global entryform
strng = entryform.get()
if strng == '1':
msgb.showinfo(message='You typed 1') # modif 3
else:
msgb.showinfo(message='Please type 1') # modif 4
entryform = Entry(root)
entryform.pack()
sendmail = Button(root, text="Send mail", command=on_sendMail)
sendmail.pack(side=BOTTOM)
root.mainloop() # starts the mainloop | [
"[email protected]"
] | |
1cce3d810392e2a1fdd61dad3232bb7d13a880d8 | f75a6b032d3c3543b3833c88ddf12a06cba8228a | /lab2/cartpole_dqn.py | 65a5b05c727b2c4d595be8c4f0a3527ad12cee46 | [] | no_license | chris4540/EL2805-Reinforcement-Learning | aab36d3e29bf1cabea1a326c2ac8b38ea9cda4bb | 385c19655ade75f5e263beef0f43f2a9476641c4 | refs/heads/master | 2023-02-14T01:37:41.270208 | 2020-04-12T22:31:46 | 2020-04-12T22:31:46 | 328,265,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,358 | py | import sys
import gym
import pylab
import random
import numpy as np
from pathlib import Path
from os.path import join
from collections import deque
from keras.layers import Dense
from keras.optimizers import Adam
from keras.models import Sequential
from utils.exp_folder import make_exp_folder
from utils.hparams import HyperParams
from utils.csvlogger import CSVLogger
from argparse import ArgumentParser
from argparse import ArgumentDefaultsHelpFormatter
EPISODES = 1000 # Maximum number of episodes
#DQN Agent for the Cartpole
#Q function approximation with NN, experience replay, and target network
class DQNAgent:
#Constructor for the agent (invoked when DQN is first called in main)
def __init__(self, state_size, action_size, exp_folder, **kwargs):
        # If True, stop if you satisfy the solution condition
self.check_solve = False
# If you want to see Cartpole learning, then change to True
self.render = False
# Get size of state and action
self.state_size = state_size
self.action_size = action_size
self.exp_folder = make_exp_folder(exp_folder)
# Modify here
hparams = HyperParams(**kwargs)
hparams.display()
hparams.save_to_txt(self.exp_folder / "hparams.txt")
hparams.save_to_json(self.exp_folder / "hparams.json")
# Set hyper parameters for the DQN. Do not adjust those labeled as Fixed.
self.discount_factor = hparams.discount_factor
self.learning_rate = hparams.learning_rate
self.target_update_frequency = hparams.target_update_frequency
self.memory_size = hparams.memory_size
# -----------------------------
self.epsilon = 0.02 # Fixed
self.batch_size = 32 # Fixed
self.train_start = 1000 # Fixed
# Number of test states for Q value plots
self.test_state_no = 10000
#Create memory buffer using deque
self.memory = deque(maxlen=self.memory_size)
#Create main network and target network (using build_model defined below)
self.model = self.build_model()
self.target_model = self.build_model()
# save down the model summary
with open(self.exp_folder / 'model_summary.txt','w') as fh:
self.model.summary(print_fn=lambda x: fh.write(x + '\n'))
#Initialize target network
self.update_target_model()
def build_model(self):
"""
Approximate Q function using Neural Network
State is the input and the Q Values are the output.
See also:
https://keras.io/getting-started/sequential-model-guide/
"""
# Edit the Neural Network model here
model = Sequential()
model.add(Dense(8, input_dim=self.state_size, activation='relu',
kernel_initializer='he_uniform'))
model.add(Dense(16, activation='relu', kernel_initializer='he_uniform'))
model.add(Dense(8, activation='relu', kernel_initializer='he_uniform'))
model.add(Dense(4, activation='relu', kernel_initializer='he_uniform'))
model.add(Dense(self.action_size, activation='linear',
kernel_initializer='he_uniform'))
model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
return model
# After some time interval update the target model to be same with model
def update_target_model(self):
self.target_model.set_weights(self.model.get_weights())
def get_action(self, state):
""" Get action from model using epsilon-greedy policy
Args:
state ([type]): [description]
Returns:
[type]: An action
"""
if np.random.binomial(1, self.epsilon) == 1:
# random policy
action = random.randrange(self.action_size)
else:
# e-greedy policy
q_values = self.model.predict(state)
action = q_values.argmax()
return action
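    # With self.epsilon = 0.02 (set in __init__), roughly 2% of actions are
    # random exploration and the rest are greedy on the predicted Q values.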
def append_sample(self, state, action, reward, next_state, done):
"""
Save sample <s,a,r,s'> to the replay memory
"""
# Add sample to the end of the list
self.memory.append((state, action, reward, next_state, done))
#Sample <s,a,r,s'> from replay memory
def train_model(self):
# Do not train if not enough memory
if len(self.memory) < self.train_start:
return
# Train on at most as many samples as you have in memory
batch_size = min(self.batch_size, len(self.memory))
# Uniformly sample the memory buffer
mini_batch = random.sample(self.memory, batch_size)
# -----------------------------------------------------
# Preallocate network and target network input matrices.
# -----------------------------------------------------
# batch_size by state_size two-dimensional array (not matrix!)
update_input = np.zeros((batch_size, self.state_size))
# Same as above, but used for the target network
update_target = np.zeros((batch_size, self.state_size))
# Empty arrays that will grow dynamically
# action, reward, done = [], [], []
action = list()
reward = list()
done = list()
# for i in range(self.batch_size):
for i in range(batch_size):
# Allocate s(i) to the network input array from iteration i
# in the batch
update_input[i] = mini_batch[i][0]
# Store a(i)
action.append(mini_batch[i][1])
# Store r(i)
reward.append(mini_batch[i][2])
# Allocate s'(i) for the target network array from iteration i
# in the batch
update_target[i] = mini_batch[i][3]
# Store done(i)
done.append(mini_batch[i][4])
# Generate target values for training the inner loop network using
# the network model
target = self.model.predict(update_input)
# Generate the target values for training the outer loop target network
target_val = self.target_model.predict(update_target)
# -----------------------------------------------------------
# Q Learning: get maximum Q value at s' from target network
# Read Part7.pdf, page 29
# -----------------------------------------------------------
# for i in range(self.batch_size): #For every batch
for i in range(batch_size):
if done[i]:
# if this is the episode ends
target[i][action[i]] = reward[i]
else:
# Consider also the future reward (Q-value predicted by outer loop)
target[i][action[i]] = (
reward[i] + self.discount_factor * np.max(target_val[i]))
# Train the inner loop network
self.model.fit(update_input, target, batch_size=self.batch_size,
epochs=1, verbose=0)
return
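    # Worked example of the target update above (illustrative numbers only):
    # with reward[i] = 1.0, discount_factor = 0.95 and a target-network
    # prediction of [0.2, 0.7] for s', a non-terminal transition yields
    # target[i][action[i]] = 1.0 + 0.95 * 0.7 = 1.665, while a terminal
    # transition uses the reward alone: target[i][action[i]] = 1.0.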
def plot_data(self, episodes, scores, max_q_mean):
"""
Plots the score per episode as well as the maximum q value per episode,
averaged over precollected states.
"""
pylab.figure(0)
pylab.plot(episodes, max_q_mean, 'b')
pylab.xlabel("Episodes")
pylab.ylabel("Average Q Value")
pylab.savefig(self.exp_folder / "qvalues.png")
pylab.figure(1)
pylab.plot(episodes, scores, 'b')
pylab.xlabel("Episodes")
pylab.ylabel("Score")
pylab.savefig(self.exp_folder / "scores.png")
###############################################################################
###############################################################################
if __name__ == "__main__":
# parser
parser = ArgumentParser(description='Lab 2 catpole dqn',
formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('--mem_size', type=int)
parser.add_argument('--lr', type=float)
parser.add_argument('--update_fq', type=int)
parser.add_argument('--discount', type=float)
parser.add_argument('--folder', default=str(Path("experiments/exp")),
help="The exp folder")
args = parser.parse_args()
exp_folder = args.folder
# set csv logger
logger = CSVLogger(join(exp_folder, "history.csv"))
# set hyperparams
hparams = dict()
    # independent ifs: allow several hyperparameters to be overridden at once
    if args.mem_size:
        hparams['memory_size'] = args.mem_size
    if args.discount:
        hparams['discount_factor'] = args.discount
    if args.update_fq:
        hparams['target_update_frequency'] = args.update_fq
    if args.lr:
        hparams['learning_rate'] = args.lr
# -------------------------------------------------------------------------
# For CartPole-v0, maximum episode length is 200
# Generate Cartpole-v0 environment object from the gym library
env = gym.make('CartPole-v0')
#Get state and action sizes from the environment
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
print("state_size: ", state_size)
print("action_size: ", action_size)
# Create agent, see the DQNAgent __init__ method for details
agent = DQNAgent(state_size, action_size, exp_folder=exp_folder, **hparams)
# Collect test states for plotting Q values using uniform random policy
test_states = np.zeros((agent.test_state_no, state_size))
max_q = np.zeros((EPISODES, agent.test_state_no))
max_q_mean = np.zeros((EPISODES,1))
done = True
for i in range(agent.test_state_no):
if done:
done = False
state = env.reset()
state = np.reshape(state, [1, state_size])
test_states[i] = state
else:
action = random.randrange(action_size)
next_state, reward, done, info = env.step(action)
next_state = np.reshape(next_state, [1, state_size])
test_states[i] = state
state = next_state
scores, episodes = [], [] #Create dynamically growing score and episode counters
for e in range(EPISODES):
done = False
score = 0
state = env.reset() #Initialize/reset the environment
        # Reshape state into a 1 by state_size two-dimensional array
# i.e. [x_1,x_2] to [[x_1,x_2]]
state = np.reshape(state, [1, state_size])
# Compute Q values for plotting
tmp = agent.model.predict(test_states)
max_q[e][:] = np.max(tmp, axis=1)
max_q_mean[e] = np.mean(max_q[e][:])
while not done:
if agent.render:
env.render() #Show cartpole animation
# Get action for the current state and go one step in environment
action = agent.get_action(state)
next_state, reward, done, info = env.step(action)
# Reshape next_state similarly to state
next_state = np.reshape(next_state, [1, state_size])
#Save sample <s, a, r, s'> to the replay memory
agent.append_sample(state, action, reward, next_state, done)
#Training step
agent.train_model()
score += reward #Store episodic reward
state = next_state #Propagate state
if done:
                    # At the end of every episode, update the target network
if e % agent.target_update_frequency == 0:
agent.update_target_model()
#Plot the play time for every episode
scores.append(score)
episodes.append(e)
print("episode:", e, " score:", score,
" q_value:", max_q_mean[e],
" memory length:", len(agent.memory))
logger.log(episode=e, score=score, q_value=max_q_mean[e][0],
                       mem_len=len(agent.memory))
# if the mean of scores of last 100 episodes is bigger than 195
# stop training
if agent.check_solve:
if np.mean(scores[-min(100, len(scores)):]) >= 195:
print("solved after", e-100, "episodes")
agent.plot_data(episodes,scores,max_q_mean[:e+1])
sys.exit()
agent.plot_data(episodes,scores,max_q_mean)
| [
"[email protected]"
] | |
ecd2bf9d8a8924b7839511254ca2f355886cbded | 85601d534fbcc6df900af7509c189075a3112422 | /src/aulas/03_desafio/datas.py | 2c90d82c0273c05fb57f2b3dcbc61d6150d19ba8 | [] | no_license | claudimf/python_oo_1 | a58229abe5dc8a784e887ca94168bcdbbfc0f6ef | 1749456f709b850f5340adecd8b47f860184ca5a | refs/heads/main | 2023-03-22T14:17:16.232643 | 2021-03-09T20:14:23 | 2021-03-09T20:14:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | class Data:
def __init__(self, dia, mes, ano):
print("Construindo objeto...{}".format(self))
self.dia = dia
self.mes = mes
self.ano = ano
def formatada(self):
data = "{}/{}/{}".format(self.dia, self.mes, self.ano)
print(data)
return data
| [
"[email protected]"
] | |
9aa9b6f754e3fc9b59ddd7507b80c98344daad0a | a591cc3da6681c53a236e45f4fadaff5993254ab | /renpy/game.py | 1a4f0353fe4875a41251c8a1963e7680afbaf7ae | [] | no_license | StephenChan/renpy | 6b0a48eb5734cfa32bbcffc9a3c833be173aff38 | a0872d1cdccbadea24d03c482767e57999bd728b | refs/heads/master | 2020-12-25T06:51:12.428305 | 2012-07-17T09:06:32 | 2012-07-17T09:06:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,709 | py | # Copyright 2004-2012 Tom Rothamel <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# This module is intended to be used as a singleton object.
# It's purpose is to store in one global all of the data that would
# be to annoying to lug around otherwise.
import renpy.display
# The basepath.
basepath = None
# A list of paths that we search to load things. This is searched for
# everything that can be loaded, before archives are used.
searchpath = [ ]
# The options that were read off the command line.
args = None
# The game's script.
script = None
# A stack of execution contexts.
contexts = [ ]
# The interface that the game uses to interact with the user.
interface = None
# Are we inside lint?
lint = False
# The RollbackLog that keeps track of changes to the game state
# and to the store.
log = None
# Some useful additional information about program execution that
# can be added to the exception.
exception_info = ''
# Used to store style information.
style = None
# The set of statements we've seen in this session.
seen_session = { }
# The set of statements we've ever seen.
seen_ever = { }
# True if we're in the first interaction after a rollback or rollforward.
after_rollback = False
# Code that's run after the init code.
post_init = [ ]
# Should we attempt to run in a mode that uses less memory?
less_memory = False
# Should we attempt to run in a mode that minimizes the number
# of screen updates?
less_updates = False
# Should we never show the mouse?
less_mouse = False
# Should we not imagedissiolve?
less_imagedissolve = False
# The class that's used to hold the persistent data.
class Persistent(object):
def __setstate__(self, data):
vars(self).update(data)
def __getstate__(self):
return vars(self)
# Undefined attributes return None.
def __getattr__(self, attr):
return None
# The persistent data that's kept from session to session
persistent = Persistent()
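# Because __getattr__ above returns None for undefined attributes, e.g.
# persistent.any_unset_flag evaluates to None until a value is assigned.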
class Preferences(renpy.object.Object):
"""
Stores preferences that will one day be persisted.
"""
__version__ = 4
def after_upgrade(self, version):
if version < 1:
self.mute_volumes = 0
if version < 2:
self.using_afm_enable = False
if version < 3:
self.physical_size = None
if version < 4:
self.renderer = "auto"
self.performance_test = True
def __init__(self):
self.fullscreen = False
self.skip_unseen = False
self.text_cps = 0
self.afm_time = 0
self.afm_enable = True
# These will be going away soon.
self.sound = True
self.music = True
# 2 - All transitions.
# 1 - Only non-default transitions.
# 0 - No transitions.
self.transitions = 2
self.skip_after_choices = False
# Mixer channel info.
# A map from channel name to the current volume (between 0 and 1).
self.volumes = { }
# True if the channel should not play music. False
# otherwise. (Not used anymore.)
self.mute = { }
# Joystick mappings.
self.joymap = dict(
joy_left="Axis 0.0 Negative",
joy_right="Axis 0.0 Positive",
joy_up="Axis 0.1 Negative",
joy_down="Axis 0.1 Positive",
joy_dismiss="Button 0.0")
# The size of the window, or None if we don't know it yet.
self.physical_size = None
# The graphics renderer we use.
self.renderer = "auto"
# Should we do a performance test on startup?
self.performance_test = True
def set_volume(self, mixer, volume):
self.volumes[mixer] = volume
def get_volume(self, mixer):
return self.volumes.get(mixer, 0)
def set_mute(self, mixer, mute):
self.mute[mixer] = mute
def get_mute(self, mixer):
return self.mute[mixer]
# The current preferences.
preferences = Preferences()
class RestartException(Exception):
"""
This class will be used to convey to the system that the context has
been changed, and therefore execution needs to be restarted.
"""
def __init__(self, contexts, label): # W0231
self.contexts = contexts
self.label = label
class FullRestartException(Exception):
"""
An exception of this type forces a hard restart, completely
destroying the store and config and so on.
"""
def __init__(self, reason="end_game"): # W0231
self.reason = reason
class UtterRestartException(Exception):
"""
An exception of this type forces an even harder restart, causing
Ren'Py and the script to be reloaded.
"""
class QuitException(Exception):
"""
An exception of this class will let us force a safe quit, from
anywhere in the program.
`relaunch`
If given, the program will run another copy of itself, with the
same arguments.
"""
def __init__(self, relaunch=False):
Exception.__init__(self)
self.relaunch = relaunch
class JumpException(Exception):
"""
This should be raised with a label as the only argument. This causes
the current statement to terminate, and execution to be transferred
to the named label.
"""
class JumpOutException(Exception):
"""
This should be raised with a label as the only argument. This exits
the current context, and then raises a JumpException.
"""
class ParseErrorException(Exception):
"""
This is raised when a parse error occurs, after it has been
reported to the user.
"""
# A tuple of exceptions that should not be caught by the
# exception reporting mechanism.
CONTROL_EXCEPTIONS = (
RestartException,
FullRestartException,
UtterRestartException,
QuitException,
JumpException,
JumpOutException,
ParseErrorException,
KeyboardInterrupt,
)
def context(index=-1):
"""
Return the current execution context, or the context at the
given index if one is specified.
"""
return contexts[index]
def invoke_in_new_context(callable, *args, **kwargs): #@ReservedAssignment
"""
This pushes the current context, and invokes the given python
function in a new context. When that function returns or raises an
exception, it removes the new context, and restores the current
context.
Additional arguments and keyword arguments are passed to the
callable.
Please note that the context so created cannot execute renpy
code. So exceptions that change the flow of renpy code (like
the one created by renpy.jump) cause this context to terminate,
and are handled by the next higher context.
If you want to execute renpy code from the function, you can call
it with renpy.call_in_new_context.
Use this to begin a second interaction with the user while
inside an interaction.
"""
context = renpy.execution.Context(False, contexts[-1], clear=True)
contexts.append(context)
try:
return callable(*args, **kwargs)
finally:
contexts.pop()
if interface.restart_interaction and contexts:
contexts[-1].scene_lists.focused = None
def call_in_new_context(label, *args, **kwargs):
"""
This code creates a new context, and starts executing code from
that label in the new context. Rollback is disabled in the
new context. (Actually, it will just bring you back to the
real context.)
Use this to begin a second interaction with the user while
inside an interaction.
"""
context = renpy.execution.Context(False, contexts[-1], clear=True)
contexts.append(context)
if args:
renpy.store._args = args
else:
renpy.store._args = None
if kwargs:
renpy.store._kwargs = renpy.python.RevertableDict(kwargs)
else:
renpy.store._kwargs = None
try:
context.goto_label(label)
context.run()
rv = renpy.store._return #@UndefinedVariable
context.pop_all_dynamic()
contexts.pop()
return rv
except renpy.game.JumpOutException, e:
context.pop_all_dynamic()
contexts.pop()
raise renpy.game.JumpException(e.args[0])
finally:
if interface.restart_interaction and contexts:
contexts[-1].scene_lists.focused = None
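# Usage sketch (the label name here is hypothetical, not part of Ren'Py):
#   rv = call_in_new_context('confirm_quit')
# runs the 'confirm_quit' label in a fresh context and returns store._return.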
# Type information.
if False:
script = renpy.script.Script()
interface = renpy.display.core.Interface()
log = renpy.python.RollbackLog()
| [
"[email protected]"
] | |
d4ad2735c72bf142a87e0d9275110fef799a9d38 | 190072bc404751d83e5aceb99a34ccba1067caae | /twyg/tree.py | 4c498e375bc139a660b211918d217bbb7f9ffd7f | [] | no_license | karstenw/Library | ab751bde79bb0bd2bd7f705901dab415ba154476 | 9c3f665be4988c14d939d28e7729c72819bba446 | refs/heads/master | 2023-08-14T04:53:15.559747 | 2023-07-16T12:27:19 | 2023-07-16T12:27:19 | 46,520,062 | 0 | 0 | null | 2023-05-18T14:06:29 | 2015-11-19T21:00:38 | Python | UTF-8 | Python | false | false | 9,295 | py | import sys
from twyg.geom import Vector2, Rectangle
# py3 stuff
py3 = False
try:
unicode('')
punicode = unicode
pstr = str
punichr = unichr
except NameError:
punicode = str
pstr = bytes
py3 = True
punichr = chr
long = int
class Direction(object):
Top, Right, Bottom, Left = range(4)
def opposite_dir(d):
if d == Direction.Bottom:
return Direction.Top
if d == Direction.Right:
return Direction.Left
if d == Direction.Top:
return Direction.Bottom
if d == Direction.Left:
return Direction.Right
else:
raise ValueError( 'Invalid direction: %s' % d )
class Node(object):
def __init__(self, label, parent=None):
"""
Create a new node and associate it with a parent node. If
``parent`` is ``None``, a root node will be created.
"""
self.label = label
self.parent = parent
if parent:
parent.children.append(self)
self.children = []
self.x = 0
self.y = 0
# Property name (Python classes) to variable name (config expressions)
# mappings
self.property_mappings = {
'x': 'x',
'y': 'y',
'fontsize': 'fontSize',
'width': 'width',
'height': 'height',
'bboxwidth': 'bboxWidth',
'bboxheight': 'bboxHeight',
'textwidth': 'textWidth',
'textheight': 'textHeight',
'max_text_width': 'maxTextWidth',
'bgcolor': 'bgColor',
'basecolor': 'baseColor',
'fillcolor': 'fillColor',
'strokecolor': 'strokeColor',
'connectioncolor': 'connectionColor',
'fontcolor': 'fontColor',
'lineheight': 'lineHeight'
}
def isleaf(self):
return len(self.children) == 0
def isroot(self):
return self.parent == None
def depth(self):
depth = 0
curr = self
while curr.parent:
curr = curr.parent
depth += 1
return depth
def ancestor(self, n):
""" Get the n-th ancestor of this node.
If ``n`` is negative, the ancestor is counted from the root node.
If ``n`` is 0, the root node is returned.
"""
# If n is positive, get n-th ancestor from the node towards the root
if n > 0:
depth = 0
curr = self
while curr.parent:
curr = curr.parent
depth += 1
if depth == n:
return curr
            raise ValueError('Node ancestor argument out of range: %s' % n)
# If n is negative or zero, get n-th ancestor from the root
# towards the node
if n <= 0:
curr = self
ancestors = [curr]
while curr.parent:
curr = curr.parent
ancestors.append(curr)
if n == 0:
return ancestors[-1]
else:
n -= 1
if -n > len(ancestors):
            raise ValueError('Node ancestor argument out of range: %s' % n)
return ancestors[n]
def direction(self):
""" Get the position of the node in relation to its parent. """
if self.isroot():
return None
if self.x - self.parent.x < 0:
return Direction.Left
else:
return Direction.Right
def getchildren(self, direction=None):
if direction:
return [c for c in self.children
if c.direction() == direction]
else:
return self.children
def connection_point(self, direction):
return self.nodedrawer.drawer.connection_point(self, direction)
def shiftbranch(self, dx, dy):
self.x += dx
self.y += dy
for child in self.children:
child.shiftbranch(dx, dy)
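# Usage sketch for Node (hypothetical labels, illustration only):
#   root = Node('root'); child = Node('child', parent=root)
#   leaf = Node('leaf', parent=child)
#   leaf.depth() == 2; leaf.ancestor(1) is child; leaf.ancestor(0) is root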
class TreeBuilder(object):
def build_tree(self, tree):
#TODO proper error handling
if type(tree) != dict:
raise ValueError('Invalid JSON structure: Root element must be a dict')
# root = tree.iteritems().next()
if py3:
root = iter(tree.items()).__next__()
else:
root = tree.iteritems().next()
root_label = root[0]
children = root[1]
root_node = Node(root_label)
self._build_tree(root_node, children)
return root_node
def _build_tree(self, node, children):
if type(children) in (pstr, punicode):
Node(children, parent=node)
else:
for c in children:
if type(c) == dict:
child = Node( list(c.keys())[0], parent=node)
self._build_tree(child, list(c.values())[0])
elif type(c) in (list, tuple):
#TODO proper error handling
raise ValueError('Invalid JSON structure: Dicts cannot have List siblings')
else:
Node(c, parent=node)
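# Shape of the parsed structure build_tree accepts (illustrative only):
#   {'root': ['leaf a', {'branch': ['leaf b', 'leaf c']}]}
# i.e. a single-key dict at the top, with children given as a string, a list
# of strings, or nested single-key dicts.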
class Tree(object):
def __init__(self, layout, nodedrawers, conndrawers, colorizers, data):
builder = TreeBuilder()
self.root = builder.build_tree(data)
self._nodelist = []
self._collect_nodes(self.root, self._nodelist)
self._layout = layout
self._nodedrawers = nodedrawers
self._conndrawers = conndrawers
self._colorizers = colorizers
# Precalculate the orientation of the nodes before assigning the
# drawer objects to them. It is important to do this before the
# assignment would occur, because the section level rules take
# the nodes' orientation into consideration when determining the
# correct drawer for them.
self._layout.precalc_layout(self.root)
self._assign_drawers()
def print_tree(self):
self._print_tree(self.root)
def calclayout(self):
for node in self._nodelist:
node.nodedrawer.drawer.precalc_node(node)
self._layout.calclayout(self.root)
self._colorize_nodes(self.root)
self.bbox = self._calcbbox()
self.shiftnodes(-self.bbox.x, -self.bbox.y)
return self.bbox.w, self.bbox.h
def shiftnodes(self, dx, dy):
self.root.shiftbranch(dx, dy)
def draw(self):
self._draw_connections(self.root)
self._draw_nodes()
def background_color(self):
# The background color for the canvas is always taken from the first
# colorizer instance.
return self._colorizers[0].drawer.background_color()
def _assign_drawers(self):
""" Assign the correct drawer objects for each node as specified
by the section level configurations."""
for node in self._nodelist:
for nd in self._nodedrawers:
if nd.level.selects(node, self._layout):
node.nodedrawer = nd
for cd in self._conndrawers:
if cd.level.selects(node, self._layout):
node.conndrawer = cd
for c in self._colorizers:
if c.level.selects(node, self._layout):
node.colorizer = c
def _print_tree(self, node):
print( ( " " * node.depth() * 2) + node.label )
for child in node.children:
self._print_tree(child)
def _collect_nodes(self, node, nodelist):
for child in node.children:
self._collect_nodes(child, nodelist)
nodelist.append(node)
def _draw_nodes(self):
self._nodelist.sort(key=lambda x: x.y, reverse=False)
for node in self._nodelist:
node.nodedrawer.drawer.draw(node)
def _draw_connections(self, node):
for node in self._nodelist:
node.conndrawer.drawer.draw(node)
def _colorize_nodes(self, node):
node.colorizer.drawer.colorize(node)
for child in node.children:
self._colorize_nodes(child)
def _calcbbox(self):
m = 2**31 #sys.maxint
topleft = Vector2(m, m)
bottomright = Vector2(-m, -m)
self._calcbbox_recurse(self.root, topleft, bottomright)
return Rectangle(topleft.x, topleft.y,
bottomright.x - topleft.x, bottomright.y - topleft.y)
def _calcbbox_recurse(self, node, topleft, bottomright):
if node.x < topleft.x:
topleft.x = node.x
if node.y < topleft.y:
topleft.y = node.y
x2 = node.x + node.bboxwidth
y2 = node.y + node.bboxheight
if x2 > bottomright.x:
bottomright.x = x2
if y2 > bottomright.y:
bottomright.y = y2
for child in node.children:
self._calcbbox_recurse(child, topleft, bottomright)
| [
"[email protected]"
] | |
540ec371e20da9710f8b7d384d2aebac11bd4566 | 5d2d214fff5892d381d0328bca3db04b14e358fb | /final_design/tests/led_button/test.py | 6de08985003f3434fd31485b02d71f28402fba87 | [
"MIT"
] | permissive | DFEC-R2D2/r2d2 | 9552705188ed6e3d8c144881eb7c9ddfacfd8072 | 9b64233865ebfe9f0ca3f1b400b55cc8d6494adf | refs/heads/master | 2021-01-20T13:27:36.953410 | 2018-10-30T21:37:50 | 2018-10-30T21:37:50 | 90,496,130 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py | #!/usr/bin/env python
from __future__ import print_function
from time import sleep
import numpy as np
import sys
sys.path.append('../../python')
from library import ButtonLED
if __name__ == "__main__":
button = ButtonLED(16,26,20)
try:
while True:
choice = input("Enter LED color:\n0-Off\n1-Red\n2-Green\n3-Blue\n4-Quit\n>>")
if choice == 0:
button.setRGB(False, False, False)
elif choice == 1:
button.setRGB(True, False, False)
elif choice == 2:
button.setRGB(False, True, False)
elif choice == 3:
button.setRGB(False, False, True)
elif choice == 4:
break
except KeyboardInterrupt:
print("ctl-c")
button.setRGB(False, False, False)
| [
"[email protected]"
] | |
b0a5d2a9d7f6eb5e27604901ec38a320cfa7aed6 | fe39d984440b0dbb612b9d3e20b93a5e795ebefa | /part-1/py-10-recursia/r_04.py | 7aa71334dc89df0abd03f6784bd5e568ee65376f | [] | no_license | maximkavm/py-algorithmization-and-programming | f7d6b1b13d2557511aaccec6d9b006ac871c5477 | c1d8228601c3cea17e11e6a2736659ef395f675d | refs/heads/master | 2023-08-14T09:11:02.433801 | 2021-03-01T19:30:26 | 2021-03-01T19:30:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | '''
find the minimal element of a list
'''
def get(lst):
if len(lst) == 1:
return lst[0]
else:
        # return lst[0] if lst[0] < get(lst[1:]) else get(lst[1:]) # duplicates the recursive call = bad
tmp = get(lst[1:])
        return lst[0] if lst[0] < tmp else tmp # no duplicated call
lst = [8, 4, 6, 5, -7, 9]
# lst = [] # no guard against an empty list
print(get(lst))
| [
"[email protected]"
] | |
838da3ee7f8801a79052070457d4856066a9b52b | ced56909016fb7c2175c3911fc8481bd5fdf0800 | /pytext/contrib/pytext_lib/resources/models.py | da9e9a7457bed0a62a52c84eb1994d4f3eb04695 | [
"BSD-3-Clause"
] | permissive | coderbyr/pytext | e258a3aae625e6a2fd386b60f25ac44a7b4149fe | 72c1ad835a30bef425494b02a6210f2e3232b1a4 | refs/heads/master | 2022-11-20T09:11:44.991716 | 2020-07-20T22:05:42 | 2020-07-20T22:07:15 | 281,286,078 | 1 | 0 | NOASSERTION | 2020-07-21T03:32:42 | 2020-07-21T03:32:41 | null | UTF-8 | Python | false | false | 637 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
ROBERTA_BASE_TORCH = "roberta_base_torch"
ROBERTA_PUBLIC = "roberta_public"
XLMR_BASE = "xlmr_base"
XLMR_DUMMY = "xlmr_dummy"
URL = {
ROBERTA_BASE_TORCH: "https//dl.fbaipublicfiles.com/pytext/models/roberta/roberta_base_torch.pt", # noqa
ROBERTA_PUBLIC: "https//dl.fbaipublicfiles.com/pytext/models/roberta/roberta_public.pt1", # noqa
XLMR_BASE: "https://dl.fbaipublicfiles.com/pytext/models/xlm_r/checkpoint_base_1500k.pt", # noqa
XLMR_DUMMY: "https://dl.fbaipublicfiles.com/pytext/models/xlm_r/xlmr_dummy.pt", # noqa
}
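# Lookup sketch (illustrative): URL[XLMR_BASE] yields the download link for
# the XLM-R base checkpoint defined above.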
| [
"[email protected]"
] | |
8c970a6df1bf767aedfed12bb7b0243ac4d8c558 | d7016f69993570a1c55974582cda899ff70907ec | /sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2022_02_01_preview/aio/operations/_data_collection_rules_operations.py | 7881913800ab760b33da28cfbf4ebef49b77aa54 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 29,803 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
from urllib.parse import parse_qs, urljoin, urlparse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._data_collection_rules_operations import (
build_create_request,
build_delete_request,
build_get_request,
build_list_by_resource_group_request,
build_list_by_subscription_request,
build_update_request,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DataCollectionRulesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~$(python-base-namespace).v2022_02_01_preview.aio.MonitorManagementClient`'s
:attr:`data_collection_rules` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_resource_group(
self, resource_group_name: str, **kwargs: Any
) -> AsyncIterable["_models.DataCollectionRuleResource"]:
"""Lists all data collection rules in the specified resource group.
Lists all data collection rules in the specified resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DataCollectionRuleResource or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~$(python-base-namespace).v2022_02_01_preview.models.DataCollectionRuleResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-09-01-preview")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.DataCollectionRuleResourceListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_resource_group.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urlparse(next_link)
_next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DataCollectionRuleResourceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponseCommonV2, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_resource_group.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionRules"} # type: ignore
@distributed_trace
def list_by_subscription(self, **kwargs: Any) -> AsyncIterable["_models.DataCollectionRuleResource"]:
"""Lists all data collection rules in the specified subscription.
Lists all data collection rules in the specified subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DataCollectionRuleResource or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~$(python-base-namespace).v2022_02_01_preview.models.DataCollectionRuleResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-09-01-preview")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.DataCollectionRuleResourceListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_subscription.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urlparse(next_link)
_next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DataCollectionRuleResourceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponseCommonV2, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_subscription.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Insights/dataCollectionRules"} # type: ignore
@distributed_trace_async
async def get(
self, resource_group_name: str, data_collection_rule_name: str, **kwargs: Any
) -> _models.DataCollectionRuleResource:
"""Returns the specified data collection rule.
Returns the specified data collection rule.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param data_collection_rule_name: The name of the data collection rule. The name is case
insensitive. Required.
:type data_collection_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataCollectionRuleResource or the result of cls(response)
:rtype: ~$(python-base-namespace).v2022_02_01_preview.models.DataCollectionRuleResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-09-01-preview")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.DataCollectionRuleResource]
request = build_get_request(
resource_group_name=resource_group_name,
data_collection_rule_name=data_collection_rule_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponseCommonV2, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("DataCollectionRuleResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionRules/{dataCollectionRuleName}"} # type: ignore
@overload
async def create(
self,
resource_group_name: str,
data_collection_rule_name: str,
body: Optional[_models.DataCollectionRuleResource] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.DataCollectionRuleResource:
"""Creates or updates a data collection rule.
Creates or updates a data collection rule.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param data_collection_rule_name: The name of the data collection rule. The name is case
insensitive. Required.
:type data_collection_rule_name: str
:param body: The payload. Default value is None.
:type body: ~$(python-base-namespace).v2022_02_01_preview.models.DataCollectionRuleResource
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataCollectionRuleResource or the result of cls(response)
:rtype: ~$(python-base-namespace).v2022_02_01_preview.models.DataCollectionRuleResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def create(
self,
resource_group_name: str,
data_collection_rule_name: str,
body: Optional[IO] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.DataCollectionRuleResource:
"""Creates or updates a data collection rule.
Creates or updates a data collection rule.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param data_collection_rule_name: The name of the data collection rule. The name is case
insensitive. Required.
:type data_collection_rule_name: str
:param body: The payload. Default value is None.
:type body: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataCollectionRuleResource or the result of cls(response)
:rtype: ~$(python-base-namespace).v2022_02_01_preview.models.DataCollectionRuleResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def create(
self,
resource_group_name: str,
data_collection_rule_name: str,
body: Optional[Union[_models.DataCollectionRuleResource, IO]] = None,
**kwargs: Any
) -> _models.DataCollectionRuleResource:
"""Creates or updates a data collection rule.
Creates or updates a data collection rule.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param data_collection_rule_name: The name of the data collection rule. The name is case
insensitive. Required.
:type data_collection_rule_name: str
:param body: The payload. Is either a model type or a IO type. Default value is None.
:type body: ~$(python-base-namespace).v2022_02_01_preview.models.DataCollectionRuleResource or
IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataCollectionRuleResource or the result of cls(response)
:rtype: ~$(python-base-namespace).v2022_02_01_preview.models.DataCollectionRuleResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-09-01-preview")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.DataCollectionRuleResource]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(body, (IO, bytes)):
_content = body
else:
if body is not None:
_json = self._serialize.body(body, "DataCollectionRuleResource")
else:
_json = None
request = build_create_request(
resource_group_name=resource_group_name,
data_collection_rule_name=data_collection_rule_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.create.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponseCommonV2, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("DataCollectionRuleResource", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("DataCollectionRuleResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionRules/{dataCollectionRuleName}"} # type: ignore
@overload
async def update(
self,
resource_group_name: str,
data_collection_rule_name: str,
body: Optional[_models.ResourceForUpdate] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.DataCollectionRuleResource:
"""Updates part of a data collection rule.
Updates part of a data collection rule.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param data_collection_rule_name: The name of the data collection rule. The name is case
insensitive. Required.
:type data_collection_rule_name: str
:param body: The payload. Default value is None.
:type body: ~$(python-base-namespace).v2022_02_01_preview.models.ResourceForUpdate
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataCollectionRuleResource or the result of cls(response)
:rtype: ~$(python-base-namespace).v2022_02_01_preview.models.DataCollectionRuleResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def update(
self,
resource_group_name: str,
data_collection_rule_name: str,
body: Optional[IO] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.DataCollectionRuleResource:
"""Updates part of a data collection rule.
Updates part of a data collection rule.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param data_collection_rule_name: The name of the data collection rule. The name is case
insensitive. Required.
:type data_collection_rule_name: str
:param body: The payload. Default value is None.
:type body: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataCollectionRuleResource or the result of cls(response)
:rtype: ~$(python-base-namespace).v2022_02_01_preview.models.DataCollectionRuleResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def update(
self,
resource_group_name: str,
data_collection_rule_name: str,
body: Optional[Union[_models.ResourceForUpdate, IO]] = None,
**kwargs: Any
) -> _models.DataCollectionRuleResource:
"""Updates part of a data collection rule.
Updates part of a data collection rule.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param data_collection_rule_name: The name of the data collection rule. The name is case
insensitive. Required.
:type data_collection_rule_name: str
:param body: The payload. Is either a model type or a IO type. Default value is None.
:type body: ~$(python-base-namespace).v2022_02_01_preview.models.ResourceForUpdate or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataCollectionRuleResource or the result of cls(response)
:rtype: ~$(python-base-namespace).v2022_02_01_preview.models.DataCollectionRuleResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-09-01-preview")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.DataCollectionRuleResource]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(body, (IO, bytes)):
_content = body
else:
if body is not None:
_json = self._serialize.body(body, "ResourceForUpdate")
else:
_json = None
request = build_update_request(
resource_group_name=resource_group_name,
data_collection_rule_name=data_collection_rule_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponseCommonV2, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("DataCollectionRuleResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionRules/{dataCollectionRuleName}"} # type: ignore
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, data_collection_rule_name: str, **kwargs: Any
) -> None:
"""Deletes a data collection rule.
Deletes a data collection rule.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param data_collection_rule_name: The name of the data collection rule. The name is case
insensitive. Required.
:type data_collection_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-09-01-preview")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
resource_group_name=resource_group_name,
data_collection_rule_name=data_collection_rule_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponseCommonV2, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionRules/{dataCollectionRuleName}"} # type: ignore
| [
"[email protected]"
] | |
f8e579e2133a16d5c607a651e58a7752082a7494 | 2324dea2cb3003c8ab7e8fd80588d44973eb8c77 | /Euler_5_251b.py | e0502f1b9bb84fe6bcae764690f07d67cf20cb16 | [] | no_license | MikeOcc/MyProjectEulerFiles | 5f51bc516cb6584732dc67bb2f9c7fd9e6d51e56 | 4d066d52380aade215636953589bf56d6b88f745 | refs/heads/master | 2021-01-16T18:45:44.133229 | 2015-05-27T18:28:43 | 2015-05-27T18:28:43 | 5,876,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,550 | py | #
# Euler Problem 251
#
#
#
from math import *
from time import time
from itertools import combinations
from operator import mul
def RetFact(n):
#from time import time
#st = time()
ndiv = n
factlist=[ ]
ctr = 2
while ndiv >1:
#temp = ndiv
        if ndiv % ctr == 0:
            factlist.append(ctr)
            ndiv /= ctr
        else:
            ctr += 1
#print "process time",time()-st
return factlist
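
# Worked example (for illustration): RetFact(360) walks trial divisors starting
# at 2 and returns [2, 2, 2, 3, 3, 5] -- the prime factors of 360 with
# multiplicity, in ascending order.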
st = time()
ctr = 0
lim = 10000  # 110000000
maxi, maxj, maxk = 0, 0, 0
numturns = 2000  # 60000
print "Cardano Numbers for limit with turns:", lim, numturns
for i in xrange(1,numturns):
a = i * 3 -1
f = (i**2) * (8*i -3)
if f< lim:
b=1;c=f
if a + b + c > lim:continue
ctr+=1
#print ctr,")",i,":",a, b,c,":",a+b+c,f
L = RetFact(f)
#M= L #remove
S = sorted(set(L))
P=[]
for k in S:
x=L.count(k)
if (x/2)>=1:
P.append(x/2)
else:
L.remove(k)
S = sorted(set(L))
L=list(S)
#print L
        for m in xrange(len(P)):
if P[m]>1:
#print "P",P[m]
for j in xrange(1,P[m]):
#print i,j,S[m]
L.append(S[m])
R=[]
for jj in range(1, len(L)+1):
for subset in combinations(L, jj):
#print(subset)
R.append(reduce(mul,subset))
R=list(set(R))
R=sorted(R)
for j in R:
#print "J",j,R,f,M,S
            if f / float(j * j) == int(f / j / j):
                b = j; c = f / (j * j)
                if a + b + c > lim:
                    continue
                ctr += 1
#print ctr,")",i,":",a, b,c,":",a+b+c,f
print "Number of Cardano Triplets <=",lim, "is", ctr
print "Process time is", time()-st
| [
"[email protected]"
] | |
c10f65b33d4bbfd4a45b433b2db3adb4e1b984bb | 3d8b4e0415bd4d818c17dcf0b3fc1676caad47b6 | /examples/switch/lan_host.py | 9d44db8a2e8d2843b1dd1a31f598978dc82fb447 | [
"MIT"
] | permissive | ShigemoriHakura/NintendoClients | 68c08ab2f54e36fb41eab2b2461bedd5bd1522be | da56e272f8504bcc8e5238128d63ab6ee44fa2c7 | refs/heads/master | 2022-03-09T19:20:05.647911 | 2022-03-01T23:35:01 | 2022-03-01T23:35:01 | 252,926,498 | 0 | 1 | MIT | 2022-03-01T23:35:02 | 2020-04-04T06:30:32 | null | UTF-8 | Python | false | false | 1,256 | py |
from nintendo.games import Splatoon2
from nintendo.pia import lan, settings, types
from nintendo.nex import common
import secrets
import random
import anyio
import math
import logging
logging.basicConfig(level=logging.INFO)
SESSION_ID = random.randint(1, 0xFFFFFFFF)
HOST_ID = random.randint(1, 0xFFFFFFFFFFFFFFFF)
HOST_NAME = "Yannik"
def handler():
host_address = types.StationLocation()
host = lan.LanStationInfo()
host.role = lan.LanStationInfo.HOST
host.username = HOST_NAME
host.id = HOST_ID
session = lan.LanSessionInfo()
session.game_mode = 0
session.session_id = SESSION_ID
session.attributes = [0, 0, 0, 0, 0, 0]
session.num_participants = 1
session.min_participants = 1
session.max_participants = 10
session.system_version = 5
session.application_version = 66
session.session_type = 0
session.application_data = HOST_NAME.encode("utf-16le").ljust(74, b"\0")
session.is_opened = True
session.host_location = host_address
session.stations[0] = host
session.session_param = secrets.token_bytes(32)
return [session]
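
# Note (summary of the values above, hedged): SESSION_ID and HOST_ID are random
# stand-ins, and the application_data layout -- the UTF-16LE host name padded to
# 74 bytes -- mirrors what this Splatoon 2 example advertises; other games may
# expect a different payload.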
async def main():
s = settings.default(Splatoon2.PIA_VERSION)
async with lan.serve(s, handler, Splatoon2.PIA_KEY):
print("LAN server is running...")
await anyio.sleep(math.inf)
anyio.run(main)
| [
"[email protected]"
] | |
ac2da397f5bb79b0908c86ef4b342c2b147f18b2 | 7759122052337252217fff9d51ec6d125ef370e0 | /iq/util/str_func.py | 5b46708639867cb94f43aefafcae016472ee0243 | [] | no_license | XHermitOne/iq_framework | 3325670c74233d99e599921fad4bd41e5d8104f3 | 7550e242746cb2fb1219474463f8db21f8e3e114 | refs/heads/master | 2023-09-03T21:07:58.107750 | 2023-09-01T07:30:13 | 2023-09-01T07:30:13 | 195,210,479 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,674 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
String and text manipulation functions.
"""
import encodings.aliases
from . import log_func
from .. import global_data
__version__ = (0, 0, 1, 1)
def getEncodings():
"""
Supported code page list.
"""
try:
encode_list = [str(code).lower() for code in encodings.aliases.aliases.values()]
result = list()
for code in encode_list:
if code not in result:
result.append(code)
result.sort()
return tuple(result)
except:
return 'utf_8',
def replaceUpper2Lower(txt):
"""
Replacing uppercase letters in a string with underscores
except the first character.
:param txt: Text string as AbcdEfghIklmn.
:return: Modified text as abcd_efgh_iklmn.
"""
if not isinstance(txt, str):
txt = str(txt)
txt = ''.join([symb.lower() if i and symb.isupper() and txt[i-1].isupper() else symb for i, symb in enumerate(list(txt))])
return ''.join([('_'+symb.lower() if symb.isupper() and i else symb.lower()) for i, symb in enumerate(list(txt))])
def replaceLower2Upper(txt):
"""
    Convert snake_case text to CamelCase: drop the underscores and uppercase
    the first letter and each letter that follows an underscore.
:param txt: Text string as abcd_efgh_iklmn.
:return: Modified text as AbcdEfghIklmn.
"""
if not isinstance(txt, str):
txt = str(txt)
return ''.join([(symb.upper() if not i or (i and symb.islower() and txt[i-1] == '_') else symb.lower()) for i, symb in enumerate(list(txt)) if symb != '_'])
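
# Examples (illustrative):
#   replaceUpper2Lower('AbcdEfghIklmn')   -> 'abcd_efgh_iklmn'
#   replaceLower2Upper('abcd_efgh_iklmn') -> 'AbcdEfghIklmn'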
INDENT = ' '
def data2txt(data, level=0):
"""
Translation of a dictionary-list structure into formatted text.
:param data: Vocabulary list structure.
:param level: Nesting level.
:return: Formatted text.
"""
txt = ''
try:
if isinstance(data, list):
txt = txt + '\n' + level * INDENT + '[\n'
for obj in data:
txt += level * INDENT
txt += data2txt(obj, level + 1)
txt += ',\n'
if len(data) != 0:
txt = txt[:-2]
txt = txt + '\n' + level * INDENT + ']'
elif isinstance(data, dict):
txt = txt + '\n' + level * INDENT + '{\n'
keys = data.keys()
values = data.values()
for key in keys:
txt = txt + level * INDENT + '\'' + key + '\':'
txt += data2txt(data[key], level + 1)
txt += ',\n'
if len(keys) != 0:
txt = txt[:-2]
txt = txt + '\n' + level * INDENT + '}'
elif isinstance(data, str):
# Check for quotes
            txt = txt + '\'' + data.replace('\'', '\\\'').replace('\r', '\\r').replace('\n', '\\n').replace('\t', '\\t') + '\''
else:
txt = txt + str(data)
# Remove first carriage return
if txt[0] == '\n' and (not level):
txt = txt[1:]
except:
log_func.fatal(u'Error transform data to text. Level <%d>' % level)
return txt
RU_ENCODINGS = {'UTF-8': 'utf-8',
'CP1251': 'windows-1251',
'KOI8-R': 'koi8-r',
'IBM866': 'ibm866',
'ISO-8859-5': 'iso-8859-5',
'MAC': 'mac',
}
def getCodepage(text=None):
"""
Definition of text encoding.
Function call example:
print(RU_ENCODINGS[getCodepage(file('test.txt').read())])
There is an alternative encoding definition (using chardet):
a = 'sdfds'
import chardet
print(chardet.detect(a))
{'confidence': 1.0, 'encoding': 'ascii'}
a = 'any text'
print(chardet.detect(a))
{'confidence': 0.99, 'encoding': 'utf-8'}
"""
uppercase = 1
lowercase = 3
utfupper = 5
utflower = 7
codepages = {}
for enc in RU_ENCODINGS.keys():
codepages[enc] = 0
if text is not None and len(text) > 0:
last_simb = 0
for simb in text:
simb_ord = ord(simb)
# non-russian characters
if simb_ord < 128 or simb_ord > 256:
continue
# UTF-8
if last_simb == 208 and (143 < simb_ord < 176 or simb_ord == 129):
codepages['UTF-8'] += (utfupper * 2)
if (last_simb == 208 and (simb_ord == 145 or 175 < simb_ord < 192)) \
or (last_simb == 209 and (127 < simb_ord < 144)):
codepages['UTF-8'] += (utflower * 2)
# CP1251
if 223 < simb_ord < 256 or simb_ord == 184:
codepages['CP1251'] += lowercase
if 191 < simb_ord < 224 or simb_ord == 168:
codepages['CP1251'] += uppercase
# KOI8-R
if 191 < simb_ord < 224 or simb_ord == 163:
codepages['KOI8-R'] += lowercase
if 222 < simb_ord < 256 or simb_ord == 179:
codepages['KOI8-R'] += uppercase
# IBM866
if 159 < simb_ord < 176 or 223 < simb_ord < 241:
codepages['IBM866'] += lowercase
if 127 < simb_ord < 160 or simb_ord == 241:
codepages['IBM866'] += uppercase
# ISO-8859-5
if 207 < simb_ord < 240 or simb_ord == 161:
codepages['ISO-8859-5'] += lowercase
if 175 < simb_ord < 208 or simb_ord == 241:
codepages['ISO-8859-5'] += uppercase
# MAC
if 221 < simb_ord < 255:
codepages['MAC'] += lowercase
if 127 < simb_ord < 160:
codepages['MAC'] += uppercase
last_simb = simb_ord
idx = ''
max_cp = 0
for item in codepages:
if codepages[item] > max_cp:
max_cp = codepages[item]
idx = item
return idx
def recodeText(txt, src_codepage='cp1251', dst_codepage='utf-8'):
"""
Transcode text from one encoding to another.
:param txt: Source text.
:param src_codepage: Source code page.
:param dst_codepage: Destination code page.
:return: Recoded text in a new encoding.
"""
unicode_txt = toUnicode(txt, src_codepage)
if isinstance(unicode_txt, str):
return unicode_txt.encode(dst_codepage)
log_func.warning(u'Error recode text <%s>' % str(txt))
return None
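
# Example (illustrative): recodeText(b'\xcf\xf0\xe8\xe2\xe5\xf2', 'cp1251', 'utf-8')
# decodes the cp1251 bytes for the Russian word 'Привет' and returns the same
# text re-encoded as UTF-8 bytes.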
def toUnicode(value, code_page='utf-8'):
"""
Convert any value to unicode.
:param value: Value.
:param code_page: Code page.
"""
if isinstance(value, str):
return value
elif isinstance(value, bytes):
return value.decode(code_page)
return str(value)
RUS2LAT = {u'а': 'a', u'б': 'b', u'в': 'v', u'г': 'g', u'д': 'd', u'е': 'e', u'ё': 'yo', u'ж': 'j',
u'з': 'z', u'и': 'i', u'й': 'y', u'к': 'k', u'л': 'l', u'м': 'm', u'н': 'n', u'о': 'o', u'п': 'p',
u'р': 'r', u'с': 's', u'т': 't', u'у': 'u', u'ф': 'f', u'х': 'h', u'ц': 'c', u'ч': 'ch',
u'ш': 'sh', u'щ': 'sch', u'ь': '', u'ы': 'y', u'ъ': '', u'э': 'e', u'ю': 'yu', u'я': 'ya',
u'А': 'A', u'Б': 'B', u'В': 'V', u'Г': 'G', u'Д': 'D', u'Е': 'E', u'Ё': 'YO', u'Ж': 'J',
u'З': 'Z', u'И': 'I', u'Й': 'Y', u'К': 'K', u'Л': 'L', u'М': 'M', u'Н': 'N', u'О': 'O', u'П': 'P',
u'Р': 'R', u'С': 'S', u'Т': 'T', u'У': 'U', u'Ф': 'F', u'Х': 'H', u'Ц': 'C', u'Ч': 'CH',
u'Ш': 'SH', u'Щ': 'SCH', u'Ь': '', u'Ы': 'Y', u'Ъ': '', u'Э': 'E', u'Ю': 'YU', u'Я': 'YA'}
def rus2lat(text, translate_dict=RUS2LAT):
"""
Translation of Russian letters into Latin according to the dictionary of substitutions.
"""
if isinstance(text, bytes):
# To unicode
text = text.decode(global_data.DEFAULT_ENCODING)
txt_list = list(text)
txt_list = [translate_dict.setdefault(ch, ch) for ch in txt_list]
return ''.join(txt_list)
def isLATText(text):
"""
The text is written in Latin?
"""
if isinstance(text, str):
rus_chr = [c for c in text if ord(c) > 128]
return not bool(rus_chr)
# This is not a string
return False
def isRUSText(text):
"""
String with Russian letters?
"""
if isinstance(text, str):
rus_chr = [c for c in text if ord(c) > 128]
return bool(rus_chr)
# This is not a string
return False
def isWordsInText(text, words, case_sensitivity=True):
"""
Are there words in the text?
    The search stops at the first match of one of the given words.
:param text: Text.
:param words: Words.
:param case_sensitivity: Check case sensitive?
:return: True (there are such words in the text)/False (no words found).
"""
if not isinstance(text, str):
text = str(text)
find = False
for word in words:
if case_sensitivity:
# Case sensitive check
find = word in text
else:
# Case insensitive check
find = word.lower() in text.lower()
if find:
break
return find
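
# Examples (illustrative):
#   isWordsInText('Hello World', ['world'], case_sensitivity=False) -> True
#   isWordsInText('Hello World', ['world'])                         -> False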
def startswithWords(text, words, case_sensitivity=True):
"""
Search for words at the beginning of the text.
    The search stops at the first match of one of the given words.
:param text: Text.
:param words: Words.
:param case_sensitivity: Check case sensitive?
:return: True (there are such words at the beginning of the text)/False (words not found).
"""
if not isinstance(text, str):
text = str(text)
find = False
for word in words:
if case_sensitivity:
# Case sensitive check
find = text.startswith(word)
else:
# Case insensitive check
find = text.lower().startswith(word.lower())
if find:
break
return find
def endswithWords(text, words, case_sensitivity=True):
"""
Search for words at the end of the text.
    The search stops at the first match of one of the given words.
:param text: Text.
:param words: Words.
:param case_sensitivity: Check case sensitive?
:return: True (there are such words at the end of the text )/False (words not found).
"""
if not isinstance(text, str):
text = str(text)
find = False
for word in words:
if case_sensitivity:
# Case sensitive check
find = text.endswith(word)
else:
# Case insensitive check
find = text.lower().endswith(word.lower())
if find:
break
return find
def isMultiLineText(text=u''):
"""
    Check whether the text contains multiple lines.
:param text: Text.
:return: True - multi line text, False - one line, None - error.
"""
if not isinstance(text, str):
# If the type does not match the text, then the error is
return None
return u'\n' in text.strip()
def isDigitsInText(text):
"""
Checking the presence of numbers in the text.
:param text: Text.
:return: True - there are numbers in the text / False - no numbers .
"""
for symbol in text:
if symbol.isdigit():
return True
return False
def isSerialSymbol(text, symbol=' '):
"""
    Check whether the text is a sequence of a single specific character.
:param text: Text.
:param symbol: Symbol.
:return: True/False.
"""
if not text or not isinstance(text, str):
# If it is an empty string, then it is not a sequence at all
return False
return all([symb == symbol for symb in text])
def isSerial(text):
"""
    Check whether the text is a sequence of a single (repeated) character.
:param text: Text.
:return: True/False.
"""
return isSerialSymbol(text, text[0]) if text and isinstance(text, str) else False
def isSerialZero(text):
"""
    Check whether the text is a sequence of the character '0'.
:param text: Text.
:return: True/False.
"""
return isSerialSymbol(text, '0')
def getStrDigit(text):
"""
Get all numbers from a string of text as a string.
:param text: Text. For example '12ASD321'.
:return: Text with numbers . For example '12321'
"""
return u''.join([symb for symb in text if symb.isdigit()])
def getStrDigitAsInt(text):
"""
Get all digits from a string of text as an integer.
:param text: Text. For example '12ASD321'.
:return: Integer. For example 12321. If there are no digits, then 0 is returned.
"""
num_txt = getStrDigit(text)
return int(num_txt) if num_txt else 0
def replaceInText(text, replacements):
"""
Make a number of replacements in the text.
:param text: Text.
:param replacements: Replacements.
Can be specified as a dictionary or a list of tuples.
Dictionary:
{
'source replace': 'destination replace', ...
}
List of tuples (used when the order of replacements is important ):
[
('source replace', 'destination replace'), ...
]
:return: The text with all the replacements made, or the original text in case of an error.
"""
result_text = text
try:
if isinstance(replacements, dict):
for src_txt, dst_txt in replacements.items():
result_text = result_text.replace(src_txt, dst_txt)
elif isinstance(replacements, list) or isinstance(replacements, tuple):
for src_txt, dst_txt in replacements:
result_text = result_text.replace(src_txt, dst_txt)
else:
# Incorrect type
return text
return result_text
except:
log_func.fatal(u'Error replace in text')
return text
def deleteInText(text, delete_txt_list):
"""
Remove lines from text.
:param text: Text.
:param delete_txt_list: List of lines to remove from text.
:return: The text with all the replacements made, or the original text in case of an error.
"""
replacements = [(str(delete_txt), u'') for delete_txt in delete_txt_list]
return replaceInText(text, replacements=replacements)
def deleteSymbolInText(text, symbol=u' '):
"""
Remove character from text.
:param text: Text.
:param symbol: The character to remove.
:return: Text with a deleted character, or the original text in case of an error.
"""
return deleteInText(text, (symbol, ))
def isFloatStr(text):
"""
Determine if a string is a floating point number.
:param text: Text.
:return: True/False
"""
try:
float(text)
return True
except ValueError:
return False
def isIntStr(text):
"""
Determine if a string is an integer.
:param text: Text.
:return: True/False
"""
return text.isdigit()
def isNoneStr(text):
"""
    Determine whether the string is the literal 'None'.
:param text: Text.
:return: True/False
"""
return text.strip() == 'None'
def parseWiseTypeStr(text):
"""
Type conversion from string to real type.
:param text: Text.
:return: Real type value.
For example:
text = 'None' - None
text = '099' - 99
text = '3.14' - 3.14
text = 'XYZ' - 'XYZ'
"""
if isNoneStr(text):
return None
elif isIntStr(text):
return int(text)
elif isFloatStr(text):
return float(text)
# String
return text
def limitTextLen(text, length, filler=u' '):
"""
Limit the text to length.
If the text is larger, then the last characters are cut off.
If the text is smaller, then characters are added to the end of the text
filling up to the specified length.
:param text: Source text.
:param length: The length of the resulting text.
:param filler: The filler symbol.
:return: Edited text of a certain length.
"""
if not isinstance(text, str):
text = str(text)
if len(filler) > 1:
filler = filler[0]
elif len(filler) == 0:
filler = u' '
if len(text) > length:
return text[:length]
else:
text += filler * (length - len(text))
return text
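
# Examples (illustrative):
#   limitTextLen('abcdef', 4)           -> 'abcd'
#   limitTextLen('abc', 5, filler='*')  -> 'abc**'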
| [
"[email protected]"
] | |
574ff80f57a6e8ad726a37f117325a4420e6af8d | 31945f926e766e2a30ec8cfa720c32a943d84cea | /tests/data/test_split.py | f661482dece4d010326141046042b736a2003ba6 | [
"MIT"
] | permissive | mcorniere/rxsci | 3854129bbf1af727b40cfbc2cb4dedaa27bc8e5f | 7c2bf433a760d4ecab8aee56227803a190458fbe | refs/heads/master | 2023-02-10T05:56:03.607556 | 2021-01-05T14:41:09 | 2021-01-05T14:41:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,156 | py | import rx
import rx.operators as ops
import rxsci as rs
def test_split():
source = ["1a", "2a", "3b", "4b", "5c", "6c", "7c", "8d", "9d"]
source = [
rs.OnCreateMux((1 ,None)),
rs.OnNextMux((1, None), '1a'),
rs.OnNextMux((1, None), '2a'),
rs.OnNextMux((1, None), '3b'),
rs.OnNextMux((1, None), '4b'),
rs.OnNextMux((1, None), '5c'),
rs.OnNextMux((1, None), '6c'),
rs.OnNextMux((1, None), '7c'),
rs.OnNextMux((1, None), '8d'),
rs.OnNextMux((1, None), '9d'),
rs.OnCompletedMux((1, None)),
]
actual_result = []
mux_actual_result = []
expected_result = [
["1a", "2a"],
["3b", "4b"],
["5c", "6c", "7c"],
["8d", "9d"],
]
def on_next(i):
actual_result.append(i)
store = rs.state.StoreManager(store_factory=rs.state.MemoryStore)
rx.from_(source).pipe(
rs.cast_as_mux_observable(),
rs.state.with_store(
store,
rs.data.split(lambda i: i[-1], rx.pipe(
ops.do_action(mux_actual_result.append),
)),
),
).subscribe(on_next)
assert type(mux_actual_result[0]) is rs.state.ProbeStateTopology
assert mux_actual_result[1:] == [
rs.OnCreateMux((1, (1, None)), store),
rs.OnNextMux((1, (1, None)), '1a', store),
rs.OnNextMux((1, (1, None)), '2a', store),
rs.OnCompletedMux((1, (1, None)), store),
rs.OnCreateMux((1, (1, None)), store),
rs.OnNextMux((1, (1, None)), '3b', store),
rs.OnNextMux((1, (1, None)), '4b', store),
rs.OnCompletedMux((1, (1, None)), store),
rs.OnCreateMux((1, (1, None)), store),
rs.OnNextMux((1, (1, None)), '5c', store),
rs.OnNextMux((1, (1, None)), '6c', store),
rs.OnNextMux((1, (1, None)), '7c', store),
rs.OnCompletedMux((1, (1, None)), store),
rs.OnCreateMux((1, (1, None)), store),
rs.OnNextMux((1, (1, None)), '8d', store),
rs.OnNextMux((1, (1, None)), '9d', store),
rs.OnCompletedMux((1, (1, None)), store),
]
assert actual_result == source
| [
"[email protected]"
] | |
cc35a48b6c83a8e7b4678d1337571699e26f2e26 | 8acbd7fcfe1bcf94e4e895e58ac5c81f8ed13741 | /fees/migrations/0001_initial.py | 4d2a79465920cf7560126359a04d00bea99675fb | [] | no_license | Rajangupta09/School-beta | 440af5d5d078a46036cfa3c50865f980c5ff1ace | 3ca6ca9992d2b47bcfe1762beb8c88609d519ea5 | refs/heads/master | 2022-12-07T19:42:19.562804 | 2020-08-04T09:53:04 | 2020-08-04T09:53:04 | 284,509,100 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,634 | py | # Generated by Django 3.0.3 on 2020-05-16 14:24
from django.db import migrations, models
import django.db.models.deletion
import fees.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('classform', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='FeeCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('once', models.BooleanField(default=True)),
('submission_type', models.CharField(max_length=50)),
('description', models.TextField()),
],
),
migrations.CreateModel(
name='FeeCycle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lastSubmissionDate', models.DateField()),
('firstSubmissionDate', models.DateField()),
('cycle', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Fine',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fine', models.IntegerField()),
('category', models.CharField(max_length=100)),
('submissionDate', models.DateField(null=True)),
('description', models.TextField()),
('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='classform.ClassRoomStudent')),
],
),
migrations.CreateModel(
name='FeeDiscount',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('discount', models.IntegerField()),
('category', models.CharField(max_length=50)),
('description', models.TextField()),
('total_off', models.IntegerField()),
('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='classform.ClassRoomStudent')),
],
),
migrations.CreateModel(
name='Fee',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('regNo', models.IntegerField()),
('payment_method', models.CharField(max_length=50)),
('submissionDate', models.DateField()),
('amount', models.IntegerField()),
('monthsPaid', models.CharField(max_length=50)),
('feeSlip', models.FileField(upload_to=fees.models.user_directory_path)),
('classSection', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='classform.ClassRoom')),
('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='classform.ClassRoomStudent')),
],
),
migrations.CreateModel(
name='ClassSectionFees',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fees', models.IntegerField()),
('classSection', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='classform.ClassRoom')),
('feeCategory', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='fees.FeeCategory')),
],
),
]
| [
"[email protected]"
] | |
609d8b4f4188c8ac49f0d80a912a0960915ab279 | ab221e6778959a17a40585defdcf17b2ebf34908 | /SpellingBeePlay.py | fad7da80b2739504abe8ad2716ca673962296735 | [] | no_license | Mystified131/NYTImesGamesSolutions | f1e6a0905120764e9079666e6b59aa4201bf3270 | 349118cd1784888177b7475bcc7bd3b8ea20ba62 | refs/heads/master | 2023-03-23T17:02:25.341274 | 2021-03-07T09:02:13 | 2021-03-07T09:02:13 | 287,679,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,519 | py |
import enchant
from itertools import permutations
import datetime
import random
from subprocess import call
right_now = datetime.datetime.now().isoformat()
tlist = []
for i in right_now:
if i.isnumeric():
tlist.append(i)
tim = ("".join(tlist))
d = enchant.Dict("en_US")
conlst = ["B", "C", "D", "F", "G", "H", "J", "K", "L", "M", "N", "P", "Q", "R", "S", "T", "V", "W", "X", "Z"]
vowlst = ["A", "E", "I", "O", "U", "Y"]
sublst = []
for x in range(26):
ctr = random.randrange(10)
if ctr < 5:
vch = random.randrange(6)
letch = vowlst[vch]
if ctr > 4:
cch = random.randrange(20)
letch = conlst[cch]
if letch not in sublst:
sublst.append(letch)
oplst = []
for x1 in range(7):
astr = sublst[x1]
oplst.append(astr)
x2 = random.randrange(7)
keyletr = oplst[x2]
biglst = []
for elem in oplst:
for x in range(2):
biglst.append(elem)
print("")
print(oplst)
print("")
print("The key letter is: ", keyletr)
print("")
print("This may take a moment, as a puzzle and results are generated.")
print("")
#print("Here are the solutions, if there are any:")
#print("")
#print("Check for modern parlance. Some may not appear in NY Times word list:")
#print("")
wdlst = []
worm = list(permutations(biglst, 4))
for elem in worm:
astr = ""
for wor in elem:
astr += wor
if d.check(astr) and astr not in wdlst and keyletr in astr:
print("Generating possible answers- ", len(wdlst))
print("")
wdlst.append(astr)
worm = []
worn = list(permutations(biglst, 5))
for elem in worn:
astr = ""
for wor in elem:
astr += wor
if d.check(astr) and astr not in wdlst and keyletr in astr:
print("Generating possible answers- ", len(wdlst))
print("")
wdlst.append(astr)
worn = []
#woro = list(permutations(biglst, 6))
#for elem in woro:
#astr = ""
#for wor in elem:
#astr += wor
#if d.check(astr) and astr not in wdlst and keyletr in astr:
#print("Generating possible answers- ", len(wdlst))
#print("")
#wdlst.append(astr)
#woro = []
#worp = list(permutations(biglst, 7))
#for elem in worp:
#astr = ""
#for wor in elem:
#astr += wor
#if d.check(astr) and astr not in wdlst and keyletr in astr:
#print("Generating possible answers- ", len(wdlst))
#print("")
#wdlst.append(astr)
#worp = []
#print("")
#print(wdlst)
#print("")
titstr = "SpellingBeeCreation." + tim + ".txt"
outfile = open(titstr, "w")
outfile.write ("A Tenable Spelling Bee Creation." + '\n' )
outfile.write ("" + '\n' )
outfile.write ("Time: " + tim + '\n')
outfile.write ("" + '\n' )
for elem in oplst:
outfile.write(elem + ",")
outfile.write('\n')
outfile.write (keyletr + '\n' )
outfile.write ("" + '\n' )
for elem in wdlst:
outfile.write(elem + '\n')
outfile.close()
print("")
print("See the answers document in the same folder as your code.")
print("")
pts = 0
if len(wdlst) == 0:
print("")
print("Trying again!")
print("")
call(["python", "SpellingBeePlay.py"])
for ctr in range(1000):
print("")
print("Here are the letter possibilities: ", oplst)
print("")
print("Here is the key letter, to be in each guess at least once: ", keyletr)
print("")
anstr = input("Please enter a guess, 4 or 5 total letters: ")
if anstr.upper() not in wdlst:
print("")
print("That guess does not match any of our saved words, or is a repeat. Check that only given letters appear, and that the key letter is there.")
print("")
print("We also may miss some words.")
print("")
print("Your current score is: ", pts)
print("")
if anstr.upper() in wdlst:
print("")
print("You got one!")
scr = len(anstr)
pts += scr
print("")
print("You get ", scr, " points added for a total of ", pts, " total points.")
print("")
wdlst.remove(anstr.upper())
qtstr = input("Please press q to quit, or anything else to continue: ")
if qtstr == "q" or len(wdlst == 0):
break
print("")
print("You have indicated q, or the list of words is finished.")
print("")
print("Your final score is: ", pts)
print("")
print("Thanks for playing!")
print("")
## THE GHOST OF THE SHADOW ## | [
"[email protected]"
] | |
779f6b0f802f2a2118bacba5ad0e88316ae714b0 | d58bc2475a41e7c36e22947565c099908f84cfd6 | /samples/openapi3/client/petstore/python-experimental/petstore_api/paths/store_inventory/get.py | 76bb8cb65578587169f50f0290859796eaadb706 | [
"Apache-2.0"
] | permissive | yaronius/openapi-generator | d8390dc2cfd9330d3f05a1f517612d793e332ead | 9f3fac53c1689b82bf4c99b664e10e4a5decfb8e | refs/heads/master | 2022-11-03T02:27:44.670087 | 2022-08-17T12:17:30 | 2022-08-17T12:17:30 | 175,407,506 | 0 | 0 | Apache-2.0 | 2023-09-04T20:41:29 | 2019-03-13T11:30:05 | Java | UTF-8 | Python | false | false | 5,532 | py | # coding: utf-8
"""
Generated by: https://openapi-generator.tech
"""
from dataclasses import dataclass
import re # noqa: F401
import sys # noqa: F401
import typing
import urllib3
import functools # noqa: F401
from urllib3._collections import HTTPHeaderDict
from petstore_api import api_client, exceptions
import decimal # noqa: F401
from datetime import date, datetime # noqa: F401
from frozendict import frozendict # noqa: F401
from petstore_api.schemas import ( # noqa: F401
AnyTypeSchema,
ComposedSchema,
DictSchema,
ListSchema,
StrSchema,
IntSchema,
Int32Schema,
Int64Schema,
Float32Schema,
Float64Schema,
NumberSchema,
UUIDSchema,
DateSchema,
DateTimeSchema,
DecimalSchema,
BoolSchema,
BinarySchema,
NoneSchema,
none_type,
Configuration,
Unset,
unset,
ComposedBase,
ListBase,
DictBase,
NoneBase,
StrBase,
IntBase,
Int32Base,
Int64Base,
Float32Base,
Float64Base,
NumberBase,
UUIDBase,
DateBase,
DateTimeBase,
BoolBase,
BinaryBase,
Schema,
NoneClass,
BoolClass,
_SchemaValidator,
_SchemaTypeChecker,
_SchemaEnumMaker
)
from . import path
_auth = [
'api_key',
]
class SchemaFor200ResponseBodyApplicationJson(
DictSchema
):
_additional_properties = Int32Schema
def __new__(
cls,
*args: typing.Union[dict, frozendict, ],
_configuration: typing.Optional[Configuration] = None,
**kwargs: typing.Type[Schema],
) -> 'SchemaFor200ResponseBodyApplicationJson':
return super().__new__(
cls,
*args,
_configuration=_configuration,
**kwargs,
)
@dataclass
class ApiResponseFor200(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor200ResponseBodyApplicationJson,
]
headers: Unset = unset
_response_for_200 = api_client.OpenApiResponse(
response_cls=ApiResponseFor200,
content={
'application/json': api_client.MediaType(
schema=SchemaFor200ResponseBodyApplicationJson),
},
)
_status_code_to_response = {
'200': _response_for_200,
}
_all_accept_content_types = (
'application/json',
)
class BaseApi(api_client.Api):
def _get_inventory(
self: api_client.Api,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization
]:
"""
Returns pet inventories by status
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
"""
used_path = path.value
_headers = HTTPHeaderDict()
# TODO add cookie handling
if accept_content_types:
for accept_content_type in accept_content_types:
_headers.add('Accept', accept_content_type)
response = self.api_client.call_api(
resource_path=used_path,
method='get'.upper(),
headers=_headers,
auth_settings=_auth,
stream=stream,
timeout=timeout,
)
if skip_deserialization:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
else:
response_for_status = _status_code_to_response.get(str(response.status))
if response_for_status:
api_response = response_for_status.deserialize(response, self.api_client.configuration)
else:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
if not 200 <= response.status <= 299:
raise exceptions.ApiException(api_response=api_response)
return api_response
class GetInventory(BaseApi):
# this class is used by api classes that refer to endpoints with operationId fn names
def get_inventory(
self: BaseApi,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization
]:
return self._get_inventory(
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
class ApiForget(BaseApi):
# this class is used by api classes that refer to endpoints by path and http method names
def get(
self: BaseApi,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization
]:
return self._get_inventory(
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
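
# Usage sketch (illustrative; assumes a configured petstore_api ApiClient --
# the constructor calls below are a plausible pattern, not verified here):
#
#   client = api_client.ApiClient()
#   api = ApiForget(client)
#   api_response = api.get()
#   print(api_response.body)   # e.g. a mapping like {'available': 5, 'sold': 3}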
| [
"[email protected]"
] | |
8b51d93cbec4433a5ff954290538addaaeac3696 | 4bc2af514877135a222826b2c5ac48632045f2fa | /django/juziom/userauth/form.py | d1f13721d408eb598e6074dd8485a31d79aa2690 | [] | no_license | 18734865664/python | 1853481ac1dcd515f691cfc11557f76fbbb083de | 25bc355ddb2abefc5a3736fb99e6345138ebbefc | refs/heads/master | 2020-03-17T09:37:57.469741 | 2018-06-28T08:41:37 | 2018-06-28T08:41:37 | 133,482,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,603 | py | #coding:utf8
from django import forms
from models import *
class LoginForm(forms.Form):
    username = forms.CharField(max_length=255, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': u'Username'}))
    password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'form-control', 'placeholder': u'Password'}))
class ChangepwdForm(forms.Form):
oldpassword = forms.CharField(
required=True,
label=u"原密码",
error_messages={'required': u'请输入原密码'},
widget=forms.PasswordInput(
attrs={
'placeholder':u"原密码",
}
),
)
newpassword1 = forms.CharField(
required=True,
label=u"新密码",
error_messages={'required': u'请输入新密码'},
widget=forms.PasswordInput(
attrs={
'placeholder':u"新密码",
}
),
)
newpassword2 = forms.CharField(
required=True,
label=u"确认密码",
error_messages={'required': u'请再次输入新密码'},
widget=forms.PasswordInput(
attrs={
'placeholder':u"确认密码",
}
),
)
def clean(self):
        if not self.is_valid():
            raise forms.ValidationError(u"All fields are required")
        elif self.cleaned_data['newpassword1'] != self.cleaned_data['newpassword2']:
            raise forms.ValidationError(u"The two new passwords do not match")
        else:
            cleaned_data = super(ChangepwdForm, self).clean()
return cleaned_data | [
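
# Usage sketch (illustrative, not part of this module): in a Django view,
#
#   form = ChangepwdForm(request.POST)
#   if form.is_valid():
#       new_password = form.cleaned_data['newpassword1']
#       # verify the old password, then call user.set_password(new_password)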
"[email protected]"
] | |
cb7986e98a23496bf14138ac78df7fe003dfd9e5 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-resource/azure/mgmt/resource/resources/v2016_09_01/models/deployment_operation_properties_py3.py | 5f38d5397daca470b2e2a824ae06d5fcf2d20902 | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 2,927 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DeploymentOperationProperties(Model):
"""Deployment operation properties.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar provisioning_state: The state of the provisioning.
:vartype provisioning_state: str
:ivar timestamp: The date and time of the operation.
:vartype timestamp: datetime
:ivar service_request_id: Deployment operation service request id.
:vartype service_request_id: str
:ivar status_code: Operation status code.
:vartype status_code: str
:ivar status_message: Operation status message.
:vartype status_message: object
:ivar target_resource: The target resource.
:vartype target_resource:
~azure.mgmt.resource.resources.v2016_09_01.models.TargetResource
:ivar request: The HTTP request message.
:vartype request:
~azure.mgmt.resource.resources.v2016_09_01.models.HttpMessage
:ivar response: The HTTP response message.
:vartype response:
~azure.mgmt.resource.resources.v2016_09_01.models.HttpMessage
"""
_validation = {
'provisioning_state': {'readonly': True},
'timestamp': {'readonly': True},
'service_request_id': {'readonly': True},
'status_code': {'readonly': True},
'status_message': {'readonly': True},
'target_resource': {'readonly': True},
'request': {'readonly': True},
'response': {'readonly': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
'service_request_id': {'key': 'serviceRequestId', 'type': 'str'},
'status_code': {'key': 'statusCode', 'type': 'str'},
'status_message': {'key': 'statusMessage', 'type': 'object'},
'target_resource': {'key': 'targetResource', 'type': 'TargetResource'},
'request': {'key': 'request', 'type': 'HttpMessage'},
'response': {'key': 'response', 'type': 'HttpMessage'},
}
def __init__(self, **kwargs) -> None:
super(DeploymentOperationProperties, self).__init__(**kwargs)
self.provisioning_state = None
self.timestamp = None
self.service_request_id = None
self.status_code = None
self.status_message = None
self.target_resource = None
self.request = None
self.response = None
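
    # Usage sketch (illustrative): instances of this read-only model arrive
    # inside DeploymentOperation objects, e.g.
    #
    #   for op in client.deployment_operations.list(resource_group, deployment):
    #       print(op.properties.provisioning_state, op.properties.status_code)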
| [
"[email protected]"
] | |
53a93667c441c6cbc84bc7b42295359c9203f3ab | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/artificial/transf_Logit/trend_PolyTrend/cycle_7/ar_12/test_artificial_1024_Logit_PolyTrend_7_12_20.py | 356fc68289ce4b41a07aca9517fc5a4f10b4de0d | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 267 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 7, transform = "Logit", sigma = 0.0, exog_count = 20, ar_order = 12); | [
"[email protected]"
] | |
c78f25abc910c9fb03d1dc449d75d91bae270bc1 | 0fb0dba210ff0f63515c464d7acc95ae32d7603c | /File Operations/Hide a File or Folder/hide-a-file-or-folder.py | 4bfe8e23f58b91670e5d641e1f6a6d46d39681d6 | [] | no_license | slad99/pythonscripts | 7cbe6b8bb27c8c06e140c46e7c8cf286cbc56d8e | 4e0ebb023899a602cb041ef6f153fd3b7ab032e9 | refs/heads/master | 2022-01-04T21:49:10.486758 | 2019-06-28T14:29:28 | 2019-06-28T14:29:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | PATH =r'C:\Users\ac\Desktop\AdobeAIRInstaller.exe' # give the path of the file or folder to hidden
import ctypes
class disable_file_system_redirection:
_disable = ctypes.windll.kernel32.Wow64DisableWow64FsRedirection
_revert = ctypes.windll.kernel32.Wow64RevertWow64FsRedirection
def __enter__(self):
self.old_value = ctypes.c_long()
self.success = self._disable(ctypes.byref(self.old_value))
def __exit__(self, type, value, traceback):
if self.success:
self._revert(self.old_value)
import os
from subprocess import Popen, PIPE
if os.path.exists(PATH):
with disable_file_system_redirection():
OBJ = Popen('attrib +H '+PATH, stdin = PIPE, stdout = PIPE, stderr = PIPE, shell=True)
RES = OBJ.communicate()
RET = OBJ.returncode
if RET == 0:
print PATH+' is hidden successfully'
else:
print RES[1]
else:
print '1: Sorry! Given path is not available.'
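
# Note (illustrative): the same pattern with 'attrib -H ' + PATH clears the
# hidden attribute again; attrib ships with Windows, so nothing extra is needed
# to undo the change.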
| [
"[email protected]"
] | |
c168a6f74e4634d04716ba3f0e4d90c19a4f44c2 | 62ff0d3e132b5ab34a8ce309bc7b601e5240196f | /md_importer/tests/test_branch_import.py | b6f5a2de092a17e04369bcabcfedf49ad8d09a41 | [] | no_license | canonical-web-and-design/developer.ubuntu.com-legacy | 2ea766d9a4f42e2ab86fb1950506c0aeb6b5b700 | 55769d377dca312451a627e825a813b589b4d1b9 | refs/heads/master | 2021-06-16T20:38:10.091057 | 2017-05-19T14:02:56 | 2017-05-19T14:02:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,880 | py | from datetime import datetime
import os
import pytz
import shutil
from cms.models import Page
from md_importer.importer import (
DEFAULT_TEMPLATE,
TEMPLATE_CHOICES,
)
from md_importer.importer.article import Article
from md_importer.importer.publish import find_text_plugin
from md_importer.importer.tools import remove_trailing_slash
from .utils import (
PublishedPages,
TestLocalBranchImport,
)
class TestImportDirectivesBuildHierarchyImport(TestLocalBranchImport):
'''
    Build an article tree structure from files in the snapcraft tree.
Make sure the top-most articles (in the tree hierarchy) are imported
first, so the tree looks like this:
    a
    +-- b
        +-- c
        |   +-- d
        |   |   +-- e
        |   |       +-- f
        |   +-- d2
        +-- c2
Files in the import directives are random in the beginning.
'''
def runTest(self):
self.create_repo('data/snapcraft-test')
self.repo.add_directive('docs/debug.md', 'a/b/c/d')
self.repo.add_directive('docs/intro.md', 'a/b')
self.repo.add_directive('docs/mir-snaps.md', 'a/b/c/d/e/f')
self.repo.add_directive('docs/reference.md', 'a')
self.repo.add_directive('docs/snapcraft-advanced-features.md', 'a/b/c')
self.repo.add_directive('docs/snapcraft-syntax.md', 'a/b/c2')
self.repo.add_directive('docs/upload-your-snap.md', 'a/b/c/d2')
self.repo.add_directive('docs/get-started.md', 'a/b/c/d/e')
self.assertEqual(len(self.repo.directives), 8)
self.assertTrue(self.repo.execute_import_directives())
self.assertEqual(len(self.repo.imported_articles), 8)
self.assertTrue(self.repo.publish())
pages = Page.objects.all()
self.assertGreater(pages.count(), len(self.repo.imported_articles))
# Make sure we got the parents right
for article in self.repo.imported_articles:
parent_url = remove_trailing_slash(
article.article_page.page.parent.get_absolute_url())
url = remove_trailing_slash(
article.article_page.page.get_absolute_url())
self.assertEqual(parent_url, os.path.split(url)[0])
self.assertIsInstance(article, Article)
class TestOneDirImport(TestLocalBranchImport):
def runTest(self):
self.create_repo('data/snapcraft-test')
self.repo.add_directive('docs', '')
self.assertEqual(len(self.repo.directives), 1)
self.assertTrue(self.repo.execute_import_directives())
self.assertGreater(len(self.repo.imported_articles), 10)
self.assertTrue(self.repo.publish())
published_pages = PublishedPages()
self.assertTrue(published_pages.has_size(
len(self.repo.imported_articles)+1)) # + landing/index page
for article in self.repo.imported_articles:
self.assertIsInstance(article, Article)
class TestOneDirAndTwoFilesImport(TestLocalBranchImport):
def runTest(self):
self.create_repo('data/snapcraft-test')
self.repo.add_directive('docs', '')
self.repo.add_directive('README.md', '')
self.repo.add_directive('HACKING.md', 'hacking')
self.assertEqual(len(self.repo.directives), 3)
self.assertTrue(self.repo.execute_import_directives())
self.assertGreater(len(self.repo.imported_articles), 10)
self.assertTrue(self.repo.publish())
published_pages = PublishedPages()
self.assertTrue(published_pages.has_size(
len(self.repo.imported_articles)))
self.assertTrue(published_pages.has_at_least_size(10))
self.assertTrue(published_pages.contains_url(u'/en/'))
self.assertTrue(published_pages.contains_url('/en/hacking/'))
class TestArticletreeOneFileImport(TestLocalBranchImport):
    '''Check that the imported article has 'root' as parent.'''
def runTest(self):
self.create_repo('data/snapcraft-test')
self.repo.add_directive('README.md', 'readme')
self.assertEqual(len(self.repo.directives), 1)
self.assertTrue(self.repo.execute_import_directives())
self.assertEqual(len(self.repo.imported_articles), 1)
self.assertTrue(self.repo.publish())
published_pages = PublishedPages()
self.assertTrue(published_pages.has_size(
1+1, # readme + root
))
self.assertEqual(self.repo.pages[0].parent, self.root)
class TestArticletreeOneDirImport(TestLocalBranchImport):
'''Check if all imported articles have 'root' as parent.'''
def runTest(self):
self.create_repo('data/snapcraft-test')
self.repo.add_directive('docs', '')
self.assertTrue(self.repo.execute_import_directives())
self.assertTrue(self.repo.publish())
published_pages = PublishedPages()
for page in published_pages.pages:
if page.parent is not None:
self.assertEqual(page.parent_id, self.root.id)
class TestArticleHTMLTagsAfterImport(TestLocalBranchImport):
def runTest(self):
self.create_repo('data/snapcraft-test')
self.repo.add_directive('docs', '')
self.assertEqual(len(self.repo.directives), 1)
self.assertTrue(self.repo.execute_import_directives())
self.assertGreater(len(self.repo.imported_articles), 10)
self.assertTrue(self.repo.publish())
published_pages = PublishedPages()
self.assertTrue(published_pages.has_size(
len(self.repo.imported_articles)+1)) # + landing/index page
for article in self.repo.imported_articles:
self.assertIsInstance(article, Article)
self.assertNotIn('<body>', article.html)
            self.assertNotIn('</body>', article.html)
class TestNoneInURLAfterImport(TestLocalBranchImport):
def runTest(self):
self.create_repo('data/snapcraft-test')
self.repo.add_directive('docs', '')
self.assertEqual(len(self.repo.directives), 1)
self.assertTrue(self.repo.execute_import_directives())
self.assertGreater(len(self.repo.imported_articles), 10)
self.assertTrue(self.repo.publish())
published_pages = PublishedPages()
self.assertTrue(published_pages.has_size(
len(self.repo.imported_articles)+1)) # + landing/index page
for article in self.repo.imported_articles:
self.assertIsInstance(article, Article)
self.assertNotIn('/None/', article.full_url)
for page in published_pages.pages:
self.assertIsNotNone(page.get_slug())
class TestAdvertiseImport(TestLocalBranchImport):
'''Check if all imported articles are advertised in the navigation when
using defaults.'''
def runTest(self):
self.create_repo('data/snapcraft-test')
self.repo.add_directive('docs', '')
self.assertTrue(self.repo.execute_import_directives())
for article in self.repo.imported_articles:
self.assertTrue(article.advertise)
self.assertTrue(self.repo.publish())
published_pages = PublishedPages()
for page in published_pages.pages:
if page.parent is not None:
parent = page.parent.get_public_object()
self.assertEqual(parent.id, self.root.id)
self.assertTrue(page.in_navigation)
class TestNoAdvertiseImport(TestLocalBranchImport):
    '''Check that imported articles are not advertised in the navigation
    when advertise=False is set on the directive.'''
def runTest(self):
self.create_repo('data/snapcraft-test')
self.repo.add_directive('docs', '', advertise=False)
self.assertTrue(self.repo.execute_import_directives())
for article in self.repo.imported_articles:
self.assertFalse(article.advertise)
self.assertTrue(self.repo.publish())
published_pages = PublishedPages()
for page in published_pages.pages:
if page.parent is not None:
self.assertEqual(page.parent_id, self.root.id)
self.assertFalse(page.in_navigation)
class TestTwiceImport(TestLocalBranchImport):
'''Run import on the same contents twice, make sure we don't
add new pages over and over again.'''
def runTest(self):
self.create_repo('data/snapcraft-test')
self.repo.add_directive('docs', '')
self.assertTrue(self.repo.execute_import_directives())
self.assertTrue(self.repo.publish())
published_pages = PublishedPages()
self.assertTrue(published_pages.has_size(
len(self.repo.imported_articles)+1)) # articles + root
# Run second import
shutil.rmtree(self.tempdir)
self.create_repo('data/snapcraft-test')
self.repo.add_directive('docs', '')
self.assertEqual(len(self.repo.directives), 1)
self.assertEqual(len(self.repo.imported_articles), 0)
self.assertTrue(self.repo.execute_import_directives())
self.assertTrue(self.repo.publish())
published_pages = PublishedPages()
self.assertTrue(published_pages.has_size(
len(self.repo.imported_articles)+1)) # articles + root
class TestTwiceImportNoHtmlChange(TestLocalBranchImport):
'''Run import on the same contents twice, make sure we don't
update the HTML in the pages over and over again.'''
def runTest(self):
self.create_repo('data/snapcraft-test')
self.repo.add_directive('docs', '')
self.assertTrue(self.repo.execute_import_directives())
self.assertTrue(self.repo.publish())
published_pages = PublishedPages()
self.assertTrue(published_pages.has_size(
len(self.repo.imported_articles)+1)) # articles + root
shutil.rmtree(self.tempdir)
# Take the time before publishing the second import
now = datetime.now(pytz.utc)
# Run second import
self.create_repo('data/snapcraft-test')
self.repo.add_directive('docs', '')
self.assertEqual(len(self.repo.directives), 1)
self.assertEqual(len(self.repo.imported_articles), 0)
self.assertTrue(self.repo.execute_import_directives())
self.assertTrue(self.repo.publish())
# Check the page's plugins
published_pages.update()
for page in published_pages.pages:
if page != self.root:
plugin = find_text_plugin(page)
self.assertGreater(now, plugin.changed_date)
class TestImportNoTemplateChange(TestLocalBranchImport):
'''Check if all imported articles use the default template.'''
def runTest(self):
self.create_repo('data/snapcraft-test')
self.repo.add_directive('docs', '')
self.assertTrue(self.repo.execute_import_directives())
for article in self.repo.imported_articles:
self.assertEqual(article.template, DEFAULT_TEMPLATE)
self.assertTrue(self.repo.publish())
published_pages = PublishedPages()
for page in published_pages.pages:
if page.parent is not None:
self.assertEqual(page.template, DEFAULT_TEMPLATE)
class TestImportTemplateChange(TestLocalBranchImport):
'''Check if all imported articles use the desired template.'''
def runTest(self):
self.create_repo('data/snapcraft-test')
template_to_use = TEMPLATE_CHOICES[1][0]
self.repo.add_directive('docs', '', template=template_to_use)
self.assertTrue(self.repo.execute_import_directives())
for article in self.repo.imported_articles:
self.assertEqual(article.template, template_to_use)
self.assertTrue(self.repo.publish())
published_pages = PublishedPages()
for page in published_pages.pages:
if page.parent is not None:
self.assertEqual(page.template, template_to_use)
| [
"[email protected]"
] | |
92a3ec3bafed166964ecb71e18635df198972a96 | f518506fb620fd29a2db876c05de813508eda519 | /TemplateExample/manage.py | a0f52c5a89e4be31918181439449577e88ec8949 | [] | no_license | Niharika3128/Django5-6 | 07435ae9088659e2d192cda60542aee5214e0637 | be3055ca91da45c37f9ec1adb626eea335477746 | refs/heads/master | 2020-06-02T04:28:37.016405 | 2019-06-09T17:28:33 | 2019-06-09T17:28:33 | 191,035,169 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'TemplateExample.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
48db028ef8f7d1a4e3cb06f35a3d87d695fad4f9 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/AlipaySecurityProdFacePayResponse.py | 1fe1ab1693136b166a60fe8ae2da572173c1432e | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 652 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipaySecurityProdFacePayResponse(AlipayResponse):
def __init__(self):
super(AlipaySecurityProdFacePayResponse, self).__init__()
self._aa = None
@property
def aa(self):
return self._aa
@aa.setter
def aa(self, value):
self._aa = value
def parse_response_content(self, response_content):
response = super(AlipaySecurityProdFacePayResponse, self).parse_response_content(response_content)
if 'aa' in response:
self.aa = response['aa']
| [
"[email protected]"
] | |
d15d0cc4b11412348704da95bb09c8c43cc6c08d | 2ed86a79d0fcd299ad4a01310954c5eddcf01edf | /tests/components/devolo_home_network/test_init.py | 99b6053e1bac778817fc9ab95799930fd4813530 | [
"Apache-2.0"
] | permissive | konnected-io/home-assistant | 037f12c87bb79e19220192eb918e49db1b1a8b3e | 2e65b77b2b5c17919939481f327963abdfdc53f0 | refs/heads/dev | 2023-05-11T08:57:41.891518 | 2023-05-07T20:03:37 | 2023-05-07T20:03:37 | 109,931,626 | 24 | 10 | Apache-2.0 | 2023-02-22T06:24:01 | 2017-11-08T05:27:21 | Python | UTF-8 | Python | false | false | 4,125 | py | """Test the devolo Home Network integration setup."""
from unittest.mock import patch
from devolo_plc_api.exceptions.device import DeviceNotFound
import pytest
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR
from homeassistant.components.button import DOMAIN as BUTTON
from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER
from homeassistant.components.devolo_home_network.const import DOMAIN
from homeassistant.components.sensor import DOMAIN as SENSOR
from homeassistant.components.switch import DOMAIN as SWITCH
from homeassistant.config_entries import ConfigEntryState
from homeassistant.const import CONF_IP_ADDRESS, EVENT_HOMEASSISTANT_STOP
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import async_get_platforms
from . import configure_integration
from .const import IP
from .mock import MockDevice
from tests.common import MockConfigEntry
@pytest.mark.usefixtures("mock_device")
async def test_setup_entry(hass: HomeAssistant) -> None:
"""Test setup entry."""
entry = configure_integration(hass)
with patch(
"homeassistant.config_entries.ConfigEntries.async_forward_entry_setup",
return_value=True,
), patch("homeassistant.core.EventBus.async_listen_once"):
assert await hass.config_entries.async_setup(entry.entry_id)
assert entry.state is ConfigEntryState.LOADED
@pytest.mark.usefixtures("mock_device")
async def test_setup_without_password(hass: HomeAssistant) -> None:
"""Test setup entry without a device password set like used before HA Core 2022.06."""
config = {
CONF_IP_ADDRESS: IP,
}
entry = MockConfigEntry(domain=DOMAIN, data=config)
entry.add_to_hass(hass)
with patch(
"homeassistant.config_entries.ConfigEntries.async_forward_entry_setup",
return_value=True,
), patch("homeassistant.core.EventBus.async_listen_once"):
assert await hass.config_entries.async_setup(entry.entry_id)
assert entry.state is ConfigEntryState.LOADED
async def test_setup_device_not_found(hass: HomeAssistant) -> None:
"""Test setup entry."""
entry = configure_integration(hass)
with patch(
"homeassistant.components.devolo_home_network.Device.async_connect",
side_effect=DeviceNotFound(IP),
):
await hass.config_entries.async_setup(entry.entry_id)
assert entry.state is ConfigEntryState.SETUP_RETRY
@pytest.mark.usefixtures("mock_device")
async def test_unload_entry(hass: HomeAssistant) -> None:
"""Test unload entry."""
entry = configure_integration(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
await hass.config_entries.async_unload(entry.entry_id)
assert entry.state is ConfigEntryState.NOT_LOADED
async def test_hass_stop(hass: HomeAssistant, mock_device: MockDevice) -> None:
"""Test homeassistant stop event."""
entry = configure_integration(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
await hass.async_block_till_done()
mock_device.async_disconnect.assert_called_once()
@pytest.mark.parametrize(
("device", "expected_platforms"),
[
["mock_device", (BINARY_SENSOR, BUTTON, DEVICE_TRACKER, SENSOR, SWITCH)],
["mock_repeater_device", (BUTTON, DEVICE_TRACKER, SENSOR, SWITCH)],
["mock_nonwifi_device", (BINARY_SENSOR, BUTTON, SENSOR, SWITCH)],
],
)
async def test_platforms(
hass: HomeAssistant,
device: str,
expected_platforms: set[str],
request: pytest.FixtureRequest,
) -> None:
"""Test platform assembly."""
request.getfixturevalue(device)
entry = configure_integration(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
platforms = [platform.domain for platform in async_get_platforms(hass, DOMAIN)]
assert len(platforms) == len(expected_platforms)
assert all(platform in platforms for platform in expected_platforms)
| [
"[email protected]"
] | |
5c1f6f6af0e742423a0b285a3f8a9c60555bb95a | 897802abf4ee5c7267de3eb5e321cc931898e2f6 | /python/python/songTian/part0_base/week01/c121_draw/circle_draw/__init__.py | 47b01f56af458c7295a4d27c51f018df486f59fb | [] | no_license | aojie654/codes_store | 0527c7a7729b472e8fd2fd67af462cf857970633 | ed71b6266b2d2b5ddefadcb958f17695fb9db6cf | refs/heads/master | 2021-07-15T17:04:33.591673 | 2021-07-03T14:42:30 | 2021-07-03T14:42:30 | 132,343,733 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | import turtle
turtle.pensize(2)
turtle.circle(10)
turtle.circle(20)
turtle.circle(40)
turtle.circle(80)
turtle.circle(160)
| [
"[email protected]"
] | |
a0974ac4da7ff77096e13917b61406bc2ef64c90 | 2dd26e031162e75f37ecb1f7dd7f675eeb634c63 | /examples/nlp/text_normalization_as_tagging/dataset_preparation/prepare_corpora_for_alignment.py | 9fe64c1105b8b5680b9561071435c59c048f0156 | [
"Apache-2.0"
] | permissive | NVIDIA/NeMo | 1b001fa2ae5d14defbfd02f3fe750c5a09e89dd1 | c20a16ea8aa2a9d8e31a98eb22178ddb9d5935e7 | refs/heads/main | 2023-08-21T15:28:04.447838 | 2023-08-21T00:49:36 | 2023-08-21T00:49:36 | 200,722,670 | 7,957 | 1,986 | Apache-2.0 | 2023-09-14T18:49:54 | 2019-08-05T20:16:42 | Python | UTF-8 | Python | false | false | 6,403 | py | # Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to after google_data_preprocessing_before_alignment.py
to obtain separate "parallel" corpora for each semiotic class.
USAGE Example:
1. Download the Google TN dataset from https://www.kaggle.com/google-nlu/text-normalization
2. Unzip the English subset (e.g., by running `tar zxvf en_with_types.tgz`).
Then there will a folder named `en_with_types`.
3. Run python google_data_preprocessing_before_alignment.py
which will produce a file data.tsv in its --output-dir
4. [Optional]. sort -u and rewrite data.tsv
5. Clone https://github.com/moses-smt/giza-pp.git, run "make" from its root folder.
6. Run this script
python ${NEMO}/examples/nlp/text_normalization_as_tagging/dataset_preparation/prepare_corpora_for_alignment.py \
--data_dir=<--output-dir from the previous step> \
--out_dir=<destination directory for giza alignment folders> \
--giza_dir=/.../giza-pp/GIZA++-v2 \
--mckls_binary=/.../giza-pp/mkcls-v2/mkcls \
--lang={en,ru}
Each corpus will be stored within <--data-dir> in the subdirectory with the name of the semiotic class,
containing files ready to be fed to Giza++:
src - written form, tokenized as characters
dst - spoken form, tokenized as words
run.sh - script for running Giza++
"""
from argparse import ArgumentParser
from collections import Counter
from os import listdir, mkdir
from os.path import isdir, join
from shutil import rmtree
from nemo.collections.nlp.data.text_normalization_as_tagging.utils import get_src_and_dst_for_alignment
parser = ArgumentParser(description='Split corpus to subcorpora for giza alignment')
parser.add_argument('--data_dir', type=str, required=True, help='Path to folder with data')
parser.add_argument('--out_dir', type=str, required=True, help='Path to output folder')
parser.add_argument('--giza_dir', type=str, required=True, help='Path to folder with GIZA++ binaries')
parser.add_argument('--mckls_binary', type=str, required=True, help='Path to mckls binary')
parser.add_argument('--lang', type=str, required=True, help='Language')
args = parser.parse_args()
def prepare_subcorpora_from_data() -> None:
"""Preprocess a corpus in Google TN Dataset format, extract TN-ITN phrase pairs, prepare input for GIZA++ alignment.
"""
semiotic_vcb = Counter()
cache_vcb = {}
filenames = []
for fn in listdir(args.data_dir + "/train"):
filenames.append(args.data_dir + "/train/" + fn)
for fn in listdir(args.data_dir + "/dev"):
filenames.append(args.data_dir + "/dev/" + fn)
for fn in filenames:
with open(fn, "r", encoding="utf-8") as f:
# Loop through each line of the file
for line in f:
parts = line.strip().split("\t")
if len(parts) < 3:
continue
if len(parts) != 3:
raise ValueError("Expect 3 parts, got " + str(len(parts)))
semiotic_class, written, spoken = parts[0], parts[1].strip(), parts[2].strip()
if spoken == "<self>":
continue
semiotic_class = semiotic_class.casefold()
semiotic_vcb[semiotic_class] += 1
classdir = join(args.out_dir, semiotic_class)
if not isdir(classdir):
mkdir(classdir)
src, dst, _, _ = get_src_and_dst_for_alignment(semiotic_class, written, spoken, args.lang)
if src == "" or dst == "":
continue
if len(src.split(" ")) >= 100:
continue
if semiotic_class not in cache_vcb:
cache_vcb[semiotic_class] = Counter()
cache_vcb[semiotic_class][(src, dst)] += 1
for sem in semiotic_vcb:
classdir = join(args.out_dir, sem)
if not isdir(classdir):
raise ValueError("No such directory: " + classdir)
print(classdir, " has ", semiotic_vcb[sem], " instances")
with open(join(classdir, "run.sh"), "w") as out:
out.write("GIZA_PATH=\"" + args.giza_dir + "\"\n")
out.write("MKCLS=\"" + args.mckls_binary + "\"\n")
out.write("\n")
out.write("${GIZA_PATH}/plain2snt.out src dst\n")
out.write("${MKCLS} -m2 -psrc -c15 -Vsrc.classes opt >& mkcls1.log\n")
out.write("${MKCLS} -m2 -pdst -c15 -Vdst.classes opt >& mkcls2.log\n")
out.write("${GIZA_PATH}/snt2cooc.out src.vcb dst.vcb src_dst.snt > src_dst.cooc\n")
out.write(
"${GIZA_PATH}/GIZA++ -S src.vcb -T dst.vcb -C src_dst.snt -coocurrencefile src_dst.cooc -p0 0.98 -o GIZA++ >& GIZA++.log\n"
)
out.write("##reverse direction\n")
out.write("${GIZA_PATH}/snt2cooc.out dst.vcb src.vcb dst_src.snt > dst_src.cooc\n")
out.write(
"${GIZA_PATH}/GIZA++ -S dst.vcb -T src.vcb -C dst_src.snt -coocurrencefile dst_src.cooc -p0 0.98 -o GIZA++reverse >& GIZA++reverse.log\n"
)
out_src = open(join(classdir, "src"), 'w', encoding="utf-8")
out_dst = open(join(classdir, "dst"), 'w', encoding="utf-8")
out_freq = open(join(classdir, "freq"), 'w', encoding="utf-8")
for src, dst in cache_vcb[sem]:
freq = cache_vcb[sem][(src, dst)]
out_src.write(src + "\n")
out_dst.write(dst + "\n")
out_freq.write(str(freq) + "\n")
out_freq.close()
out_dst.close()
out_src.close()
# Main code
if __name__ == '__main__':
for name in listdir(args.out_dir):
path = join(args.out_dir, name)
if isdir(path):
rmtree(path)
# Processing
prepare_subcorpora_from_data()
| [
"[email protected]"
] | |
caf65928426dfc47a7b74d4a1e41cd9f7f7b4eb7 | 844bec60f1a769b2a5e68e4beeb6531edc3ce4e3 | /my_instagram/member/tests.py | d76a21e9183a029a7c58801d925daa20cceb0141 | [] | no_license | shoark7/my-instagram | 04796a8c263009ef9128ec788ce8991c417fa918 | 42d1b43645c142c9d7f8c6df31865877ea1873a8 | refs/heads/master | 2020-12-24T07:53:07.248369 | 2016-11-15T06:36:44 | 2016-11-15T06:36:44 | 73,359,919 | 0 | 0 | null | 2016-11-13T09:05:29 | 2016-11-10T08:05:13 | Python | UTF-8 | Python | false | false | 1,004 | py | from django.test import TestCase, LiveServerTestCase
from .models import MyUser
# Create your tests here.
class FollowTest(LiveServerTestCase):
def create_user(self, username, last_name, first_name):
return MyUser.objects.create_user(
username=username,
last_name=last_name,
first_name=first_name,
)
def test_create_user(self):
print("test create user")
u1 = self.create_user('u1', '방','민아')
u2 = self.create_user('u2', 'dl','ㅇ하녕')
u3 = self.create_user('u3', '박','성환')
    def test_follow_user(self):
        print("test follow user")
        u1 = self.create_user('u1', '방', '민아')
        u2 = self.create_user('u2', 'dl', 'ㅇ하녕')
        u3 = self.create_user('u3', '박', '성환')
        u2.follow(u1)
        u3.follow(u2)
        u3.follow(u1)
        print(u2.follow_users.all())
        print(u3.follow_users.all())
        print()
        print(u1.followers.all())
        # Assumes follow_users is the forward relation and followers its
        # reverse accessor, as the prints above imply.
        self.assertEqual(u2.follow_users.count(), 1)
        self.assertEqual(u3.follow_users.count(), 2)
        self.assertEqual(u1.followers.count(), 2)
| [
"[email protected]"
] | |
8eaf470b58b0830ec791b74d821eb954a7fd1a02 | dbfdbe3c1d5e3ad38625d8c971fe8dd45c8c3885 | /device_agent/snmp/libs/pysnmp-4.4.5/examples/smi/agent/custom-managed-object.py | 0b175193bfa76c391ed6cdfa364259d6af77d5df | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | fyfdoc/IntegrateTest | a58f6d0ea7cff5f67d79d7e042c0bb39c6b8bbbb | 0d8374406c10c313d6627699879215841e0ebdb6 | refs/heads/master | 2022-12-03T02:32:37.388556 | 2019-01-25T02:36:42 | 2019-01-25T02:36:42 | 167,468,256 | 0 | 1 | null | 2022-11-29T20:58:41 | 2019-01-25T01:59:28 | Python | UTF-8 | Python | false | false | 1,604 | py | """
Implementing MIB objects
++++++++++++++++++++++++
This script explains how SNMP Agent application could model
real-world data as Managed Objects defined in MIB.
"""#
from pysnmp.smi import builder
# MIB Builder is normally pre-created by SNMP engine
mibBuilder = builder.MibBuilder()
#
# This may be done in a stand-alone file and then loaded up
# by SNMP Agent
#
# A base class for a custom Managed Object
MibScalarInstance, = mibBuilder.importSymbols(
'SNMPv2-SMI', 'MibScalarInstance'
)
# Managed object specification
sysLocation, = mibBuilder.importSymbols('SNMPv2-MIB', 'sysLocation')
# Custom Managed Object
class MySysLocationInstance(MibScalarInstance):
# noinspection PyUnusedLocal
def readGet(self, name, *args):
# Just return a custom value
return name, self.syntax.clone('The Leaky Cauldron')
sysLocationInstance = MySysLocationInstance(
sysLocation.name, (0,), sysLocation.syntax
)
# Register Managed Object with a MIB tree
mibBuilder.exportSymbols(
# '__' prefixed MIB modules take precedence on indexing
'__MY-LOCATION-MIB', sysLocationInstance=sysLocationInstance
)
if __name__ == '__main__':
#
# This is what is done internally by Agent.
#
from pysnmp.smi import instrum, exval
mibInstrum = instrum.MibInstrumController(mibBuilder)
print('Remote manager read access to MIB instrumentation (table walk)')
oid, val = (), None
while 1:
oid, val = mibInstrum.readNextVars(((oid, val),))[0]
if exval.endOfMib.isSameTypeWith(val):
break
print(oid, val.prettyPrint())
| [
"[email protected]"
] | |
95206566ebb073185d5e386f1509f946bc331050 | e953c138d3808d92fcc9848824985be5bc42f034 | /python/multiprocessing/lock.py | bb10bc3b3357cb248b6dc23f5ba3a856a0415d2b | [] | no_license | hotoku/samples | 1cf3f7006ae8ba9bae3a52113cdce6d1e1d32c5a | ce0d95d87e08386d9eb83d7983bd2eaff0682793 | refs/heads/main | 2023-08-09T09:05:15.185012 | 2023-08-04T09:29:06 | 2023-08-04T09:29:06 | 222,609,036 | 0 | 0 | null | 2022-03-30T01:44:03 | 2019-11-19T04:35:27 | Jupyter Notebook | UTF-8 | Python | false | false | 536 | py | from concurrent.futures import ProcessPoolExecutor
from datetime import datetime
import time
import multiprocessing
def do_f(x, y):
print(f"{datetime.now()}: do_f x={x}, y={y}")
time.sleep(3)
return x + y
def f(x, y, lock):
print(f"{datetime.now()}: f x={x}, y={y}")
with lock:
return do_f(x, y)
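# Design note: a plain multiprocessing.Lock() cannot be passed as an argument
# through ProcessPoolExecutor.submit() -- it would have to be pickled, which
# raises "Lock objects should only be shared between processes through
# inheritance". Manager().Lock() below returns a picklable proxy instead, so
# it can travel to the worker processes.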
if __name__ == "__main__":
    # The main guard is required: with the spawn start method (the default on
    # Windows/macOS) workers re-import this module, and unguarded pool
    # creation would recurse.
    m = multiprocessing.Manager()
    lock = m.Lock()
    with ProcessPoolExecutor(max_workers=4) as ex:
        fs = [
            ex.submit(f, i, 1, lock) for i in range(3)
        ]
        res = [f.result() for f in fs]
        print(res)
| [
"[email protected]"
] | |
5bfb68f5064b121eb578adf71745af4cd3d8ac5e | 6c9ed1bbf924591b9044ddcba03dee701ba39b2b | /recordwhat/records/seq.py | c1c9bf568716b234c6400657038caeed66d2edde | [
"BSD-3-Clause"
] | permissive | klauer/recordwhat | 986b1a8cd0d57902f25fb09573e96823bffd333e | 2921a9852361cc682ec8441bb5f4cc54b6af6e80 | refs/heads/master | 2022-02-03T15:36:27.648497 | 2022-01-25T22:56:58 | 2022-01-25T22:56:58 | 51,451,764 | 2 | 4 | BSD-3-Clause | 2022-01-25T22:56:59 | 2016-02-10T15:56:30 | Python | UTF-8 | Python | false | false | 2,605 | py | from ophyd import (EpicsSignal, EpicsSignalRO)
from .. import (RecordBase, _register_record_type,
FieldComponent as Cpt)
@_register_record_type('seq')
class SeqRecord(RecordBase):
alarm_status = Cpt(EpicsSignalRO, '.STAT')
constant_input_1 = Cpt(EpicsSignal, '.DO1')
constant_input_10 = Cpt(EpicsSignal, '.DOA')
constant_input_2 = Cpt(EpicsSignal, '.DO2')
constant_input_3 = Cpt(EpicsSignal, '.DO3')
constant_input_4 = Cpt(EpicsSignal, '.DO4')
constant_input_5 = Cpt(EpicsSignal, '.DO5')
constant_input_6 = Cpt(EpicsSignal, '.DO6')
constant_input_7 = Cpt(EpicsSignal, '.DO7')
constant_input_8 = Cpt(EpicsSignal, '.DO8')
constant_input_9 = Cpt(EpicsSignal, '.DO9')
link_selection = Cpt(EpicsSignal, '.SELN')
# - display
display_precision = Cpt(EpicsSignal, '.PREC')
# - inputs
link_selection_loc = Cpt(EpicsSignal, '.SELL$', string=True)
select_mechanism = Cpt(EpicsSignal, '.SELM')
# - seq1
delay_1 = Cpt(EpicsSignal, '.DLY1')
delay_2 = Cpt(EpicsSignal, '.DLY2')
delay_3 = Cpt(EpicsSignal, '.DLY3')
input_link_2 = Cpt(EpicsSignal, '.DOL2$', string=True)
input_link_3 = Cpt(EpicsSignal, '.DOL3$', string=True)
input_link1 = Cpt(EpicsSignal, '.DOL1$', string=True)
output_link_1 = Cpt(EpicsSignal, '.LNK1$', string=True)
output_link_2 = Cpt(EpicsSignal, '.LNK2$', string=True)
output_link_3 = Cpt(EpicsSignal, '.LNK3$', string=True)
# - seq2
delay_4 = Cpt(EpicsSignal, '.DLY4')
delay_5 = Cpt(EpicsSignal, '.DLY5')
delay_6 = Cpt(EpicsSignal, '.DLY6')
input_link_4 = Cpt(EpicsSignal, '.DOL4$', string=True)
input_link_5 = Cpt(EpicsSignal, '.DOL5$', string=True)
input_link_6 = Cpt(EpicsSignal, '.DOL6$', string=True)
output_link_4 = Cpt(EpicsSignal, '.LNK4$', string=True)
output_link_5 = Cpt(EpicsSignal, '.LNK5$', string=True)
output_link_6 = Cpt(EpicsSignal, '.LNK6$', string=True)
# - seq3
delay_10 = Cpt(EpicsSignal, '.DLYA')
delay_7 = Cpt(EpicsSignal, '.DLY7')
delay_8 = Cpt(EpicsSignal, '.DLY8')
delay_9 = Cpt(EpicsSignal, '.DLY9')
input_link_10 = Cpt(EpicsSignal, '.DOLA$', string=True)
input_link_7 = Cpt(EpicsSignal, '.DOL7$', string=True)
input_link_8 = Cpt(EpicsSignal, '.DOL8$', string=True)
input_link_9 = Cpt(EpicsSignal, '.DOL9$', string=True)
output_link_10 = Cpt(EpicsSignal, '.LNKA$', string=True)
output_link_7 = Cpt(EpicsSignal, '.LNK7$', string=True)
output_link_8 = Cpt(EpicsSignal, '.LNK8$', string=True)
output_link_9 = Cpt(EpicsSignal, '.LNK9$', string=True)
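# Minimal usage sketch (an assumption, not part of the original module):
# connecting requires a live EPICS IOC serving a seq record, and the
# 'XF:SEQ1' prefix below is a hypothetical PV name.
#
#     seq = SeqRecord('XF:SEQ1', name='seq')
#     print(seq.delay_1.get(), seq.link_selection.get())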
| [
"[email protected]"
] | |
c9e6afdec8ce261e8a2f43fc4b54c4dbdb5e3542 | 0547c3ebab814e3fdf2616ae63f8f6c87a0ff6c5 | /374.guess-number-higher-or-lower.py | 700808b1ba1e1ec628739b2454d3092046d932a9 | [] | no_license | livepo/lc | b8792d2b999780af5d5ef3b6050d71170a272ca6 | 605d19be15ece90aaf09b994098716f3dd84eb6a | refs/heads/master | 2020-05-15T03:57:15.367240 | 2019-07-30T03:11:46 | 2019-07-30T03:11:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | # The guess API is already defined for you.
# @param num, your guess
# @return -1 if my number is lower, 1 if my number is higher, otherwise return 0
# def guess(num):
class Solution(object):
def guessNumber(self, n):
"""
:type n: int
:rtype: int
"""
| [
"[email protected]"
] | |
fe57d823e3f211eb951f7d95c44156eae836df34 | 34ffb243303300ccb233dc8394c78d6cb1212127 | /registration/tests.py | a785686bfbf3b73736e23c595b6775a5b2005a25 | [] | no_license | Larrygf02/webplayground | 139955880e9e1a935cf7fbc89df9f1ebf45009f0 | 09579f3705e74ddd3380d905f5e0b8df0f93032a | refs/heads/master | 2020-04-28T01:01:36.451838 | 2019-03-10T14:59:48 | 2019-03-10T14:59:48 | 174,838,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | from django.test import TestCase
from .models import Profile
from django.contrib.auth.models import User
# Create your tests here.
class ProfileTestCase(TestCase):
def setUp(self):
User.objects.create_user('test','[email protected]', 'test1234')
def test_profile_exists(self):
exists = Profile.objects.filter(user__username='test').exists()
        self.assertTrue(exists)
| [
"[email protected]"
] | |
a941aa45b4e10dc6b7401fbe7ba650f95322f544 | dd6dd68d17b7355e01c4ce86649423974bb69c50 | /Masters/migrations/0002_serialnumber.py | b2266937dd7911f5043e0f92d9384abefe27e6ea | [
"MIT"
] | permissive | adithyanps/netprofit-django | 2b9953296fb016e4a16b30768ba864f91882573f | 7ba87f054d09a201352635bb6cf8d0112208609e | refs/heads/master | 2020-05-04T16:43:14.547552 | 2019-09-30T13:20:07 | 2019-09-30T13:20:07 | 179,285,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | # Generated by Django 2.2.4 on 2019-09-25 08:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Masters', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='SerialNumber',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('prefix', models.CharField(max_length=6)),
('suffix', models.CharField(max_length=6)),
('start_number', models.IntegerField()),
('padding', models.IntegerField()),
('type', models.CharField(choices=[('CN', 'CreditNote'), ('DN', 'DebitNote'), ('SI', 'Sales'), ('CR', 'customer_reciept')], max_length=10)),
],
),
]
| [
"[email protected]"
] | |
4a1e2396753dbd98038ff0f8e6b6c8c9df9d2267 | 1337ccefdeddc753090281d473fa1e08d42b6884 | /bnpl/plugin_itunes.py | f85c544dc2c7004c8fd8d1b161314418b8a5db90 | [] | no_license | abelsonlive/bnpl | 8bd97d7bec8933642188814e07a38b544bcb3963 | 8f49a6d257fab75b7659ba2bae502595a164b8ee | refs/heads/master | 2021-01-02T08:46:39.093073 | 2017-01-30T19:09:57 | 2017-01-30T19:09:57 | 76,924,249 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | from pyItunes import Library
from bnpl import Option, OptionSet
from bnpl import Extractor
class ItunesSongs(Extractor):
"""
    Extract sounds from your iTunes library.
"""
options = OptionSet(
Option('library_xml', type='path', required=True)
)
def run(self):
"""
"""
l = Library(self.options['library_xml'])
for id, song in l.songs.items():
yield song | [
"[email protected]"
] | |
7fcd687644a4140303be421ead340e6b0a7527f4 | e27f9f1f8bef8b1f4676df84ee3e753974d21a1c | /ignite/contrib/metrics/precision_recall_curve.py | 5021315904b334d819722a75e9cb9036f4d4d11b | [
"BSD-3-Clause"
] | permissive | pytorch/ignite | 8fb275638e94e702762eec932b21dc8df7a54cb0 | 34a707e53785cf8a524589f33a570a7516fe064e | refs/heads/master | 2023-09-02T00:27:22.485479 | 2023-08-31T15:10:14 | 2023-08-31T15:10:14 | 111,835,796 | 4,613 | 788 | BSD-3-Clause | 2023-09-13T07:46:41 | 2017-11-23T17:31:21 | Python | UTF-8 | Python | false | false | 5,596 | py | from typing import Any, Callable, cast, Tuple, Union
import torch
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics import EpochMetric
def precision_recall_curve_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> Tuple[Any, Any, Any]:
try:
from sklearn.metrics import precision_recall_curve
except ImportError:
raise ModuleNotFoundError("This contrib module requires scikit-learn to be installed.")
y_true = y_targets.cpu().numpy()
y_pred = y_preds.cpu().numpy()
return precision_recall_curve(y_true, y_pred)
class PrecisionRecallCurve(EpochMetric):
"""Compute precision-recall pairs for different probability thresholds for binary classification task
by accumulating predictions and the ground-truth during an epoch and applying
`sklearn.metrics.precision_recall_curve <https://scikit-learn.org/stable/modules/generated/
sklearn.metrics.precision_recall_curve.html#sklearn.metrics.precision_recall_curve>`_ .
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
check_compute_fn: Default False. If True, `precision_recall_curve
<https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_curve.html
#sklearn.metrics.precision_recall_curve>`_ is run on the first batch of data to ensure there are
no issues. User will be warned in case there are any issues computing the function.
Note:
PrecisionRecallCurve expects y to be comprised of 0's and 1's. y_pred must either be probability estimates
or confidence values. To apply an activation to y_pred, use output_transform as shown below:
.. code-block:: python
def sigmoid_output_transform(output):
y_pred, y = output
y_pred = torch.sigmoid(y_pred)
return y_pred, y
avg_precision = PrecisionRecallCurve(sigmoid_output_transform)
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])
y_true = torch.tensor([0, 0, 1, 1])
prec_recall_curve = PrecisionRecallCurve()
prec_recall_curve.attach(default_evaluator, 'prec_recall_curve')
state = default_evaluator.run([[y_pred, y_true]])
print("Precision", [round(i, 4) for i in state.metrics['prec_recall_curve'][0].tolist()])
print("Recall", [round(i, 4) for i in state.metrics['prec_recall_curve'][1].tolist()])
print("Thresholds", [round(i, 4) for i in state.metrics['prec_recall_curve'][2].tolist()])
.. testoutput::
Precision [0.5, 0.6667, 1.0, 1.0, 1.0]
Recall [1.0, 1.0, 1.0, 0.5, 0.0]
Thresholds [0.0474, 0.5987, 0.7109, 0.9997]
"""
def __init__(
self,
output_transform: Callable = lambda x: x,
check_compute_fn: bool = False,
device: Union[str, torch.device] = torch.device("cpu"),
) -> None:
super(PrecisionRecallCurve, self).__init__(
precision_recall_curve_compute_fn, # type: ignore[arg-type]
output_transform=output_transform,
check_compute_fn=check_compute_fn,
device=device,
)
def compute(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: # type: ignore[override]
if len(self._predictions) < 1 or len(self._targets) < 1:
raise NotComputableError("PrecisionRecallCurve must have at least one example before it can be computed.")
if self._result is None:
_prediction_tensor = torch.cat(self._predictions, dim=0)
_target_tensor = torch.cat(self._targets, dim=0)
ws = idist.get_world_size()
if ws > 1:
# All gather across all processes
_prediction_tensor = cast(torch.Tensor, idist.all_gather(_prediction_tensor))
_target_tensor = cast(torch.Tensor, idist.all_gather(_target_tensor))
if idist.get_rank() == 0:
# Run compute_fn on zero rank only
precision, recall, thresholds = cast(Tuple, self.compute_fn(_prediction_tensor, _target_tensor))
precision = torch.tensor(precision, device=_prediction_tensor.device)
recall = torch.tensor(recall, device=_prediction_tensor.device)
# thresholds can have negative strides, not compatible with torch tensors
# https://discuss.pytorch.org/t/negative-strides-in-tensor-error/134287/2
thresholds = torch.tensor(thresholds.copy(), device=_prediction_tensor.device)
else:
precision, recall, thresholds = None, None, None
if ws > 1:
# broadcast result to all processes
precision = idist.broadcast(precision, src=0, safe_mode=True)
recall = idist.broadcast(recall, src=0, safe_mode=True)
thresholds = idist.broadcast(thresholds, src=0, safe_mode=True)
self._result = (precision, recall, thresholds) # type: ignore[assignment]
return cast(Tuple[torch.Tensor, torch.Tensor, torch.Tensor], self._result)
| [
"[email protected]"
] | |
e4fcd91448875458507f16667600fe6a845f31a3 | 403eac0dab1a935bf2ce1833a950a29ea5bc1d0b | /sentencepiece的使用.py | bd9b013f11aa4224b1c676e5b7c2b8b33899b978 | [
"MIT"
] | permissive | Le-Code/nlp-tutorial | ada91aaa0e50ff72bf4a24d70c40f67b58191f94 | c8f46f702cc8643bb252a65b0a8cf375c7bd4704 | refs/heads/master | 2020-09-25T18:39:17.726661 | 2019-11-05T16:29:09 | 2019-11-05T16:29:09 | 226,065,142 | 1 | 0 | null | 2019-12-05T09:30:57 | 2019-12-05T09:30:56 | null | UTF-8 | Python | false | false | 892 | py | '''
% python
>>> import sentencepiece as spm
>>> sp = spm.SentencePieceProcessor()
>>> sp.Load("test/test_model.model")
True
>>> sp.EncodeAsPieces("This is a test")
['▁This', '▁is', '▁a', '▁', 't', 'est']
>>> sp.EncodeAsIds("This is a test")
[284, 47, 11, 4, 15, 400]
>>> sp.DecodePieces(['▁This', '▁is', '▁a', '▁', 't', 'est'])
'This is a test'
>>> sp.DecodeIds([284, 47, 11, 4, 15, 400])
'This is a test'
>>> sp.GetPieceSize()
1000
>>> sp.IdToPiece(2)
'</s>'
>>> sp.PieceToId('</s>')
2
>>> len(sp)
1000
>>> sp['</s>']
2
'''
# Plan: build two vocabularies, one for English and one for Chinese. Keeping
# them separate makes each vocabulary smaller, which speeds up the code and
# improves convergence.
import sentencepiece as spm
sp = spm.SentencePieceProcessor()
help(sp)
'''
Usage: https://github.com/zhangbo2008/sentencepiece
'''
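# A minimal sketch of training the two separate models described above.
# SentencePieceTrainer.Train is the real API; the corpus paths, model
# prefixes and vocab sizes below are hypothetical placeholders.
#
# spm.SentencePieceTrainer.Train(
#     '--input=corpus_en.txt --model_prefix=sp_en --vocab_size=8000')
# spm.SentencePieceTrainer.Train(
#     '--input=corpus_zh.txt --model_prefix=sp_zh --vocab_size=8000')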
| [
"[email protected]"
] | |
4ade1604112ef44e3fd921651808a154f424ddc5 | c7d91529db199322e39e54fe4051a75704ea843e | /算法4/字符串/字符串排序.py | 7194b2ca9bd98f1e8a4ad54cbca048dd9bead5fa | [] | no_license | 2226171237/Algorithmpractice | fc786fd47aced5cd6d96c45f8e728c1e9d1160b7 | 837957ea22aa07ce28a6c23ea0419bd2011e1f88 | refs/heads/master | 2020-12-26T07:20:37.226443 | 2020-09-13T13:31:05 | 2020-09-13T13:31:05 | 237,431,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,639 | py |
def keyIndexSort(arr,R):
    '''Key-indexed counting: stable sort of (value, key) pairs by a small integer key in [0, R).'''
aux=arr.copy()
count=[0 for _ in range(R+1)]
for val,key in arr:
count[key+1]+=1
for i in range(1,R+1):
count[i]+=count[i-1]
for val,key in arr:
aux[count[key]]=(val,key)
count[key]+=1
for i in range(len(aux)):
arr[i]=aux[i]
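# Worked example for the counting sort above: with R=3, equal-key items keep
# their input order (the sort is stable), e.g.
#   [('a',2), ('b',0), ('c',2), ('d',1)] -> [('b',0), ('d',1), ('a',2), ('c',2)]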
class LSD:
    '''LSD (least-significant-digit-first) string sort for fixed-length strings.'''
def sort(self,arr,w):
'''
        :param list[str] arr: list of strings, each of length w
        :param int w: fixed length of every string in arr
        :return: None (arr is sorted in place)
'''
N=len(arr)
R=256
aux=arr.copy()
for d in range(w-1,-1,-1):
count=[0 for _ in range(R+1)]
for x in arr:
count[ord(x[d])+1]+=1
for r in range(1,R+1):
count[r]+=count[r-1]
for i in range(N):
key=ord(arr[i][d])
aux[count[key]]=arr[i]
count[key]+=1
for i in range(N):
arr[i]=aux[i]
class InsertSort:
    def sort(self,arr,low,high,d):
        '''Insertion sort of arr[low..high], comparing strings from character d onward.'''
        for i in range(low,high+1):
            for j in range(i,low,-1):
                if arr[j][d:]<arr[j-1][d:]:
                    arr[j-1],arr[j]=arr[j],arr[j-1]
                else:
                    break  # arr[low..i] is already ordered; stop early
class MSD:
    '''MSD (most-significant-digit-first) string sort.'''
    def __init__(self):
        self.R=256  # radix: size of the (extended ASCII) alphabet
        self.H=10   # cutoff: use insertion sort for subarrays this small
        self.insertsort=InsertSort()
def sort(self,arr):
self.aux=arr.copy()
self.subsort(arr,0,len(arr)-1,0)
    def key(self,s,d):
        '''d-th character of s as an int, or -1 as the end-of-string sentinel.'''
        if d>=len(s):
            return -1
        return ord(s[d])
def subsort(self,arr,low,high,d):
if high<=low+self.H:
self.insertsort.sort(arr,low,high,d)
return
counts=[0 for _ in range(self.R+2)]
for i in range(low,high+1):
key=self.key(arr[i],d)
            counts[key+2]+=1  # shift by 2: after the prefix sum, counts[key+1] is the start slot for this key
for r in range(1,self.R+2):
counts[r]+=counts[r-1]
for i in range(low,high+1):
key=self.key(arr[i],d)
self.aux[counts[key+1]]=arr[i]
counts[key+1]+=1
for i in range(low,high+1):
arr[i]=self.aux[i-low]
for r in range(0,self.R):
self.subsort(arr,low+counts[r],low+counts[r+1]-1,d+1)
class Quick3string:
    '''Three-way string quicksort.'''
def key(self,s,d):
if len(s)<=d:
return -1
else:
return ord(s[d])
def sort(self,arr):
self._subsort(arr,0,len(arr)-1,0)
def _subsort(self,arr,low,high,d):
if low>=high:
return
v=self.key(arr[low],d)
lt=low
i=low+1
gt=high
while i<=gt:
t=self.key(arr[i],d)
if t<v:
arr[i],arr[lt]=arr[lt],arr[i]
i+=1
lt+=1
elif t==v:
i+=1
else:
arr[i],arr[gt]=arr[gt],arr[i]
gt-=1
self._subsort(arr,low,lt-1,d)
        if v>=0:  # recurse on the middle part only if d is not past the end of the key
self._subsort(arr,lt,gt,d+1)
self._subsort(arr,gt+1,high,d)
if __name__ == '__main__':
arr=[('lijie',1),('liuzi',2),('zhangliu',2),('miwmi',1),('liuzhao',3),('xiaozju',2),('liejo',3),('liuzhao',0)]
keyIndexSort(arr,4)
print(arr)
arr=['4PGC938','2IYE230','3CI0720','1ICK750','10HV845','4JZY524','1ICK750','3CI0720','10HV845']
s=LSD()
    s.sort(arr,7)
    print(arr)
s=MSD()
arr=['she','by','shells','the','sea','are','surely','seashells']
s.sort(arr)
print(arr)
qs=Quick3string()
arr = ['she', 'by', 'shells', 'the', 'sea', 'are', 'surely', 'seashells']
qs.sort(arr)
print(arr)
| [
"[email protected]"
] | |
b3f161ea10b8165de2e33316af4d1adbbe9c50cd | 24d8cf871b092b2d60fc85d5320e1bc761a7cbe2 | /SpamBayes/rev3250-3267/left-trunk-3267/spambayes/languages/es_AR/DIALOGS/i18n_dialogs.py | fc3a85b4790422e4c32722ca9c794fc869ce4704 | [] | no_license | joliebig/featurehouse_fstmerge_examples | af1b963537839d13e834f829cf51f8ad5e6ffe76 | 1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad | refs/heads/master | 2016-09-05T10:24:50.974902 | 2013-03-28T16:28:47 | 2013-03-28T16:28:47 | 9,080,611 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 26,914 | py | _rc_size_=33884
_rc_mtime_=1112074549
try:
_
except NameError:
def _(s):
return s
class FakeParser:
dialogs = {'IDD_MANAGER': [[_('SpamBayes Manager'), (0, 0, 275, 308), -1865940928, 1024, (8, 'Tahoma')], [128, _('Close'), 1, (216, 287, 50, 14), 1342177281], [128, _('Cancel'), 2, (155, 287, 50, 14), 1073741824], ['SysTabControl32', '', 1068, (8, 7, 258, 276), 1342177280], [128, _('About'), 1072, (8, 287, 50, 14), 1342177280]], 'IDD_DIAGNOSTIC': [[_('Diagnostics'), (0, 0, 183, 98), -1865940928, 1024, (8, 'Tahoma')], [130, _('These advanced options are for diagnostic or debugging purposes only. You should only change these options if specifically asked to, or you know exactly what they mean.'), -1, (5, 3, 174, 36), 1342177280], [130, _('Log file verbosity'), -1, (5, 44, 56, 8), 1342177280], [129, '', 1061, (73, 42, 40, 14), 1350566016], [128, _('View log...'), 1093, (129, 41, 50, 14), 1342177280], [128, _('Save Spam Score'), 1048, (5, 63, 72, 10), 1342242819], [128, _('Cancel'), 2, (69, 79, 50, 14), 1073741824], [128, _('Close'), 1, (129, 79, 50, 14), 1342177281]], 'IDD_FILTER_SPAM': [[_('Spam'), (0, 0, 251, 147), 1355284672, None, (8, 'Tahoma')], [130, _('Filter the following folders as messages arrive'), -1, (8, 9, 168, 11), 1342177280], [130, _('Folder names...\\nLine 2'), 1038, (7, 20, 177, 12), 1342312972], [128, _('Browse...'), 1039, (194, 19, 50, 14), 1342177280], [128, _('Certain Spam'), -1, (7, 43, 237, 80), 1342177287], [130, _('To be considered certain spam, a message must score at least'), -1, (13, 52, 212, 10), 1342177280], ['msctls_trackbar32', '', 1023, (13, 62, 165, 22), 1342242821], [129, '', 1024, (184, 63, 51, 14), 1350566016], [130, _('and these messages should be:'), -1, (13, 82, 107, 10), 1342177280], [133, '', 1025, (13, 93, 55, 40), 1344339971], [130, _('to folder'), -1, (75, 95, 31, 10), 1342177280], [130, _('Folder names...'), 1027, (120, 93, 59, 14), 1342312972], [128, _('Browse'), 1028, (184, 93, 50, 14), 1342177280], [128, _('Mark spam as read'), 1047, (13, 110, 81, 10), 1342242819]], 'IDD_TRAINING': [[_('Training'), (0, 0, 252, 257), 1355284672, 1024, (8, 'Tahoma')], [128, '', -1, (5, 1, 243, 113), 1342177287], [130, _('Folders with known good messages.'), -1, (11, 11, 131, 11), 1342177280], [130, '', 1002, (11, 21, 175, 12), 1342181900], [128, _('&Browse...'), 1004, (192, 20, 50, 14), 1342177280], [130, _('Folders with spam or other junk messages.'), -1, (11, 36, 171, 9), 1342177280], [130, _('Static'), 1003, (11, 46, 174, 12), 1342312972], [128, _('Brow&se...'), 1005, (192, 46, 50, 14), 1342177280], [128, _('Score &messages after training'), 1008, (11, 64, 111, 10), 1342242819], [128, _('&Rebuild entire database'), 1007, (137, 64, 92, 10), 1342242819], ['msctls_progress32', _('Progress1'), 1000, (11, 76, 231, 11), 1350565888], [128, _('&Start Training'), 1006, (11, 91, 54, 14), 1342193664], [130, _('training status training status training status training status training status training status training status '), 1001, (75, 89, 149, 17), 1342177280], [128, _('Incremental Training'), -1, (4, 117, 244, 87), 1342177287], [128, _('Train that a message is good when it is moved from a spam folder back to the Inbox.'), 1010, (11, 127, 204, 18), 1342251011], [130, _("Clicking 'Not Spam' button should"), -1, (10, 148, 115, 10), 1342177280], [133, '', 1075, (127, 145, 114, 54), 1344339971], [128, _('Train that a message is spam when it is moved to the spam folder.'), 1011, (11, 163, 204, 16), 1342251011], [130, _("Clicking 'Spam' button should"), -1, (10, 183, 104, 10), 1342177280], [133, '', 1074, (127, 180, 114, 54), 1344339971]], 'IDD_WIZARD': [[_('SpamBayes 
Configuration Wizard'), (0, 0, 384, 190), -1865940800, 1024, (8, 'Tahoma')], [128, _('Cancel'), 2, (328, 173, 50, 14), 1342177280], [128, _('<< Back'), 1069, (216, 173, 50, 14), 1342177280], [128, _('Next>>,Finish'), 1077, (269, 173, 50, 14), 1342177281], [130, '', 1078, (75, 4, 303, 167), 1342177298], [130, '125', 1092, (0, 0, 69, 190), 1342177294]], 'IDD_WIZARD_FOLDERS_WATCH': [['', (0, 0, 284, 162), 1354760384, 1024, (8, 'Tahoma')], [128, _('Browse...'), 1039, (225, 134, 50, 14), 1342177280], [130, _('Folders that receive new messages'), -1, (20, 4, 247, 14), 1342177280], [130, _('SpamBayes needs to know what folders are used to receive new messages. In most cases, this will be your Inbox, but you may also specify additional folders to be watched for spam.'), -1, (20, 21, 247, 25), 1342177280], [130, _('The following folders will be watched for new messages. Use the Browse button to change the list, or Next if the list of folders is correct.'), -1, (20, 79, 247, 20), 1342177280], [130, _('If you use the Outlook rule wizard to move messages into folders, you may like to select these folders in addition to your inbox.'), -1, (20, 51, 241, 20), 1342177280], [129, '', 1038, (20, 100, 195, 48), 1350568068]], 'IDD_WIZARD_FINISHED_TRAINED': [['', (0, 0, 284, 162), 1354760384, 1024, (8, 'Tahoma')], [130, _('Congratulations'), -1, (20, 4, 247, 14), 1342177280], [130, _('SpamBayes has been successfully trained and configured. You should find the system is immediately effective at filtering spam.'), 1035, (20, 35, 247, 26), 1342177280], [130, _("Even though SpamBayes has been trained, it does continue to learn - please ensure you regularly check your Unsure folder, and use the 'Spam' or 'Not Spam' buttons as appropriate."), -1, (20, 68, 249, 30), 1342177280], [130, _('Click Finish to close the wizard.'), -1, (20, 104, 148, 9), 1342177280]], 'IDD_WIZARD_FOLDERS_TRAIN': [['', (0, 0, 284, 162), 1354760384, 1024, (8, 'Tahoma')], [128, _('Browse...'), 1004, (208, 49, 60, 15), 1342177280], [130, _('Training'), -1, (20, 4, 247, 10), 1342177280], [130, _('Please select the folders with the pre-sorted good messages and the folders with the pre-sorted spam messages.'), -1, (20, 16, 243, 16), 1342177280], [129, '', 1083, (20, 49, 179, 14), 1350568064], [130, _('Examples of Spam, or unwanted messages can be found in'), -1, (20, 71, 198, 8), 1342177280], [129, '', 1027, (20, 81, 177, 14), 1350568064], [130, _('Examples of good messages can be found in'), -1, (20, 38, 153, 8), 1342177280], [128, _('Browse...'), 1005, (208, 81, 60, 15), 1342177280], [130, _('If you have not pre-sorted your messages, or already have training information you wish to keep, please select the Back button and indicate you have not prepared for SpamBayes.'), -1, (20, 128, 243, 26), 1342177280], [128, _('Score messages when training is complete'), 1008, (20, 108, 163, 16), 1342242819]], 'IDD_WIZARD_TRAIN': [['', (0, 0, 284, 162), 1354760384, 1024, (8, 'Tahoma')], [130, _('Training'), -1, (20, 4, 247, 14), 1342177280], [130, _('SpamBayes is training on your good and spam messages.'), -1, (20, 22, 247, 16), 1342177280], ['msctls_progress32', '', 1000, (20, 45, 255, 11), 1350565888], [130, _('(progress text)'), 1001, (20, 61, 257, 10), 1342177280]], 'IDD_WIZARD_FINISHED_TRAIN_LATER': [['', (0, 0, 284, 162), 1354760384, 1024, (8, 'Tahoma')], [130, _('Configuration suspended'), -1, (20, 4, 247, 14), 1342177280], [130, _('To perform initial training, you should create a folder that contains only examples of good messages, and another that 
contains only examples of spam.'), -1, (20, 17, 247, 27), 1342177280], [130, _('Click Finish to close the wizard.'), -1, (20, 145, 148, 9), 1342177280], [130, _('For examples of good messages, you may like to use your Inbox - however, it is important you remove all spam from this folder before you commence'), -1, (20, 42, 247, 26), 1342177280], [130, _('training. If you have too much spam in your Inbox, you may like to create a temporary folder and copy some examples to it.'), -1, (20, 58, 247, 17), 1342177280], [130, _('For examples of spam messages, you may like to look through your Deleted Items folder, and your Inbox. However, you will not be able to specify the Deleted Items folder as examples of spam, so you will need to move them to a folder you create.'), -1, (20, 80, 247, 35), 1342177280], [130, _('When you are finished, open the SpamBayes Manager via the SpamBayes toolbar, and re-start the Configuration Wizard.'), -1, (20, 121, 245, 17), 1342177280]], 'IDD_FOLDER_SELECTOR': [[_('Dialog'), (0, 0, 247, 215), -1865940800, None, (8, 'Tahoma')], [130, _('&Folders:'), -1, (7, 7, 47, 9), 1342177280], ['SysTreeView32', '', 1040, (7, 21, 172, 140), 1350631735], [128, _('(sub)'), 1041, (7, 167, 126, 9), 1342242819], [130, _('(status1)'), 1043, (7, 180, 220, 9), 1342177280], [130, _('(status2)'), 1044, (7, 194, 220, 9), 1342177280], [128, _('OK'), 1, (190, 21, 50, 14), 1342177281], [128, _('Cancel'), 2, (190, 39, 50, 14), 1342177280], [128, _('C&lear All'), 1042, (190, 58, 50, 14), 1342177280], [128, _('&New folder'), 1046, (190, 77, 50, 14), 1342177280]], 'IDD_STATISTICS': [[_('Statistics'), (0, 0, 248, 257), 1354760256, None, (8, 'Tahoma')], [128, _('Statistics'), -1, (7, 3, 241, 229), 1342177287], [130, _('some stats\\nand some more\\nline 3\\nline 4\\nline 5'), 1095, (12, 12, 230, 204), 1342177280], [128, _('Reset Statistics'), 1096, (178, 238, 70, 14), 1342177280], [130, _('Last reset:'), -1, (7, 241, 36, 8), 1342177280], [130, _('<<<Date>>>'), 1097, (47, 241, 107, 8), 1342177280]], 'IDD_ADVANCED': [[_('Advanced'), (0, 0, 248, 257), 1355284672, 1024, (8, 'Tahoma')], [128, _('Filter timer'), -1, (7, 3, 234, 117), 1342177287], ['msctls_trackbar32', '', 1056, (16, 36, 148, 22), 1342242821], [130, _('Processing start delay'), -1, (16, 26, 101, 8), 1342177280], [129, '', 1057, (165, 39, 40, 14), 1350566016], [130, _('seconds'), -1, (208, 41, 28, 8), 1342177280], ['msctls_trackbar32', '', 1058, (16, 73, 148, 22), 1342242821], [130, _('Delay between processing items'), -1, (16, 62, 142, 8), 1342177280], [129, '', 1059, (165, 79, 40, 14), 1350566016], [130, _('seconds'), -1, (207, 82, 28, 8), 1342177280], [128, _('Only for folders that receive new mail'), 1060, (16, 100, 217, 10), 1342242819], [128, _('Show Data Folder'), 1071, (7, 238, 70, 14), 1342177280], [128, _('Enable background filtering'), 1091, (16, 12, 162, 10), 1342242819], [128, _('Diagnostics...'), 1080, (171, 238, 70, 14), 1342177280]], 'IDD_WIZARD_FINISHED_UNCONFIGURED': [['', (0, 0, 284, 162), 1354760384, 1024, (8, 'Tahoma')], [130, _('Configuration cancelled'), -1, (20, 4, 247, 14), 1342177280], [130, _('The main SpamBayes options will now be displayed. 
You must define your folders and enable SpamBayes before it will begin filtering mail.'), -1, (20, 29, 247, 16), 1342177280], [130, _('Click Finish to close the wizard.'), -1, (20, 139, 148, 9), 1342177280]], 'IDD_WIZARD_WELCOME': [['', (0, 0, 284, 162), 1354760384, 1024, (8, 'Tahoma')], [130, _('Welcome to the SpamBayes configuration wizard'), -1, (20, 4, 191, 14), 1342177280], [130, _('This wizard will help you configure the SpamBayes Outlook addin. Please indicate how you have prepared for this application.'), -1, (20, 20, 255, 18), 1342177280], [128, _("I haven't prepared for SpamBayes at all."), 1081, (20, 42, 190, 11), 1342309385], [128, _('I have already sorted good messages (ham) and spam messages into folders that are suitable for training purposes.'), -1, (20, 59, 255, 18), 1342186505], [128, _('I would prefer to configure SpamBayes manually.'), -1, (20, 82, 187, 12), 1342178313], [130, _('If you would like more information about training and configuring SpamBayes, click the About button.'), -1, (20, 103, 185, 20), 1342177280], [128, _('About...'), 1017, (215, 104, 60, 15), 1342177280], [130, _('If you cancel the wizard, you can access it again via the SpamBayes Manager, available from the SpamBayes toolbar.'), -1, (20, 137, 232, 17), 1342177280]], 'IDD_FILTER_NOW': [[_('Filter Now'), (0, 0, 244, 185), -1865940928, 1024, (8, 'Tahoma')], [130, _('Filter the following folders'), -1, (8, 9, 168, 11), 1342177280], [130, _('Folder names...\\nLine 2'), 1036, (7, 20, 172, 12), 1342181900], [128, _('Browse...'), 1037, (187, 19, 50, 14), 1342177280], [128, _('Filter action'), -1, (7, 38, 230, 40), 1342308359], [128, _('Perform all filter actions'), 1019, (15, 49, 126, 10), 1342373897], [128, _("Score messages, but don't perform filter action"), 1018, (15, 62, 203, 10), 1342177289], [128, _('Restrict the filter to'), -1, (7, 84, 230, 35), 1342308359], [128, _('Unread mail'), 1020, (15, 94, 149, 9), 1342242819], [128, _('Mail never previously spam filtered'), 1021, (15, 106, 149, 9), 1342242819], ['msctls_progress32', _('Progress1'), 1000, (7, 129, 230, 11), 1350565888], [130, _('Static'), 1001, (7, 144, 227, 10), 1342177280], [128, _('Start Filtering'), 1006, (7, 161, 52, 14), 1342177281], [128, _('Close'), 2, (187, 162, 50, 14), 1342177280]], 'IDD_WIZARD_TRAINING_IS_IMPORTANT': [['', (0, 0, 284, 162), 1354760384, 1024, (8, 'Tahoma')], [130, _('SpamBayes will not be effective until it is trained.'), -1, (11, 8, 191, 14), 1342177280], [128, _('About Training...'), 1017, (209, 140, 65, 15), 1342177280], [130, _('SpamBayes is a system that learns about good and bad mail based on examples you provide. It comes with no built-in rules, so must have some training information before it will be effective.'), -1, (11, 21, 263, 30), 1342177280], [130, _("In this case, SpamBayes will begin by filtering all mail to an 'Unsure' folder. You can then use the 'Spam' and 'Not Spam' buttons to train each message as it arrives. Slowly SpamBayes will learn about your mail."), -1, (22, 61, 252, 29), 1342177280], [130, _('This option will close the wizard, and provide instructions how to sort your mail. 
You will then be able to configure SpamBayes and have it be immediately effective at filtering your mail'), -1, (22, 106, 252, 27), 1342177280], [130, _('For more information, click the About Training button.'), -1, (11, 143, 187, 12), 1342177280], [128, _('I want to continue without training, and let SpamBayes learn as it goes'), 1088, (11, 50, 263, 11), 1342308361], [128, _('I will pre-sort some good and spam messages, and configure SpamBayes later'), 1089, (11, 92, 263, 11), 1342177289]], 'IDD_FILTER_UNSURE': [[_('Possible Spam'), (0, 0, 249, 124), 1355284672, None, (8, 'Tahoma')], [130, _('To be considered uncertain, a message must score at least'), -1, (12, 11, 212, 10), 1342177280], ['msctls_trackbar32', '', 1029, (12, 18, 165, 20), 1342242821], [129, '', 1030, (183, 24, 54, 14), 1350566016], [130, _('and these messages should be:'), -1, (12, 38, 107, 10), 1342177280], [133, '', 1031, (12, 49, 55, 40), 1344339971], [130, _('to folder'), -1, (74, 52, 31, 10), 1342177280], [130, _('(folder name)'), 1033, (119, 49, 59, 14), 1342312972], [128, _('&Browse'), 1034, (183, 49, 50, 14), 1342177280], [128, _('Mark possible spam as read'), 1051, (12, 70, 101, 10), 1342242819]], 'IDD_WIZARD_FINISHED_UNTRAINED': [['', (0, 0, 284, 162), 1354760384, 1024, (8, 'Tahoma')], [130, _('Congratulations'), -1, (20, 4, 247, 14), 1342177280], [130, _('SpamBayes is now configured and ready to start learning about your Spam'), -1, (20, 22, 247, 16), 1342177280], [130, _("As SpamBayes has not been trained, all new mail will arrive in your Unsure folder. As each message arrives, you should use the 'Spam' or 'Not Spam' toolbar buttons as appropriate."), -1, (20, 42, 247, 27), 1342177280], [130, _("If you wish to speed up the training process, you can move all the existing Spam from your Inbox to the new Spam folder, then select 'Training' from the SpamBayes manager."), -1, (20, 83, 247, 31), 1342177280], [130, _('As you train, you will find the accuracy of SpamBayes increases.'), -1, (20, 69, 247, 15), 1342177280], [130, _('Click Finish to close the wizard.'), -1, (20, 121, 148, 9), 1342177280]], 'IDD_GENERAL': [[_('General'), (0, 0, 253, 257), 1355284672, 1024, (8, 'Tahoma')], [130, _('SpamBayes Version Here'), 1009, (6, 54, 242, 8), 1342177280], [130, _("SpamBayes requiere entrenamiento previo para ser efectivo. 
Cliquee en la solapa 'Entrenamiento' o use el Asistente de Configuraci\xf3n para entrenar."), -1, (6, 67, 242, 17), 1342177280], [130, _('Estado de la base de datos de entrenamiento:'), -1, (6, 90, 222, 8), 1342177280], [130, _('123 spam messages; 456 good messages\\r\\nLine2\\r\\nLine3'), 1035, (6, 101, 242, 27), 1342181376], [128, _('Habilitar SpamBayes'), 1013, (6, 173, 97, 11), 1342242819], [130, _('Certain spam is moved to Folder1\\nPossible spam is moved too'), 1014, (6, 146, 242, 19), 1342181376], [128, _('Reiniciar la Configuraci\xf3n...'), 1073, (6, 238, 106, 15), 1342177280], [128, _('Asistente de Configuraci\xf3n...'), 1070, (142, 238, 106, 15), 1342177280], [130, _('Estado del filtro:'), -1, (6, 135, 222, 8), 1342177280], [130, '1062', 1063, (0, 2, 275, 52), 1342179342]], 'IDD_FILTER': [[_('Filtering'), (0, 0, 249, 257), 1355284672, 1024, (8, 'Tahoma')], [130, _('Filter the following folders as messages arrive'), -1, (8, 4, 168, 11), 1342177280], [130, _('Folder names...\\nLine 2'), 1038, (7, 16, 177, 12), 1342312972], [128, _('Browse...'), 1039, (192, 14, 50, 14), 1342177280], [128, _('Certain Spam'), -1, (7, 33, 235, 80), 1342177287], [130, _('To be considered certain spam, a message must score at least'), -1, (13, 42, 212, 10), 1342177280], ['msctls_trackbar32', _('Slider1'), 1023, (13, 52, 165, 22), 1342242821], [129, '', 1024, (184, 53, 51, 14), 1350566016], [130, _('and these messages should be:'), -1, (13, 72, 107, 10), 1342177280], [133, '', 1025, (12, 83, 55, 40), 1344339971], [130, _('to folder'), -1, (71, 85, 28, 10), 1342177280], [130, _('Folder names...'), 1027, (102, 83, 77, 14), 1342312972], [128, _('Browse'), 1028, (184, 83, 50, 14), 1342177280], [128, _('Possible Spam'), -1, (6, 117, 235, 81), 1342177287], [130, _('To be considered uncertain, a message must score at least'), -1, (12, 128, 212, 10), 1342177280], ['msctls_trackbar32', _('Slider1'), 1029, (12, 135, 165, 20), 1342242821], [129, '', 1030, (183, 141, 54, 14), 1350566016], [130, _('and these messages should be:'), -1, (12, 155, 107, 10), 1342177280], [133, '', 1031, (12, 166, 55, 40), 1344339971], [130, _('to folder'), -1, (71, 169, 27, 10), 1342177280], [130, _('(folder name)'), 1033, (102, 166, 77, 14), 1342312972], [128, _('&Browse'), 1034, (184, 166, 50, 14), 1342177280], [128, _('Mark spam as read'), 1047, (13, 100, 81, 10), 1342242819], [128, _('Mark possible spam as read'), 1051, (12, 186, 101, 10), 1342242819], [128, _('Certain Good'), -1, (6, 203, 235, 48), 1342177287], [130, _('These messages should be:'), -1, (12, 215, 107, 10), 1342177280], [133, '', 1032, (12, 228, 55, 40), 1344339971], [130, _('to folder'), -1, (71, 230, 27, 10), 1342177280], [130, _('(folder name)'), 1083, (102, 228, 77, 14), 1342312972], [128, _('&Browse'), 1004, (184, 228, 50, 14), 1342177280]], 'IDD_NOTIFICATIONS': [[_('Notifications'), (0, 0, 248, 257), 1354760256, None, (8, 'Tahoma')], [128, _('New Mail Sounds'), -1, (7, 3, 241, 229), 1342177287], [128, _('Enable new mail notification sounds'), 1098, (14, 17, 129, 10), 1342242819], [130, _('Good sound:'), -1, (14, 31, 42, 8), 1342177280], [129, '', 1094, (14, 40, 174, 14), 1350566016], [128, _('Browse...'), 1101, (192, 40, 50, 14), 1342177280], [130, _('Unsure sound:'), -1, (14, 58, 48, 8), 1342177280], [129, '', 1095, (14, 67, 174, 14), 1350566016], [128, _('Browse...'), 1102, (192, 67, 50, 14), 1342177280], [130, _('Spam sound:'), -1, (14, 85, 42, 8), 1342177280], [129, '', 1096, (14, 94, 174, 14), 1350566016], [128, _('Browse...'), 1103, (192, 94, 50, 14), 
1342177280], [130, _('Time to wait for additional messages:'), -1, (14, 116, 142, 8), 1342177280], ['msctls_trackbar32', '', 1099, (14, 127, 148, 22), 1342242821], [129, '', 1100, (163, 133, 40, 14), 1350566016], [130, _('seconds'), -1, (205, 136, 28, 8), 1342177280]], 'IDD_WIZARD_FOLDERS_REST': [['', (0, 0, 284, 162), 1354760384, 1024, (8, 'Tahoma')], [128, _('Browse...'), 1005, (208, 85, 60, 15), 1342177280], [130, _('Spam and Unsure Folders'), -1, (20, 4, 247, 14), 1342177280], [130, _("SpamBayes uses two folders to manage your Spam - a folder where 'certain' spam is stored, and another for unsure messages."), -1, (20, 20, 247, 22), 1342177280], [130, _('If you enter a folder name and it does not exist, it will be automatically created. If you would prefer to select an existing folder, click the Browse button.'), -1, (20, 44, 243, 24), 1342177280], [129, '', 1027, (20, 85, 179, 14), 1350566016], [130, _('Unsure messages will be delivered to a folder named'), -1, (20, 105, 186, 12), 1342177280], [129, '', 1033, (20, 117, 177, 14), 1350566016], [130, _('Spam will be delivered to a folder named'), -1, (20, 72, 137, 8), 1342177280], [128, _('Browse...'), 1034, (208, 117, 60, 15), 1342177280]]}
ids = {'IDC_DELAY1_SLIDER': 1056, 'IDC_PROGRESS': 1000, 'IDD_MANAGER': 101, 'IDD_DIAGNOSTIC': 113, 'IDD_TRAINING': 102, 'IDC_DELAY2_TEXT': 1059, 'IDC_DELAY1_TEXT': 1057, 'IDD_WIZARD': 114, 'IDC_BROWSE_SPAM_SOUND': 1103, 'IDC_STATIC_HAM': 1002, 'IDC_PROGRESS_TEXT': 1001, 'IDD_GENERAL': 108, 'IDC_BROWSE_UNSURE_SOUND': 1102, 'IDC_TAB': 1068, 'IDC_FOLDER_UNSURE': 1033, 'IDC_VERBOSE_LOG': 1061, 'IDC_EDIT1': 1094, 'IDC_BROWSE': 1037, 'IDC_BACK_BTN': 1069, 'IDD_WIZARD_FINISHED_UNCONFIGURED': 119, 'IDC_ACTION_CERTAIN': 1025, 'IDC_BUT_ACT_ALL': 1019, 'IDD_FILTER_NOW': 104, 'IDC_BROWSE_HAM_SOUND': 1101, 'IDC_MARK_SPAM_AS_READ': 1047, 'IDC_RECOVER_RS': 1075, 'IDC_STATIC': -1, 'IDC_PAGE_PLACEHOLDER': 1078, 'IDC_BROWSE_WATCH': 1039, 'IDC_ACCUMULATE_DELAY_TEXT': 1100, 'IDC_FOLDER_HAM': 1083, 'IDD_WIZARD_FOLDERS_REST': 117, 'IDC_SHOW_DATA_FOLDER': 1071, 'IDC_BUT_ACT_SCORE': 1018, '_APS_NEXT_RESOURCE_VALUE': 129, '_APS_NEXT_SYMED_VALUE': 101, 'IDC_SLIDER_CERTAIN': 1023, 'IDC_BUT_UNREAD': 1020, 'IDC_BUT_ABOUT': 1017, 'IDC_BUT_RESCORE': 1008, 'IDC_BUT_SEARCHSUB': 1041, 'IDC_BUT_TRAIN_FROM_SPAM_FOLDER': 1010, 'IDC_LAST_RESET_DATE': 1097, 'IDD_WIZARD_FOLDERS_TRAIN': 120, 'IDC_BUT_FILTER_ENABLE': 1013, 'IDC_ABOUT_BTN': 1072, 'IDD_WIZARD_FINISHED_TRAINED': 122, 'IDD_FOLDER_SELECTOR': 105, 'IDD_STATISTICS': 107, 'IDC_LIST_FOLDERS': 1040, 'IDB_SBWIZLOGO': 125, 'IDC_BUT_VIEW_LOG': 1093, 'IDC_STATUS2': 1044, 'IDC_STATUS1': 1043, 'IDCANCEL': 2, 'IDC_BROWSE_HAM': 1004, 'IDC_BROWSE_SPAM': 1005, 'IDD_WIZARD_FINISHED_UNTRAINED': 116, 'IDC_MARK_UNSURE_AS_READ': 1051, 'IDC_BROWSE_HAM_SOUND2': 1103, 'IDC_BUT_WIZARD': 1070, 'IDC_VERSION': 1009, 'IDC_FOLDER_NAMES': 1036, 'IDC_BUT_TIMER_ENABLED': 1091, 'IDC_SLIDER_UNSURE': 1029, 'IDC_BUT_NEW': 1046, 'IDC_FOLDER_WATCH': 1038, 'IDC_BUT_UNTRAINED': 1088, 'IDC_STATIC_SPAM': 1003, 'IDC_EDIT_UNSURE': 1030, 'IDC_BUT_CLEARALL': 1042, 'IDC_BUT_UNSEEN': 1021, 'IDD_WIZARD_FOLDERS_WATCH': 118, 'IDC_HAM_SOUND': 1094, 'IDC_EDIT_CERTAIN': 1024, 'IDC_BUT_FILTER_DEFINE': 1016, 'IDC_FORWARD_BTN': 1077, '_APS_NEXT_CONTROL_VALUE': 1102, 'IDC_INBOX_TIMER_ONLY': 1060, 'IDD_ADVANCED': 106, 'IDC_WIZ_GRAPHIC': 1092, 'IDD_FILTER_UNSURE': 40002, 'IDC_DEL_SPAM_RS': 1074, 'IDB_FOLDERS': 127, 'IDC_BUT_PREPARATION': 1081, 'IDC_DELAY2_SLIDER': 1058, 'IDC_ACCUMULATE_DELAY_SLIDER': 1099, 'IDC_SAVE_SPAM_SCORE': 1048, 'IDC_FOLDER_CERTAIN': 1027, 'IDB_SBLOGO': 1062, 'IDC_BROWSE_UNSURE': 1034, 'IDC_STATISTICS': 1095, 'IDC_BUT_RESET_STATS': 1096, 'IDC_BUT_TRAIN_TO_SPAM_FOLDER': 1011, 'IDD_FILTER_SPAM': 110, 'IDC_BUT_RESET': 1073, 'IDD_NOTIFICATIONS': 128, 'IDC_ACTION_UNSURE': 1031, 'IDD_WIZARD_TRAIN': 121, 'IDD_WIZARD_FINISHED_TRAIN_LATER': 124, 'IDC_ACTION_HAM': 1032, 'IDC_BUT_REBUILD': 1007, '_APS_NEXT_COMMAND_VALUE': 40001, 'IDC_ENABLE_SOUNDS': 1098, 'IDC_SPAM_SOUND': 1096, 'IDC_UNSURE_SOUND': 1095, 'IDD_WIZARD_TRAINING_IS_IMPORTANT': 123, 'IDC_TRAINING_STATUS': 1035, 'IDD_WIZARD_WELCOME': 115, 'IDC_BUT_TRAIN': 1089, 'IDC_START': 1006, 'IDD_FILTER': 103, 'IDC_LOGO_GRAPHIC': 1063, 'IDC_FILTER_STATUS': 1014, 'IDOK': 1, 'IDC_BROWSE_CERTAIN': 1028, 'IDC_BUT_SHOW_DIAGNOSTICS': 1080, 'IDC_BUT_TRAIN_NOW': 1012}
names = {1024: 'IDC_EDIT_CERTAIN', 1: 'IDOK', 2: 'IDCANCEL', 1027: 'IDC_FOLDER_CERTAIN', 1028: 'IDC_BROWSE_CERTAIN', 1029: 'IDC_SLIDER_UNSURE', 1030: 'IDC_EDIT_UNSURE', 1031: 'IDC_ACTION_UNSURE', 1032: 'IDC_ACTION_HAM', 1033: 'IDC_FOLDER_UNSURE', 1034: 'IDC_BROWSE_UNSURE', 1035: 'IDC_TRAINING_STATUS', 1036: 'IDC_FOLDER_NAMES', 1037: 'IDC_BROWSE', 1038: 'IDC_FOLDER_WATCH', 1039: 'IDC_BROWSE_WATCH', 1040: 'IDC_LIST_FOLDERS', 1041: 'IDC_BUT_SEARCHSUB', 1042: 'IDC_BUT_CLEARALL', 1043: 'IDC_STATUS1', 1044: 'IDC_STATUS2', 1046: 'IDC_BUT_NEW', 1047: 'IDC_MARK_SPAM_AS_READ', 1048: 'IDC_SAVE_SPAM_SCORE', 1051: 'IDC_MARK_UNSURE_AS_READ', 1056: 'IDC_DELAY1_SLIDER', 1057: 'IDC_DELAY1_TEXT', 1058: 'IDC_DELAY2_SLIDER', 1059: 'IDC_DELAY2_TEXT', 1060: 'IDC_INBOX_TIMER_ONLY', 1061: 'IDC_VERBOSE_LOG', 1062: 'IDB_SBLOGO', 1063: 'IDC_LOGO_GRAPHIC', 1068: 'IDC_TAB', 1069: 'IDC_BACK_BTN', 1070: 'IDC_BUT_WIZARD', 1071: 'IDC_SHOW_DATA_FOLDER', 1072: 'IDC_ABOUT_BTN', 1073: 'IDC_BUT_RESET', 1074: 'IDC_DEL_SPAM_RS', 1075: 'IDC_RECOVER_RS', 1077: 'IDC_FORWARD_BTN', 1078: 'IDC_PAGE_PLACEHOLDER', 1080: 'IDC_BUT_SHOW_DIAGNOSTICS', 1081: 'IDC_BUT_PREPARATION', 1083: 'IDC_FOLDER_HAM', 1088: 'IDC_BUT_UNTRAINED', 1089: 'IDC_BUT_TRAIN', 40002: 'IDD_FILTER_UNSURE', 1091: 'IDC_BUT_TIMER_ENABLED', 1025: 'IDC_ACTION_CERTAIN', 1093: 'IDC_BUT_VIEW_LOG', 1094: 'IDC_EDIT1', 1095: 'IDC_STATISTICS', 1096: 'IDC_BUT_RESET_STATS', 1097: 'IDC_LAST_RESET_DATE', 1098: 'IDC_ENABLE_SOUNDS', 1099: 'IDC_ACCUMULATE_DELAY_SLIDER', 1100: 'IDC_ACCUMULATE_DELAY_TEXT', 1101: 'IDC_BROWSE_HAM_SOUND', 1102: 'IDC_BROWSE_UNSURE_SOUND', 1103: 'IDC_BROWSE_HAM_SOUND2', 101: 'IDD_MANAGER', 102: 'IDD_TRAINING', 103: 'IDD_FILTER', 104: 'IDD_FILTER_NOW', 105: 'IDD_FOLDER_SELECTOR', 106: 'IDD_ADVANCED', 107: 'IDD_STATISTICS', 108: 'IDD_GENERAL', 110: 'IDD_FILTER_SPAM', 113: 'IDD_DIAGNOSTIC', 114: 'IDD_WIZARD', 115: 'IDD_WIZARD_WELCOME', 116: 'IDD_WIZARD_FINISHED_UNTRAINED', 117: 'IDD_WIZARD_FOLDERS_REST', 118: 'IDD_WIZARD_FOLDERS_WATCH', 119: 'IDD_WIZARD_FINISHED_UNCONFIGURED', 120: 'IDD_WIZARD_FOLDERS_TRAIN', 121: 'IDD_WIZARD_TRAIN', 122: 'IDD_WIZARD_FINISHED_TRAINED', 123: 'IDD_WIZARD_TRAINING_IS_IMPORTANT', 124: 'IDD_WIZARD_FINISHED_TRAIN_LATER', 125: 'IDB_SBWIZLOGO', 127: 'IDB_FOLDERS', 128: 'IDD_NOTIFICATIONS', 129: '_APS_NEXT_RESOURCE_VALUE', 40001: '_APS_NEXT_COMMAND_VALUE', 1092: 'IDC_WIZ_GRAPHIC', 1000: 'IDC_PROGRESS', 1001: 'IDC_PROGRESS_TEXT', 1002: 'IDC_STATIC_HAM', 1003: 'IDC_STATIC_SPAM', 1004: 'IDC_BROWSE_HAM', 1005: 'IDC_BROWSE_SPAM', 1006: 'IDC_START', 1007: 'IDC_BUT_REBUILD', 1008: 'IDC_BUT_RESCORE', 1009: 'IDC_VERSION', 1010: 'IDC_BUT_TRAIN_FROM_SPAM_FOLDER', 1011: 'IDC_BUT_TRAIN_TO_SPAM_FOLDER', 1012: 'IDC_BUT_TRAIN_NOW', 1013: 'IDC_BUT_FILTER_ENABLE', 1014: 'IDC_FILTER_STATUS', 1016: 'IDC_BUT_FILTER_DEFINE', 1017: 'IDC_BUT_ABOUT', 1018: 'IDC_BUT_ACT_SCORE', 1019: 'IDC_BUT_ACT_ALL', 1020: 'IDC_BUT_UNREAD', 1021: 'IDC_BUT_UNSEEN', -1: 'IDC_STATIC', 1023: 'IDC_SLIDER_CERTAIN'}
bitmaps = {'IDB_SBWIZLOGO': 'sbwizlogo.bmp', 'IDB_SBLOGO': 'sblogo.bmp', 'IDB_FOLDERS': 'folders.bmp'}
def ParseDialogs(s):
return FakeParser()
| [
"[email protected]"
] | |
061399c156296dfd0f08ab0ef22d181d250ea69e | 856323fc904cd36b947114666186a2bcd0c1e10e | /tests/randMove/test_randMoveSIS.py | f612f0051507494ef17fd722dc003e328d396a01 | [
"MIT"
] | permissive | mjacob1002/Eir | fd6ee0fa7c2e0af93a34dca66bcd5b07a5c31f05 | ab9cb4e353796ba3ab79b1673adc251d434717cf | refs/heads/master | 2023-04-15T13:06:14.897503 | 2021-07-04T20:06:15 | 2021-07-04T20:06:15 | 286,567,858 | 39 | 9 | MIT | 2021-07-04T20:06:16 | 2020-08-10T20:03:02 | Python | UTF-8 | Python | false | false | 3,222 | py | import numpy as np
import pandas as pd
import unittest
from Eir.DTMC.spatialModel.randomMovement.randMoveSIS import RandMoveSIS
import Eir.exceptions as e
np.random.seed(35235)
class Test_RandMoveSIS(unittest.TestCase):
    def __init__(self):
        super().__init__()  # keep unittest.TestCase internals initialized
        self.test = RandMoveSIS(999, 2, .3, 25, 3, .3, 1, .25, 31, 1.0, 2.0)
        self.sdetails = self.test.run()
def generateCSV(self):
df = self.test.toDataFrame()
df.to_csv("randMoveSIS.csv", index=False)
def checkOutput(self):
df = self.test.toDataFrame()
df2 = pd.read_csv("randMoveSIS.csv")
assert df.equals(df2)
print("Output test passed")
def checkSimulInputs(self):
# checks for invalid person inputs
self.assertRaises(e.NotIntException, self.sdetails.personHistory, 100.0)
self.assertRaises(e.PersonNotFound, self.sdetails.personHistory, 1001)
# checks for exceptions when inputting days
self.assertRaises(e.DayOutOfRange, self.sdetails.transmissionHistoryOnDay, 65)
self.assertRaises(e.DayOutOfRange, self.sdetails.transmissionHistoryOnDay, -1)
self.assertRaises(e.NotIntException, self.sdetails.transmissionHistoryOnDay, 25.0)
print("Simul_Details input test passed: throws error for invalid inputs")
def checkInputs(self):
# int check
self.assertRaises(e.NotIntException, RandMoveSIS, 999.0, 1, .3, 25, 3, .3, 1, .25, 31, 1.0, 2.0)
self.assertRaises(e.NotIntException, RandMoveSIS, 999, 1.0, .3, 25, 3, .3, 1, .25, 31, 1.0, 2.0)
self.assertRaises(e.NotIntException, RandMoveSIS, 999, 1, .3, 25, 3, .3, 1, .25, 31.0, 1.0, 2.0)
# float check
self.assertRaises(e.NotFloatException, RandMoveSIS, 999, 1, '.3', 25, 3, .3, 1, .25, 31, 1.0, 2.0)
self.assertRaises(e.NotFloatException, RandMoveSIS, 999, 1, .3, "25", 3, .3, 1, .25, 31, 1.0, 2.0)
self.assertRaises(e.NotFloatException, RandMoveSIS, 999, 1, .3, 25, "3", .3, 1, .25, 31, 1.0, 2.0)
self.assertRaises(e.NotFloatException, RandMoveSIS, 999, 1, .3, 25, 3, True, 1, .25, 31, 1.0, 2.0)
self.assertRaises(e.NotFloatException, RandMoveSIS, 999, 1, .3, 25, 3, .3, 1, .25, 31, False, 2.0)
# negvalue check
self.assertRaises(e.NegativeValException, RandMoveSIS, -999, 1, .3, 25, 3, .3, 1, .25, 31, 1.0, 2.0)
self.assertRaises(e.NegativeValException, RandMoveSIS, 999, -1, .3, 25, 3, .3, 1, .25, 31, 1.0, 2.0)
self.assertRaises(e.NegativeValException, RandMoveSIS, 999, 1, -.3, 25, 3, .3, 1, .25, 31, 1.0, 2.0)
self.assertRaises(e.NegativeValException, RandMoveSIS, 999, 1, .3, -25, 3, .3, 1, .25, 31, 1.0, 2.0)
self.assertRaises(e.NegativeValException, RandMoveSIS, 999, 1, .3, 25, -3, .3, 1, .25, 31, 1.0, 2.0)
# probability check
self.assertRaises(e.ProbabilityException, RandMoveSIS, 999, 1, .3, 25, 3, .3, 1, .25, 31, 1.01, 2.0)
self.assertRaises(e.ProbabilityException, RandMoveSIS, 999, 1, 1.3, 25, 3, .3, 1, .25, 31, 1.0, 2.0)
print("Input Test passed")
if __name__ == '__main__':
a = Test_RandMoveSIS()
#a.generateCSV()
a.checkOutput()
a.checkSimulInputs()
a.checkInputs()
| [
"[email protected]"
] | |
871f553222511c050f1cdb9731b0b3044efd8503 | b2cc6507d5260514f63a3f0aa7915308cd20bf28 | /shelf/migrations/0002_auto_20200426_1245.py | 906189220f06b19dd541cfceb01b3172ff017dee | [] | no_license | japawka/kozaczko | 4df09523543a15ae59abad24b689730fecca8049 | 20cf9e0e83588b6ecd79ab10889925144688f2b7 | refs/heads/master | 2022-06-07T21:37:42.788616 | 2020-04-28T13:39:09 | 2020-04-28T13:39:09 | 259,619,444 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,426 | py | # Generated by Django 3.0.5 on 2020-04-26 10:45
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('shelf', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='BookCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='BookEdition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('isbn', models.CharField(max_length=17)),
('date', models.DateField()),
],
),
migrations.RemoveField(
model_name='book',
name='author',
),
migrations.RemoveField(
model_name='book',
name='isbn',
),
migrations.RemoveField(
model_name='book',
name='publisher',
),
migrations.AddField(
model_name='book',
name='authors',
field=models.ManyToManyField(to='shelf.Author'),
),
migrations.CreateModel(
name='BookItem',
fields=[
('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
('cat_number', models.CharField(max_length=30)),
('cover_type', models.CharField(choices=[('soft', 'Soft'), ('hard', 'Hard')], max_length=4)),
('edition', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shelf.BookEdition')),
],
),
migrations.AddField(
model_name='bookedition',
name='book',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shelf.Book'),
),
migrations.AddField(
model_name='bookedition',
name='publisher',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shelf.Publisher'),
),
migrations.AddField(
model_name='book',
name='categories',
field=models.ManyToManyField(to='shelf.BookCategory'),
),
]
| [
"[email protected]"
] | |
ec326ddb3da2f9fb2c78585e9f54d0baaac9afc1 | 8140877476ec5eb97a5c096105cefc58b160a1bf | /mysite/settings.py | b5f0bf2082451aab6cfbe93147072e8d421f3a8b | [] | no_license | Missespada/my-first-blog | b931321d618a06315ac8516d96f5fd87d8dffef3 | 71cbcc58f0d9102af4189d0e38eb5f6d043eb186 | refs/heads/master | 2021-01-22T05:10:26.056054 | 2015-07-25T03:09:36 | 2015-07-25T03:09:36 | 39,307,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,805 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'u&np25!7*g#p9y1y%jko#ei8rra4kfq(a_1i!kxvvn)6v@-ifr'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Los_Angeles'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# STATIC_ROOT = ''
# STATIC_URL = '/static/'
# STATICFILES_DIRS = ( os.path.join('static'), ) | [
"[email protected]"
] | |
a53f4e73d6c753979be2329785696ae68b6dc336 | 438e546e2acf5aa57c34c6481e477f7025b12e21 | /mocks/skowser_session3_question1.py | ddbe00521f78b177bdaa90997f0a5043f2b26f86 | [] | no_license | SajinKowserSK/algorithms-practice | 988537ef3537487cb40c78776dd2c9e1130cde4f | 41bbd55553747492a539b41f6e86bff5504c5842 | refs/heads/master | 2022-11-06T18:22:41.329484 | 2022-10-19T23:40:10 | 2022-10-19T23:40:10 | 206,470,288 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | class ListNode(object):
def __init__(self, val=0, next=None):
self.val = val
self.next = next
def has_cycle(head):
if head is None:
return False
slow = head
fast = head
while fast is not None:
fast = fast.next
if fast is None:
break
else:
fast = fast.next
if slow == fast:
return True
slow = slow.next
return False
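# Note: has_cycle above is Floyd's tortoise-and-hare check: the fast pointer
# advances two nodes per pass and the slow pointer one, so inside a cycle the
# gap shrinks by one each pass and the two pointers must eventually meet.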
node1 = ListNode(1)
node2 = ListNode(2)
node3 = ListNode(3)
node4 = ListNode(4)
node5 = ListNode(5)
node1.next = node2
node2.next = node3
node3.next = node4
node4.next = node5
node5.next = None
print(has_cycle(node1)) # False
node1 = ListNode(1)
node2 = ListNode(2)
node3 = ListNode(3)
node4 = ListNode(4)
node5 = ListNode(5)
node1.next = node2
node2.next = node3
node3.next = node4
node4.next = node5
node5.next = node2
print(has_cycle(node1)) # True
| [
"[email protected]"
] | |
0eacc8850c2e0a1b284058bd4cf49418cf285991 | 5a6da40024217c0ca2c0242f031d6d0105775899 | /utils.py | 13bdef9709be85f99880404dcbf366620db0ea5f | [
"MIT"
] | permissive | tobyma/generative | 06fa928c8882469a3fe35cb69863c1ae06409021 | 75cb6c1065cd04ce4eaf13b90b037137a8ad7873 | refs/heads/master | 2020-04-22T13:53:58.599101 | 2017-03-04T05:01:16 | 2017-03-04T05:01:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,038 | py | import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
def data_on_latent_space(encoded, categories, ax=None):
"""
plots the data in the latent space
encoded: first two dimensions of the data encoded
categories: the categories for each datapoint to (for visualization purposes)
batch_size[=32]: the batch size for the predictions
ax[=None]: axis to add the plot to
"""
if not ax:
f = plt.figure(figsize=(6, 6))
ax = f.add_subplot(111)
else:
f = None
ax.scatter(encoded[:,0], encoded[:,1], c=categories)
return f, ax
def manifold_2D(generator, ax=None, n=15, shape=(28,28), latent_space='gaussian', latent_range=(0.05, 0.95)):
""" display a 2D manifold of the digits
@params:
generator: a generator with a .predict() function
ax[=None]: axis to add the plot to
n[=15]: number of samples to generate for each dimension
shape[=(28,28)]: reshape of the sample
        latent_space[='gaussian']: prior of the latent space ('gaussian' is the only supported value)
        latent_range[=(0.05,0.95)]: quantile range of the prior to sample from
@returns:
matplotlib axes with the figure added.
"""
digit_size = shape[0]
figure = np.zeros((digit_size * n, digit_size * n))
# linearly spaced coordinates on the unit square were transformed through the inverse CDF (ppf) of the Gaussian
# to produce values of the latent variables z, since the prior of the latent space is Gaussian
if latent_space == 'gaussian':
grid_x = stats.norm.ppf(np.linspace(latent_range[0], latent_range[1], n))
grid_y = stats.norm.ppf(np.linspace(latent_range[0], latent_range[1], n))
else:
raise NotImplementedError('Unknown Latent Space not yet implemented')
for i, yi in enumerate(grid_x):
for j, xi in enumerate(grid_y):
z_sample = np.array([[xi, yi]])
x_decoded = generator.predict(z_sample)
digit = x_decoded[0].reshape((digit_size, digit_size))
figure[i * digit_size: (i + 1) * digit_size,
j * digit_size: (j + 1) * digit_size] = digit
if not ax:
f = plt.figure(figsize=(10, 10))
ax = f.add_subplot(111)
else:
f = None
ax.imshow(figure, cmap='Greys_r')
return f, ax
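# Hypothetical usage sketch (the stub generator below is an assumption for
# illustration and not part of this module; any object with a .predict()
# returning a flat image batch works):
#   class _StubGenerator:
#       def predict(self, z_sample):
#           return np.random.rand(1, 28 * 28)   # fake decoded "digit"
#   fig, ax = manifold_2D(_StubGenerator(), n=5)
#   fig2, ax2 = data_on_latent_space(np.random.randn(100, 2),
#                                    categories=np.random.randint(0, 10, 100))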
| [
"[email protected]"
] | |
8ddf00dbe966270bc91547dc197e635375b74c13 | 264ff719d21f2f57451f322e9296b2f55b473eb2 | /gvsoc/gvsoc/models/pulp/chips/vega/apb_soc.py | d7f74fcef931aeea1e95778960d96b989c1f3330 | [
"Apache-2.0"
] | permissive | knmcguire/gap_sdk | 06c9537c16fa45dea6b7f5c6b162b53953262915 | 7b0a09a353ab6f0550793d40bd46e98051f4a3d7 | refs/heads/master | 2020-12-20T06:51:19.580497 | 2020-01-21T14:52:28 | 2020-01-21T14:52:28 | 235,992,961 | 0 | 0 | Apache-2.0 | 2020-01-24T11:45:59 | 2020-01-24T11:45:58 | null | UTF-8 | Python | false | false | 780 | py | #
# Copyright (C) 2018 ETH Zurich and University of Bologna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Germain Haugou, ETH ([email protected])
import vp_core as vp
class component(vp.component):
implementation = 'pulp.chips/vega/apb_soc_impl'
| [
"[email protected]"
] | |
13c85d6c02d79c842951ec2a8ed8df7d47224ca7 | f070c3acba7da2254adc2c12f80e54b830396d40 | /test/venv/lib/python3.6/site-packages/neutronclient/tests/unit/test_cli20_subnetpool.py | 60d25de6f4fc8ee3e3351e6948d6c6f219bcbbeb | [] | no_license | liruidesysu/cloudCluster | 241a6ac472ecce9c6b4c966a44304128d258fc9b | fc558b464c3052f59cb1e6326aa22bade556b0c8 | refs/heads/master | 2022-11-06T03:51:31.954607 | 2019-08-22T12:47:53 | 2019-08-22T12:47:53 | 200,144,454 | 0 | 1 | null | 2022-03-29T21:56:02 | 2019-08-02T01:42:17 | Python | UTF-8 | Python | false | false | 8,501 | py | # Copyright 2015 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
from mox3 import mox
from neutronclient.common import exceptions
from neutronclient.neutron.v2_0 import subnetpool
from neutronclient.tests.unit import test_cli20
class CLITestV20SubnetPoolJSON(test_cli20.CLITestV20Base):
non_admin_status_resources = ['subnetpool']
def setUp(self):
super(CLITestV20SubnetPoolJSON, self).setUp(plurals={'tags': 'tag'})
def test_create_subnetpool_shared(self):
# Create subnetpool: myname.
resource = 'subnetpool'
cmd = subnetpool.CreateSubnetPool(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
min_prefixlen = 30
prefix1 = '10.11.12.0/24'
prefix2 = '12.11.13.0/24'
args = [name, '--min-prefixlen', str(min_prefixlen),
'--pool-prefix', prefix1, '--pool-prefix', prefix2,
'--shared']
position_names = ['name', 'min_prefixlen', 'prefixes', 'shared']
position_values = [name, min_prefixlen, [prefix1, prefix2], True]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_subnetpool_not_shared(self):
# Create subnetpool: myname.
resource = 'subnetpool'
cmd = subnetpool.CreateSubnetPool(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
min_prefixlen = 30
prefix1 = '10.11.12.0/24'
prefix2 = '12.11.13.0/24'
args = [name, '--min-prefixlen', str(min_prefixlen),
'--pool-prefix', prefix1, '--pool-prefix', prefix2]
position_names = ['name', 'min_prefixlen', 'prefixes']
position_values = [name, min_prefixlen, [prefix1, prefix2]]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_subnetpool(self, default='false'):
# Create subnetpool: myname.
resource = 'subnetpool'
cmd = subnetpool.CreateSubnetPool(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
min_prefixlen = 30
prefix1 = '10.11.12.0/24'
prefix2 = '12.11.13.0/24'
args = [name, '--min-prefixlen', str(min_prefixlen),
'--pool-prefix', prefix1, '--pool-prefix', prefix2,
'--is-default', default]
position_names = ['name', 'min_prefixlen', 'prefixes', 'is_default']
position_values = [name, min_prefixlen, [prefix1, prefix2], default]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_subnetpool_default(self):
self.test_create_subnetpool(default='true')
def test_create_subnetpool_with_unicode(self):
# Create subnetpool: u'\u7f51\u7edc'.
resource = 'subnetpool'
cmd = subnetpool.CreateSubnetPool(test_cli20.MyApp(sys.stdout), None)
name = u'\u7f51\u7edc'
myid = 'myid'
min_prefixlen = 30
prefixes = '10.11.12.0/24'
args = [name, '--min-prefixlen', str(min_prefixlen),
'--pool-prefix', prefixes]
position_names = ['name', 'min_prefixlen', 'prefixes']
position_values = [name, min_prefixlen, [prefixes]]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_subnetpool_with_addrscope(self):
# Create subnetpool: myname in addrscope: foo-address-scope
resource = 'subnetpool'
cmd = subnetpool.CreateSubnetPool(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
min_prefixlen = 30
prefix1 = '11.11.11.0/24'
prefix2 = '12.12.12.0/24'
address_scope = 'foo-address-scope'
args = [name, '--min-prefixlen', str(min_prefixlen),
'--pool-prefix', prefix1, '--pool-prefix', prefix2,
'--address-scope', address_scope]
position_names = ['name', 'min_prefixlen', 'prefixes',
'address_scope_id']
position_values = [name, min_prefixlen, [prefix1, prefix2],
address_scope]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_list_subnetpool_pagination(self):
cmd = subnetpool.ListSubnetPool(test_cli20.MyApp(sys.stdout), None)
self.mox.StubOutWithMock(subnetpool.ListSubnetPool, "extend_list")
subnetpool.ListSubnetPool.extend_list(mox.IsA(list), mox.IgnoreArg())
self._test_list_resources_with_pagination("subnetpools", cmd)
self.mox.VerifyAll()
self.mox.UnsetStubs()
def test_list_subnetpools_sort(self):
# List subnetpools:
# --sort-key name --sort-key id --sort-key asc --sort-key desc
resources = "subnetpools"
cmd = subnetpool.ListSubnetPool(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"])
def test_list_subnetpools_limit(self):
# List subnetpools: -P.
resources = "subnetpools"
cmd = subnetpool.ListSubnetPool(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, page_size=1000)
def test_update_subnetpool_exception(self):
# Update subnetpool: myid.
resource = 'subnetpool'
cmd = subnetpool.UpdateSubnetPool(test_cli20.MyApp(sys.stdout), None)
self.assertRaises(exceptions.CommandError, self._test_update_resource,
resource, cmd, 'myid', ['myid'], {})
def test_update_subnetpool(self):
# Update subnetpool: myid --name myname.
resource = 'subnetpool'
cmd = subnetpool.UpdateSubnetPool(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'myname'],
{'name': 'myname'}
)
def test_update_subnetpool_with_address_scope(self):
# Update subnetpool: myid --address-scope newscope.
resource = 'subnetpool'
cmd = subnetpool.UpdateSubnetPool(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--address-scope', 'newscope'],
{'address_scope_id': 'newscope'}
)
def test_update_subnetpool_with_no_address_scope(self):
# Update subnetpool: myid --no-address-scope.
resource = 'subnetpool'
cmd = subnetpool.UpdateSubnetPool(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--no-address-scope'],
{'address_scope_id': None}
)
def test_show_subnetpool(self):
# Show subnetpool: --fields id --fields name myid.
resource = 'subnetpool'
cmd = subnetpool.ShowSubnetPool(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id, args,
['id', 'name'])
def test_delete_subnetpool(self):
# Delete subnetpool: subnetpoolid.
resource = 'subnetpool'
cmd = subnetpool.DeleteSubnetPool(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = [myid]
self._test_delete_resource(resource, cmd, myid, args)
| [
"[email protected]"
] | |
ab4f0ea1158e91ecc01b43c3f75c06274b029f90 | 3fd3da4f11a251cc43d44d1d61ff2ffe5c82a4ce | /零起点TF与量化交易源码kc_demo/kc_demo/ztools_tq.py | 7a06723cdaee4c787316a1b5df6951447e284e59 | [] | no_license | dumpinfo/TsBook | d95faded917bce3e024e77ff06afd30717ed9ef4 | 8fadfcd2ebf935cd49784fd27d66b2fd9f307fbd | refs/heads/master | 2023-05-27T07:56:24.149421 | 2019-07-31T20:51:52 | 2019-07-31T20:51:52 | 198,481,031 | 1 | 3 | null | 2023-05-22T21:13:31 | 2019-07-23T17:47:19 | Jupyter Notebook | UTF-8 | Python | false | false | 20,846 | py | # -*- coding: utf-8 -*-
'''
TopQuant (TQ for short), the TopQuant intelligent quant backtesting system;
companion teaching Python code for the training courseware.
Top极宽量化 (formerly zw量化), the leading Python quant brand.
First released 2017.10.1 by the Top极宽 quant open-source team.
Websites: www.TopQuant.vip   www.ziwang.com
QQ groups: Top极宽量化 group 1, 124134140
           Top极宽量化 group 2, 650924099
           Top极宽量化 group 3, 450853713
File name: ztools_tq.py
Default alias: import ztools_tq as ztq
Summary: TopQuant collection of commonly used quant utility functions.
'''
#
import sys,os,re,pickle
import arrow,bs4,random,copy
import numexpr as ne
import numpy as np
import pandas as pd
import tushare as ts
#import talib as ta
import pypinyin
#
import matplotlib as mpl
from matplotlib import pyplot as plt
from concurrent.futures import ProcessPoolExecutor
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import as_completed
#import multiprocessing
#
import keras as ks
import sklearn
from sklearn import metrics
#
import keras
from keras.models import Sequential,load_model
from keras.utils import plot_model
#
import tflearn
import tensorflow as tf
#
import zsys
import ztools as zt
import ztools_str as zstr
import ztools_data as zdat
#-------------------
#
import zpd_talib as zta
#
#-------------------
#-------init.TQ.xxx,qx.xxx
def tq_init(rs0,codLst,inxLst=['000001'],priceSgn='avg',prjNam='TQ001'):
pd.set_option('display.width', 450)
pd.set_option('display.float_format', zt.xfloat3)
#
qx=zsys.TQ_bar()
qx.rdat0,qx.prjNam=rs0,prjNam
qx.stkCodeLst,qx.inxCodeLst=codLst,inxLst
#qx.codID,qx.codFN=xcod,zsys.rdatCN+xcod+'.csv'
qx.priceSgn=priceSgn
qx.priceDateFlag=(rs0.upper().find('MIN')<0)
#
print('tq_init name...')
#f_stkCodNamTbl='stk_code.csv'
fstk,finx=zsys.rdatInx+zsys.f_stkNamTbl,zsys.rdatInx+zsys.f_inxNamTbl
qx.stkNamTbl=pd.read_csv(fstk,dtype={'code':str},encoding='GBK')
qx.inxNamTbl=pd.read_csv(finx,dtype={'code':str},encoding='GBK')
#
print('tq_init pools...')
if qx.priceDateFlag:
qx.stkPools=zdat.pools_frd(rs0+'day/',codLst)
qx.inxPools=zdat.pools_frd(rs0+'xday/',inxLst)
else:
qx.stkPools=zdat.pools_frd(rs0+'m05/',codLst)
qx.inxPools=zdat.pools_frd(rs0+'xm05/',inxLst)
print('tq_init work data...')
if len(codLst)>0:
xcod=qx.stkCodeLst[0]
qx.wrkStkCod=xcod
qx.wrkStkDat=qx.stkPools[xcod]
qx.wrkStkInfo=qx.stkNamTbl[qx.stkNamTbl.code==xcod]
#
if len(inxLst)>0:
xinx=qx.inxCodeLst[0]
qx.wrkInxCod=xinx
qx.wrkInxDat=qx.inxPools[xinx]
qx.wrkInxInfo=qx.inxNamTbl[qx.inxNamTbl.code==xinx]
#
return qx
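# Illustrative call sketch (the stock/index codes are placeholders, not taken
# from this file; adjust to your local data layout):
#   qx = tq_init(zsys.rdatCN, ['600016'], inxLst=['000001'], prjNam='TQ001')
#   tq_prVar(qx)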
def tq_prVar(qx):
print('\nobj:qx')
zt.xobjPr(qx)
#
print('\nzsys.xxx')
print(' rdat0,',zsys.rdat0)
print(' rdatCN,',zsys.rdatCN)
print(' rdatCNX,',zsys.rdatCNX)
print(' rdatInx,',zsys.rdatInx)
print(' rdatMin0,',zsys.rdatMin0)
print(' rdatTick,',zsys.rdatTick)
#
print('\ncode list:',qx.stkCodeLst)
print(' inx list:',qx.inxCodeLst)
#
zt.prx('stk info',qx.wrkStkInfo)
zt.prx('inx info',qx.wrkInxInfo)
zt.prx('wrkStkDat',qx.wrkStkDat.tail())
#
zt.prx('btTimLst',qx.btTimLst)
    zt.prx('usrPools',qx.usrPools) # user stock-pool asset data, dict format
print('\nusrMoney,usrTotal:',qx.usrMoney,qx.usrTotal)
#
tq_prTrdlib(qx)
#zt.prx('qx.trdLib',qx.trdLib.head())
#zt.prx('qx.trdLib',qx.trdLib.tail())
#
def tq_prWrk(qx):
print('\n\t bt_main_1day,',qx.wrkStkCod,qx.wrkTimStr)
#
zt.prx('stk info',qx.wrkStkInfo)
zt.prx('inx info',qx.wrkInxInfo)
zt.prx('wrkStkDat.head',qx.wrkStkDat.head(10))
zt.prx('wrkStkDat.tail',qx.wrkStkDat.tail(10))
#
zt.prx('btTimLst',qx.btTimLst)
    zt.prx('usrPools',qx.usrPools) # user stock-pool asset data, dict format
print('\nusrMoney,usrTotal:',qx.usrMoney,qx.usrTotal)
#
#zt.prx('qx.trdLib',qx.trdLib.head())
#zt.prx('qx.trdLib',qx.trdLib.tail())
tq_prTrdlib(qx,30)
def tq_prTrdlib(qx,n9=10):
print('\nqx.trdLib')
dfq,nc=qx.trdLib,0
dfq=dfq.round(3)
print('\ttime,\tID,\tcash,\t\tusrPools')
for xc,row in dfq.iterrows():
nc+=1
if nc<n9:
upools=row['upools']
xss='{0},{1},${2:0.2f}'.format(row['time'],row['ID'],row['cash'])
print(xss,'\t',upools)
#
dn9=len(dfq.index)
if dn9>n9:
print('......')
nc0,nc=dn9-n9,0
for xc,row in dfq.iterrows():
nc+=1
if nc>nc0:
upools=row['upools']
xss='{0},{1},${2:0.2f}'.format(row['time'],row['ID'],row['cash'])
print(xss,'\t',upools)
#
print('\nn-trdlib:',dn9)
#-------tq.pools.xxxx
def tq_pools_wr(qx):
fss=qx.rtmp+qx.wrkStkCod+'.csv'
qx.wrkStkDat.to_csv(fss)
def tq_pools_chk(qx):
print('\n@tq_pools_chk,xcode',qx.wrkStkCod)
print(qx.wrkStkDat.tail())
def tq_pools_call(qx,xfun):
for xcod in qx.stkCodeLst:
qx.wrkStkCod=xcod
qx.wrkStkDat=qx.stkPools[xcod]
#sta_dataPre(qx)
xfun(qx)
qx.stkPools[xcod]=qx.wrkStkDat
#
#print('\ntq_pools_call,',xcod)
#print(qx.stkPools[xcod].tail())
#
return qx
#---------------tq.trd.xxx
#---------------------------tq.stk.xxx
def tq_stkGetPrice(df,ksgn,xtim):
    '''
    Get the current price at time xtim.
    Args:
        df: stock price DataFrame
        ksgn (str): price-mode column name (e.g. 'avg')
        xtim: timestamp to look up
    '''
#d10=dfw.stkLib[qx.stkCode]
d01=df[xtim:xtim];
#
price=0;
if len(d01)>0:
d02=d01[ksgn]
price=d02[0];
if pd.isnull(price):
d02=d01['dprice']
price=d02[0];
#
price=round(price,3)
return price
#---------------------------stk
def stk2data_pre8FN(fss):
if not os.path.exists(fss):
return None
#
df=pd.read_csv(fss,index_col=0)
df['avg']=df[zsys.ohlcLst].mean(axis=1)
#
df['avg']=df[zsys.ohlcLst].mean(axis=1)
df,avg_lst=zdat.df_xshift(df,ksgn='avg',num9=10)
#print('avg_lst,',avg_lst)
#
mv_lst=[2,3,5,10,15,20,30,50,100,150,200]
#ma_lst=[2,3,4,5,6,7,8,9,10,15,20,30,40,50,60,80,100,120,150,180,200,250,300]
df=zta.mul_talib(zta.MA,df, ksgn='avg',vlst=mv_lst)
ma_lst=zstr.sgn_4lst('ma',mv_lst)
#
df['xtim']=df.index
df['xyear']=df['xtim'].apply(zstr.str_2xtim,ksgn='y')
df['xmonth']=df['xtim'].apply(zstr.str_2xtim,ksgn='m')
df['xday']=df['xtim'].apply(zstr.str_2xtim,ksgn='d')
df['xweekday']=df['xtim'].apply(zstr.str_2xtim,ksgn='w')
tim_lst=['xyear','xmonth','xday','xweekday']
#
df['price']=df['avg']
df['price_next']=df[avg_lst].max(axis=1)
    # price-change ratio in percent; zsys.k_price_change=1000
df['price_change']=df['price_next']/df['price']*100
#df['ktype']=df['price_change'].apply(zt.iff2type,d0=100)
#def dat2type(d,k9=2000,k0=0):
#fd>120
#
df=df.dropna()
#df['ktype']=round(df['price_change']).astype(int)
#df['ktype']=df['kprice'].apply(zt.iff2type,d0=100)
#df['ktype']=df['price_change'].apply(zt.iff3type,v0=95,v9=105,v3=3,v2=2,v1=1)
#
df=df.round(3)
return df
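# Example call sketch (the CSV path is hypothetical):
#   df_feat = stk2data_pre8FN(zsys.rdatCN + '600016.csv')
#   print(df_feat[['avg', 'price', 'price_next', 'price_change']].tail())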
def stk2data_pre8Flst(finx,rss):
flst=pd.read_csv(finx,index_col=False,dtype='str',encoding='gbk')
df9=pd.DataFrame()
xc=0
for xcod in flst['code']:
#print(xcod)
xc+=1
fss=rss+xcod+'.csv';print(xc,'#',fss)
df=stk2data_pre8FN(fss)
df9=df9.append(df)
#
return df9
#---------------------------user.xxx
def tq_usrIDSet(qx):
    ''' Generate an order serial-number ID
    # ID = prjName + '_' + trdCnt, zero-padded to 5 digits
'''
qx.trdCnt+=1;
nss='{:05d}'.format(qx.trdCnt);
qx.trdID=qx.prjNam+'_'+nss;
#
return qx.trdID
#---------------------------user.pools
def tq_usrPoolsMerge(qx):
#inx_cod,ksgn=qx.inxCodeLst[0],qx.priceSgn
#df_inx=qx.inxPools[inx_cod]
ksgn=qx.priceSgn
df9=pd.DataFrame()
for xcod in qx.inxCodeLst:
df=qx.inxPools[xcod]
df9['x'+xcod]=df[ksgn]
#
for xcod in qx.stkCodeLst:
df=qx.stkPools[xcod]
df9[xcod]=df[ksgn]
#
return df9
def tq_usrPoolsAdd(upools,xcod,ksgn,dat):
usr1=upools.get(xcod)
if usr1==None:usr1={}
v=tq_usrPoolsGet(upools,xcod,ksgn)
#
usr1[ksgn]=round(v+dat,2)
#
upools[xcod]=usr1
#print(v,v+dat,'v',xcod,ksgn,dat)
#
return upools
def tq_usrPoolsGet(upools,xcod,ksgn):
v,usr1=0,upools.get(xcod)
#print('v,usr1',v,usr1,xcod,upools)
if usr1!=None:v=usr1.get(ksgn)
#print('v,usr2',v,usr1)
if v==None:v=0
#print('v,usr3',v,usr1)
#if ksgn!='code':
v=round(v,3)
#
return v
def tq_usrPoolsPut(upools,xcod,ksgn,dat):
usr1=upools.get(xcod)
if usr1==None:usr1={}
#
usr1[ksgn]=round(dat,3)
upools[xcod]=usr1
#
return upools
def tq_usrPoolsPutAll(upools,xcod,num9,dnum):
#tq_usrPoolsPut(upools,xcod,'code',xcod)
tq_usrPoolsPut(upools,xcod,'num9',num9)
#tq_usrPoolsPut(upools,xcod,'sum',sum9)
tq_usrPoolsPut(upools,xcod,'dnum',dnum)
#tq_usrPoolsPut(upools,xcod,'dsum',dsum)
#tq_usrPoolsPut(upools,xcod,'dprice',dprice)
#
return upools
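# Minimal usage sketch of the pool accessors above (codes/keys invented for demo):
#   up = {}
#   tq_usrPoolsAdd(up, '600016', 'num9', 100)   # -> {'600016': {'num9': 100}}
#   tq_usrPoolsAdd(up, '600016', 'num9', 50)    # accumulates -> 150
#   tq_usrPoolsGet(up, '600016', 'num9')        # -> 150
#   tq_usrPoolsPut(up, '600016', 'num9', 0)     # overwrites -> 0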
def tq_usrPoolsInit(qx,addFg=False):
upools=qx.usrPools
for xcod in qx.stkCodeLst:
num9=0
if addFg:num9=tq_usrPoolsGet(upools,xcod,'num9')
#
tq_usrPoolsPutAll(upools,xcod,num9,0)
#
qx.usrPools=upools
return qx
def tq_usr2trdLib(qx):
xtim,upools=qx.wrkTimStr,qx.usrPools
r1=pd.Series(zsys.qx_trdNil,index=zsys.qx_trdName);
#
r1['ID']=tq_usrIDSet(qx)
r1['time'],r1['cash']=xtim,qx.usrMoney
#r1['upools']=qx.usrPools.copy()
r1['upools']=copy.deepcopy(upools)
#
qx.trdLib=qx.trdLib.append(r1.T,ignore_index=True)
#
#---------------------------user.stk
def tq_usrStkMerge(qx):
inx_cod,ksgn=qx.inxCodeLst[0],qx.priceSgn
df_inx,dfq=qx.inxPools[inx_cod],qx.trdLib
dfq.index=dfq['time']
#zt.prx('dfq',dfq)
#
df9=pd.DataFrame()
df9['inx'],df9['cash'],df9['total']=df_inx[ksgn],dfq['cash'],0
df9=df9.dropna()
#zt.prDF('df9',df9)
for xcod in qx.stkCodeLst:
df=qx.stkPools[xcod]
df9[xcod]=df[ksgn]
#
return df9
def tq_usrDatXed(qx,dfu):
df2=zdat.df_kcut8tim(dfu,'',qx.btTim0Str,qx.btTim9Str)
#
clst=qx.stkCodeLst
nlst=list(map(lambda x:x+'_num',clst))
mlst=list(map(lambda x:x+'_money',clst))
dlst=list(df2.columns);#+['stk-val']
xlst=dlst+nlst+mlst; #print('xlst',xlst)
#
df3=pd.DataFrame(columns=xlst)
df3[dlst]=df2
#
dfq=qx.trdLib
dfq=zdat.df_kcut8tim(qx.trdLib,'',qx.btTim0Str,qx.btTim9Str)
#dfq=dfq[dfq.index>=qx.btTim0St]
#dfq=dfq[dfq.index<=qx.btTim9Str]
#
for xsgn in nlst:
dfq[xsgn]=0
#
for xtim,row in dfq.iterrows():
upools=row['upools']
#print(xtim,'#',upools)
for xcod in upools:
xnum=tq_usrPoolsGet(upools,xcod,'num9')
            df3.loc[xtim,xcod+'_num']=xnum  # .loc replaces the removed .ix indexer
#
#
df3=df3.fillna(method='pad')
#
for xcod in clst:
mss,nss=xcod+'_money',xcod+'_num'
df3[mss]=df3[xcod]*df3[nss]
#
df3['stk-val']=df3[mlst].sum(axis=1)
df3['total']=df3['stk-val']+df3['cash']
#
#
#print('df3.tail()')
#print(df3.tail())
x=df3.tail(1)
x2=x['total'].values[0]
k=round(x2/qx.usrMoney0*100,2)
#
return df3,k
def tq_usrDatXedFill(qx,dfu):
#df2=zdat.df_kcut8tim(dfu,'',qx.btTim0Str,qx.btTim9Str)
#df2=dfu[dfu.index>=tim0Str]
#df2=df2[df2.index<=tim9Str]
#print('df2.tail()')
#print(df2.tail())
#
#xcod=qx.wrkInxCod
#df=qx.inxPools[xcod]
#
df9=pd.DataFrame()
clst=qx.stkCodeLst
mlst=list(map(lambda x:x+'_money',clst))
#
#df=qx.wrkInxDat
ksgn=qx.priceSgn
df9['inx']=qx.wrkInxDat[ksgn]
df9['cash']=dfu['cash']
#
for xcod in clst:
nss,mss=xcod+'_num',xcod+'_money'
#nss=xcod+'_num'
#df9[xcod],df9[nss],df9[mss]=dfu[xcod],dfu[nss],dfu[mss]
df=qx.stkPools[xcod]
df9[xcod],df9[nss]=df[ksgn],dfu[nss]
df9[mss]=df9[xcod]*df9[nss]
#
df9=df9.fillna(method='pad')
df9[mss]=df9[xcod]*df9[nss]
df9['stk-val']=df9[mlst].sum(axis=1)
df9['total']=df9['stk-val']+df9['cash']
#
df9=zdat.df_kcut8tim(df9,'',qx.btTim0Str,qx.btTim9Str)
#
xlst=['inx','total']+clst
#df9[xlst]=df9[xlst]
df=df9[xlst]
df.index=pd.DatetimeIndex(df.index)
#
return df
#------------
#---------------------------tick
def tick2x(df,ktim='1min'):
    '''
    ktim is the time-frequency argument; see pandas' resample() for details.
    Common frequency codes:
    A, year
    M, month
    W, week
    D, day
    H, hour
    T, minute
    S, second
    '''
#
df['time']=pd.to_datetime(df['time'])
df=df.set_index('time')
df=df.sort_index()
#
dfk=df['price'].resample(ktim).ohlc();dfk=dfk.dropna();
vol2=df['volume'].resample(ktim).sum();vol2=vol2.dropna();
df_vol2=pd.DataFrame(vol2,columns=['volume'])
amt2=df['amount'].resample(ktim).sum();amt2=amt2.dropna();
df_amt2=pd.DataFrame(amt2,columns=['amount'])
#
df2=dfk.merge(df_vol2,left_index=True,right_index=True)
df9=df2.merge(df_amt2,left_index=True,right_index=True);
#
xtims=df9.index.format('%Y-%m-%d %H:%M:%S')
del(xtims[0])
df9['xtim']=xtims # df9.index.__str__();# [str(df9.index)]
#
return df9
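# Usage sketch for tick2x (the file name is an assumption): resample raw ticks
# carrying 'time'/'price'/'volume'/'amount' columns into 5-minute OHLC bars.
#   df_tick = pd.read_csv('tick_demo.csv')
#   bars_5m = tick2x(df_tick, ktim='5T')   # '5T' = 5 minutes, see the docstring
#   print(bars_5m.head())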
#---------------------------ai.xxx
def ai_varRd(fmx0):
fvar=fmx0+'tqvar.pkl'
qx=zt.f_varRd(fvar)
for xkey in qx.aiMKeys:
fss=fmx0+xkey+'.mx'
mx=load_model(fss)
qx.aiModel[xkey]=mx
#
return qx
def ai_varWr(qx,fmx0):
fvar=fmx0+'tqvar.pkl'
mx9=qx.aiModel
qx.aiMKeys=list(mx9.keys())
qx.aiModel={}
zt.f_varWr(fvar,qx)
print('fvar,',fvar)
#
for xkey in mx9:
fss=fmx0+xkey+'.mx'
mx9[xkey].save(fss)
print('fmx,',fss)
#
qx.aiModel=mx9
#---------------------------ai.xxx
#---------------------------ai.dacc
def ai_acc_xed2x(y_true,y_pred,ky0=5,fgDebug=False):
    '''
    Accuracy-evaluation function for machine-learning results.
    Inputs:
        y_true, y_pred: pandas Series.
        ky0: relative-error tolerance in percent; default 5 (i.e. within 5%).
        fgDebug: debug flag; default False.
    Returns:
        dacc: accuracy in percent, float.
        df: result data, pandas DataFrame.
    '''
#1
df,dacc=pd.DataFrame(),-1
#print('n,',len(y_true),len(y_pred))
if (len(y_true)==0) or (len(y_pred)==0):
#print('n,',len(y_true),len(y_pred))
return dacc,df
#
y_num=len(y_true)
#df['y_true'],df['y_pred']=zdat.ds4x(y_true,df.index),zdat.ds4x(y_pred,df.index)
df['y_true'],df['y_pred']=pd.Series(y_true),pd.Series(y_pred)
df['y_diff']=np.abs(df.y_true-df.y_pred)
#2
df['y_true2']=df['y_true']
df.loc[df['y_true'] == 0, 'y_true2'] =0.00001
df['y_kdif']=df.y_diff/df.y_true2*100
#3
dfk=df[df.y_kdif<ky0]
knum=len(dfk['y_pred'])
dacc=knum/y_num*100
#
#5
dacc=round(dacc,3)
return dacc,df
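# Toy self-check for ai_acc_xed2x (numbers invented for illustration):
#   y_t = pd.Series([100.0, 200.0, 50.0, 80.0])
#   y_p = pd.Series([102.0, 198.0, 60.0, 81.0])
#   dacc, _ = ai_acc_xed2x(y_t, y_p, ky0=5)   # 3 of 4 within 5% -> 75.0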
def ai_acc_xed2ext(y_true,y_pred,ky0=5,fgDebug=False):
    '''
    Accuracy-evaluation function for machine-learning results, extended version.
    Inputs:
        y_true, y_pred: pandas Series.
        ky0: relative-error tolerance in percent; default 5 (i.e. within 5%).
        fgDebug: debug flag; default False.
    Returns:
        dacc: accuracy in percent, float.
        df: result data, pandas DataFrame.
        [dmae, dmse, drmse, dr2sc]: extended metrics (MAE, MSE, RMSE, R2 score).
    '''
#1
df,dacc=pd.DataFrame(),-1
if (len(y_true)==0) or (len(y_pred)==0):
#print('n,',len(y_true),len(y_pred))
return dacc,df
#2
y_num=len(y_true)
#df['y_true'],df['y_pred']=zdat.ds4x(y_true,df.index),zdat.ds4x(y_pred,df.index)
df['y_true'],df['y_pred']=y_true,y_pred
df['y_diff']=np.abs(df.y_true-df.y_pred)
#3
df['y_true2']=df['y_true']
df.loc[df['y_true'] == 0, 'y_true2'] =0.00001
df['y_kdif']=df.y_diff/df.y_true2*100
#4
dfk=df[df.y_kdif<ky0]
knum=len(dfk['y_pred'])
dacc=knum/y_num*100
#
#5
dmae=metrics.mean_absolute_error(y_true, y_pred)
dmse=metrics.mean_squared_error(y_true, y_pred)
drmse=np.sqrt(metrics.mean_squared_error(y_true, y_pred))
dr2sc=metrics.r2_score(y_true,y_pred)
#
#6
if fgDebug:
#print('\nai_acc_xed')
#print(df.head())
#y_test,y_pred=df['y_test'],df['y_pred']
print('ky0={0}; n_df9,{1},n_dfk,{2}'.format(ky0,y_num,knum))
print('acc: {0:.2f}%; MSE:{1:.2f}, MAE:{2:.2f}, RMSE:{3:.2f}, r2score:{4:.2f}, @ky0:{5:.2f}'.format(dacc,dmse,dmae,drmse,dr2sc,ky0))
#
#7
dacc=round(dacc,3)
xlst=[dmae,dmse,drmse,dr2sc]
return dacc,df,xlst
#---------------------------ai.model.xxx
def ai_mul_var_tst(mx,df_train,df_test,nepochs=200,nsize=128,ky0=5):
x_train,y_train=df_train['x'].values,df_train['y'].values
x_test, y_test = df_test['x'].values,df_test['y'].values
#
mx.fit(x_train, y_train, epochs=nepochs, batch_size=nsize)
#
y_pred = mx.predict(x_test)
df_test['y_pred']=zdat.ds4x(y_pred,df_test.index,True)
dacc,_=ai_acc_xed2x(df_test.y,df_test['y_pred'],ky0,False)
#
return dacc
def ai_mx_tst_epochs(f_mx,f_tg,df_train,df_test,kepochs=100,nsize=128,ky0=5):
ds,df={},pd.DataFrame()
for xc in range(1,11):
print('\n#',xc)
dnum=xc*kepochs
mx=ks.models.load_model(f_mx)
t0=arrow.now()
dacc=ai_mul_var_tst(mx,df_train,df_test,dnum,nsize,ky0=ky0)
tn=zt.timNSec('',t0)
ds['nepoch'],ds['epoch_acc'],ds['ntim']=dnum,dacc,tn
df=df.append(ds,ignore_index=True)
#
df=df.dropna()
df['nepoch']=df['nepoch'].astype(int)
print('\ndf')
print(df)
print('\nf,',f_tg)
df.to_csv(f_tg,index=False)
#
df.plot(kind='bar',x='nepoch',y='epoch_acc',rot=0)
df.plot(kind='bar',x='nepoch',y='ntim',rot=0)
#
return df
def ai_mx_tst_bsize(f_mx,f_tg,df_train,df_test,nepochs=500,ksize=32,ky0=5):
ds,df={},pd.DataFrame()
for xc in range(1,11):
print('\n#',xc)
dnum=xc*ksize
mx=ks.models.load_model(f_mx)
t0=arrow.now()
dacc=ai_mul_var_tst(mx,df_train,df_test,nepochs,dnum,ky0=ky0)
tn=zt.timNSec('',t0)
ds['bsize'],ds['size_acc'],ds['ntim']=dnum,dacc,tn
df=df.append(ds,ignore_index=True)
#
df=df.dropna()
df['bsize']=df['bsize'].astype(int)
print('\ndf')
print(df)
print('\nf,',f_tg)
df.to_csv(f_tg,index=False)
#
df.plot(kind='bar',x='bsize',y='size_acc',rot=0)
df.plot(kind='bar',x='bsize',y='ntim',rot=0)
return df
def ai_mx_tst_kacc(f_mx,f_tg,df_train,df_test,nepochs=500,nsize=128):
ds,df={},pd.DataFrame()
for xc in range(1,11):
print('\n#',xc)
dnum=xc*1
mx=ks.models.load_model(f_mx)
dacc=ai_mul_var_tst(mx,df_train,df_test,nepochs,nsize,ky0=dnum)
ds['kacc'],ds['dacc']=dnum,dacc
df=df.append(ds,ignore_index=True)
#
df=df.dropna()
df['kacc']=df['kacc'].astype(int)
print('\ndf')
print(df)
print('\nf,',f_tg)
df.to_csv(f_tg,index=False)
#
df.plot(kind='bar',x='kacc',y='dacc',rot=0)
#
return df
| [
"[email protected]"
] | |
e7c6ef1f37a3c97baf924f1e1b774645219dce68 | 0e78b2df0fb93afc62684dece8ac05b700570248 | /BOJ/10950.py | 94b5b24e31f885c29d43c6d8b15d547aa553f987 | [] | no_license | ajy720/Algorithm | f1e2301327db09667ba011bc317c8f380707c25c | b141538802e9056f154ab91c816ad29500505f34 | refs/heads/master | 2022-05-06T21:37:05.780170 | 2022-04-23T09:25:52 | 2022-04-23T09:25:52 | 200,335,390 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | n = int(input())
res = []
for i in range(n):
a, b = map(int, input().split())
res.append(a+b)
for i in range(n):
print(res[i])
| [
"[email protected]"
] | |
9eaaae7e015f7af1d6d99117f0f6bac098baf788 | 5bec846696ea2c198186f492baec4ed6b1de8aae | /detect/eval/voc_eval.py | cb30858f545329e942e2b60217ca5361465088b4 | [] | no_license | Peiiii/lpr | 0e268e1ff71ae37d01a3501c608f4a8024df6dd2 | 90bcbdee4555915b77dd6c6dab2b48ed56c9952d | refs/heads/master | 2022-12-11T23:57:37.075730 | 2019-10-02T15:25:09 | 2019-10-02T15:25:09 | 210,851,442 | 0 | 0 | null | 2022-11-21T21:32:26 | 2019-09-25T13:24:36 | Python | UTF-8 | Python | false | false | 7,137 | py | # --------------------------------------------------------
# Fast/er R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Bharath Hariharan
# --------------------------------------------------------
import xml.etree.ElementTree as ET
import os
import pickle as cPickle
import numpy as np
def parse_rec(filename):
""" Parse a PASCAL VOC xml file """
tree = ET.parse(filename)
objects = []
for obj in tree.findall('object'):
obj_struct = {}
obj_struct['name'] = 'text'
obj_struct['pose'] = obj.find('pose').text
obj_struct['truncated'] = int(obj.find('truncated').text)
obj_struct['difficult'] = int(obj.find('difficult').text)
bbox = obj.find('bndbox')
obj_struct['bbox'] = [int(bbox.find('xmin').text),
int(bbox.find('ymin').text),
int(bbox.find('xmax').text),
int(bbox.find('ymax').text)]
objects.append(obj_struct)
return objects
def voc_ap(rec, prec, use_07_metric=False):
""" ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
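# Quick sanity sketch for voc_ap (toy precision/recall values, illustrative only):
#   rec = np.array([0.1, 0.4, 0.7])
#   prec = np.array([1.0, 0.8, 0.6])
#   voc_ap(rec, prec)          # exact area under the interpolated PR curve
#   voc_ap(rec, prec, True)    # VOC07 11-point approximation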
def voc_eval(detpath,
annopath,
imagesetfile,
classname,
cachedir,
ovthresh=0.5,
use_07_metric=False):
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
cachedir: Directory for caching the annotations
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default False)
"""
# assumes detections are in detpath.format(classname)
# assumes annotations are in annopath.format(imagename)
# assumes imagesetfile is a text file with each line an image name
# cachedir caches the annotations in a pickle file
# first load gt
if not os.path.isdir(cachedir):
os.mkdir(cachedir)
cachefile = os.path.join(cachedir, 'annots.pkl')
# read list of images
with open(imagesetfile, 'r') as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
if not os.path.isfile(cachefile):
# load annots
recs = {}
for i, imagename in enumerate(imagenames):
recs[imagename] = parse_rec(annopath.format(imagename))
if i % 100 == 0:
print ('Reading annotation for {:d}/{:d}'.format(
i + 1, len(imagenames)))
# save
print ('Saving cached annotations to {:s}'.format(cachefile))
        with open(cachefile, 'wb') as f:  # pickle requires binary mode in Python 3
cPickle.dump(recs, f)
else:
# load
        with open(cachefile, 'rb') as f:  # binary mode to match the pickle dump
recs = cPickle.load(f)
# extract gt objects for this class
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj['name'] == classname]
bbox = np.array([x['bbox'] for x in R])
        difficult = np.array([x['difficult'] for x in R]).astype(bool)  # np.bool is removed in recent NumPy
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {'bbox': bbox,
'difficult': difficult,
'det': det}
# read dets
detfile = detpath.format(classname)
with open(detfile, 'r') as f:
lines = f.readlines()
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
# sort by confidence
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1., 0.)
ih = np.maximum(iymax - iymin + 1., 0.)
inters = iw * ih
# union
uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
(BBGT[:, 2] - BBGT[:, 0] + 1.) *
(BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[d] = 1.
R['det'][jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
return rec, prec, ap
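# Hypothetical invocation; the path templates below are placeholders, and
# parse_rec (used above) is assumed to be defined earlier in this file:
#   rec, prec, ap = voc_eval('results/{:s}.txt',       # detpath template
#                            'Annotations/{:s}.xml',   # annopath template
#                            'ImageSets/Main/test.txt',
#                            'car', 'cache_dir',
#                            ovthresh=0.5, use_07_metric=False)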
| [
"[email protected]"
] | |
c37b3f1f31f82758423901149d2f6c52870759a6 | 2dc17d12ff6ea9794177c81aa4f385e4e09a4aa5 | /archive/33SearchinRotatedSortedArray.py | 8947296c9b426239174e8e74045feff557580a62 | [] | no_license | doraemon1293/Leetcode | 924b19f840085a80a9e8c0092d340b69aba7a764 | 48ba21799f63225c104f649c3871444a29ab978a | refs/heads/master | 2022-10-01T16:20:07.588092 | 2022-09-08T02:44:56 | 2022-09-08T02:44:56 | 122,086,222 | 0 | 0 | null | null | null | null | WINDOWS-1252 | Python | false | false | 1,248 | py | # coding=utf-8
'''
Created on 2016-12-22
@author: Administrator
'''
class Solution(object):
def search(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
def find_min_idx(nums):
lo, hi = 0, len(nums) - 1
while lo < hi:
if nums[lo] < nums[hi]:
return lo
mid = (lo + hi) / 2
if nums[mid] < nums[hi]:
hi = mid
else:
lo = mid + 1
return lo
min_idx = find_min_idx(nums)
def search(nums, s, e, target):
while s <= e:
mid = (s + e) / 2
if target < nums[mid]:
e = mid - 1
elif target > nums[mid]:
s = mid + 1
else:
return mid
return None
ans = search(nums, 0, min_idx - 1, target)
if ans != None: return ans
ans = search(nums, min_idx, len(nums) - 1, target)
if ans != None:
return ans
else:
return -1
nums = [1]
target = 1
print Solution().search(nums, target)
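# Locating the pivot and each binary search are O(log n); for example,
# Solution().search([4, 5, 6, 7, 0, 1, 2], 0) returns index 4.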
| [
"[email protected]"
] | |
24fd164938cb979b18d12711c9ca1fcc2cadfa53 | 07b4dd9a88f3404c4851ea7cbb57c67035bc9a54 | /tables.py | 8583928d290312a85da78b29569f435b41ae38a5 | [] | no_license | surajgholap/python-Misc | 9c9d02c42bb37b7378d7336343f8bef7cd802edf | 4a8ce4bfa5a959692d98663b7b5c0b67a165835f | refs/heads/master | 2021-06-17T19:19:25.021038 | 2021-01-27T20:54:03 | 2021-01-27T20:54:03 | 142,781,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | def print_tables(base, times):
for i in range(1, base+1):
for j in range(1, times+1):
print(i*j, end=" ")
print()
print_tables(12, 12)
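# For example, print_tables(3, 4) prints three rows of multiples 1..4:
# 1 2 3 4 / 2 4 6 8 / 3 6 9 12.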
| [
"[email protected]"
] | |
703f70a906c0d25b1b21f4c05a311f1a735b51eb | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Keras_tensorflow/source/tensorflow/contrib/layers/python/ops/sparse_feature_cross_op.py | 688315fd12e6f3b07e97cd1fc273c6ed725de5ca | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 5,025 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for sparse cross operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.util import loader
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import resource_loader
_sparse_feature_cross_op = loader.load_op_library(
resource_loader.get_path_to_datafile("_sparse_feature_cross_op.so"))
# Default hash key for the FingerprintCat64.
SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY = 0xDECAFCAFFE
@deprecated_arg_values(
"2016-11-20",
"The default behavior of sparse_feature_cross is changing, the default\n"
"value for hash_key will change to SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY.\n"
"From that point on sparse_feature_cross will always use FingerprintCat64\n"
"to concatenate the feature fingerprints. And the underlying\n"
"_sparse_feature_cross_op.sparse_feature_cross operation will be marked\n"
"as deprecated.",
hash_key=None)
def sparse_feature_cross(inputs, hashed_output=False, num_buckets=0,
name=None, hash_key=None):
"""Crosses a list of Tensor or SparseTensor objects.
See sparse_feature_cross_kernel.cc for more details.
Args:
inputs: List of `SparseTensor` or `Tensor` to be crossed.
hashed_output: If true, returns the hash of the cross instead of the string.
This will allow us avoiding string manipulations.
num_buckets: It is used if hashed_output is true.
output = hashed_value%num_buckets if num_buckets > 0 else hashed_value.
name: A name prefix for the returned tensors (optional).
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseFeatureCrossOp.
The default value is None, but will become
SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY after 2016-11-20 (optional).
Returns:
A `SparseTensor` with the crossed features.
Return type is string if hashed_output=False, int64 otherwise.
Raises:
TypeError: If the inputs aren't either SparseTensor or Tensor.
"""
if not isinstance(inputs, list):
raise TypeError("Inputs must be a list")
if not all(isinstance(i, sparse_tensor.SparseTensor) or
isinstance(i, ops.Tensor) for i in inputs):
raise TypeError("All inputs must be SparseTensors")
sparse_inputs = [i for i in inputs
if isinstance(i, sparse_tensor.SparseTensor)]
dense_inputs = [i for i in inputs
if not isinstance(i, sparse_tensor.SparseTensor)]
indices = [sp_input.indices for sp_input in sparse_inputs]
values = [sp_input.values for sp_input in sparse_inputs]
shapes = [sp_input.dense_shape for sp_input in sparse_inputs]
out_type = dtypes.int64 if hashed_output else dtypes.string
internal_type = dtypes.string
for i in range(len(values)):
if values[i].dtype != dtypes.string:
values[i] = math_ops.to_int64(values[i])
internal_type = dtypes.int64
for i in range(len(dense_inputs)):
if dense_inputs[i].dtype != dtypes.string:
dense_inputs[i] = math_ops.to_int64(dense_inputs[i])
internal_type = dtypes.int64
if hash_key:
indices_out, values_out, shape_out = (
_sparse_feature_cross_op.sparse_feature_cross_v2(
indices,
values,
shapes,
dense_inputs,
hashed_output,
num_buckets,
hash_key=hash_key,
out_type=out_type,
internal_type=internal_type,
name=name))
else:
indices_out, values_out, shape_out = (
_sparse_feature_cross_op.sparse_feature_cross(
indices,
values,
shapes,
dense_inputs,
hashed_output,
num_buckets,
out_type=out_type,
internal_type=internal_type,
name=name))
return sparse_tensor.SparseTensor(indices_out, values_out, shape_out)
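# A minimal usage sketch (illustrative only): sp_country and sp_language are
# assumed string-valued SparseTensors of shape [batch, ?].
#   crossed = sparse_feature_cross([sp_country, sp_language],
#                                  hashed_output=True, num_buckets=1000)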
ops.NotDifferentiable("SparseFeatureCross")
ops.NotDifferentiable("SparseFeatureCrossV2")
| [
"[email protected]"
] | |
6c7a1cd28299eeeddc802b36c228d41fdab88e8c | fcdfe976c9ed60b18def889692a17dc18a8dd6d7 | /python/basic/class.py | f5dbcf0e2b60d201d45824592934ddc0e4c39888 | [] | no_license | akihikoy/ay_test | 4907470889c9bda11cdc84e8231ef3156fda8bd7 | a24dfb720960bfedb94be3b4d147e37616e7f39a | refs/heads/master | 2023-09-02T19:24:47.832392 | 2023-08-27T06:45:20 | 2023-08-27T06:45:20 | 181,903,332 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | #!/usr/bin/python
class TTest:
x= 10
def __init__(self):
self.y= None
def __repr__(self):
return 'x=%r y=%r' % (self.x, self.y)
class TTestB:
x= 10
y= None
test1= TTest()
test1.x= 20
test1.y= 200
test2= TTest()
print 'test1=',test1
print 'test2=',test2
TTest.x= 30
TTest.y= 300
test3= TTest()
print 'test3=',test3
print '-----'
test1= TTestB()
test1.x= 20
test1.y= 200
test2= TTestB()
print 'test1=',test1.x,test1.y
print 'test2=',test2.x,test2.y
TTestB.x= 30
TTestB.y= 300
test3= TTestB()
print 'test3=',test3.x,test3.y
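# Expected behaviour: assigning test1.x/test1.y creates instance attributes
# that shadow the class attributes, so rebinding TTest.x or TTestB.x later
# only shows up on instances that never shadowed them. Note TTest.__init__
# always sets self.y, so TTest.y = 300 stays shadowed even for test3.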
| [
"[email protected]"
] | |
fc98453f489a4a248f7440bdcc7b9b95490ea51e | 097dda217c3d31b69cb309369dc0357fe0f229ab | /app/customadmin/migrations/0005_shopproduct.py | 38f7c55e8acce58b5eb16fcb194cc2e69f1c26f6 | [] | no_license | Jaycitrusbug/book-python | 57a96ee343eee5b63ca5f7ee2461db82426321b5 | b5a4de74c9114546ee03b8aa5de1381719ddf74e | refs/heads/master | 2023-06-20T01:52:29.484415 | 2021-07-16T13:06:05 | 2021-07-16T13:06:05 | 386,638,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,435 | py | # Generated by Django 3.1.4 on 2020-12-09 12:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('customadmin', '0004_auto_20201209_1008'),
]
operations = [
migrations.CreateModel(
name='ShopProduct',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('active', models.BooleanField(default=True, verbose_name='Active')),
('created_at', models.DateTimeField(auto_now_add=True, help_text='Date when created.', null=True, verbose_name='Created At')),
('updated_at', models.DateTimeField(auto_now=True, help_text='Date when updated.', null=True, verbose_name='Updated At')),
('product_image', models.ImageField(blank=True, null=True, upload_to='products', verbose_name='Product images')),
('name', models.CharField(blank=True, default='', max_length=200, null=True)),
('price', models.TextField(blank=True, default='', max_length=500, null=True)),
('detail', models.CharField(blank=True, default='', max_length=255, null=True)),
],
options={
'verbose_name': 'Shop Product',
'verbose_name_plural': 'Shop Products',
'ordering': ['-created_at'],
},
),
]
| [
"[email protected]"
] | |
b2583d170c8144e89f9ed5ffc15ded383410cb49 | 534570bbb873293bd2646a1567b63d162fbba13c | /Python/Data Structure/Linear List/Array/K Sum/259.3-sum-smaller.py | d38c68bf8ab2e63a5ee2b79f4cefc4222690339d | [] | no_license | XinheLIU/Coding-Interview | fa3df0f7167fb1bc6c8831748249ebaa6f164552 | d6034c567cef252cfafca697aa316c7ad4e7d128 | refs/heads/master | 2022-09-17T14:30:54.371370 | 2022-08-19T15:53:35 | 2022-08-19T15:53:35 | 146,382,499 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | class Solution:
def threeSumSmaller(self, nums: List[int], target: int) -> int:
nums.sort()
ret = 0
for i in range(0, len(nums)-2):
l, r = i + 1, len(nums) - 1
while l < r:
Sum = nums[i] + nums[l] + nums[r]
if Sum < target:
ret += r - l
l += 1
else:
r -= 1
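        # Each i costs O(n) via the two-pointer sweep, so the scan is O(n^2)
        # after the O(n log n) sort; e.g. threeSumSmaller([-2, 0, 1, 3], 2)
        # returns 2, counting the triplets (-2, 0, 1) and (-2, 0, 3).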
        return ret
| [
"[email protected]"
] | |
94df604e74040fe35e0f339fc89e6977d72911ab | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/enums/types/budget_campaign_association_status.py | a3c9d3bbc4cb3eedea948a2bd7839c61f472bdb3 | [
"Apache-2.0"
] | permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,279 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v6.enums',
marshal='google.ads.googleads.v6',
manifest={
'BudgetCampaignAssociationStatusEnum',
},
)
class BudgetCampaignAssociationStatusEnum(proto.Message):
r"""Message describing the status of the association between the
Budget and the Campaign.
"""
class BudgetCampaignAssociationStatus(proto.Enum):
r"""Possible statuses of the association between the Budget and
the Campaign.
"""
UNSPECIFIED = 0
UNKNOWN = 1
ENABLED = 2
REMOVED = 3
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
79f1cf41b9b75f0519be704b7f70b2277ae2a03c | e0ff22c17eaa1e7009089a58575567b0ead63d49 | /scan/forms.py | 96def1cf32e59163612c71d895e7389098144f02 | [] | no_license | slehaff/dblive | afcb5297057ad4d78177b886013250d2ed068424 | 7f7bdf38998bd65d00e5ac7df3ef5289b2781e83 | refs/heads/master | 2023-05-27T16:38:21.865344 | 2021-06-07T10:09:28 | 2021-06-07T10:09:28 | 303,078,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,143 | py | from PIL import Image
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
class PicForm2(forms.Form):
deviceid = forms.CharField(required=False, max_length=32, strip=True)
cmd = forms.CharField(required=False, label='Cmd', max_length=50, strip=True)
Picture = forms.ImageField(label="Billed", required=False)
Pic1 = forms.ImageField(label="Billed1", required=False)
Pic2 = forms.ImageField(label="Billed2", required=False)
Pic3 = forms.ImageField(label="Billed3", required=False)
    # 'required' should be changed eventually
def __init__(self, *args, **kwargs):
super(PicForm2, self).__init__(*args, **kwargs)
self.helper = FormHelper()
#self.helper.form_class = 'form-horizontal'
#self.helper.label_class = 'col-sm-5 col-sm-offset-2' # control-label
#self.helper.field_class = 'col-sm-4'
self.helper.form_tag = True
self.helper.add_input(Submit('submit', 'Send'))
        self.helper.add_input(Submit('cancel', 'Fortryd', css_class='btn-secondary', formnovalidate='formnovalidate', formaction='/'))
| [
"[email protected]"
] | |
799ab743daf9364572de676772d64f9a54091f81 | 71b8849e3bd0441dc2d7a6a004b284c11540aca6 | /aldobaran/analysis/dgr_width_calcs/aldobaran_analysis_likelihood_iterative_mask_binning.py | 4c7c08f53d774aa960be6e9011bdbe5f987abebf | [] | no_license | ezbc/scripts_and_logs | bdcf98233b89cff6296a8d0d962c78283bfc8990 | a66d24c5d1e8d464a0f38d8a2eec4f594e7ea357 | refs/heads/master | 2021-01-10T12:10:26.488731 | 2016-04-27T19:46:02 | 2016-04-27T19:46:02 | 51,485,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 80,071 | py | import matplotlib
matplotlib.use('Agg')
import warnings
warnings.filterwarnings('ignore')
''' Plotting Functions
'''
def plot_likelihoods(likelihoods,velocity_centers,velocity_widths,
filename=None,show=True, returnimage=False):
''' Plots a heat map of likelihoodelation values as a function of velocity width
and velocity center.
'''
# Import external modules
import numpy as np
import math
from mpl_toolkits.axes_grid1 import ImageGrid
import pyfits as pf
import matplotlib.pyplot as plt
import matplotlib
from mpl_toolkits.axes_grid1 import make_axes_locatable
# Set up plot aesthetics
plt.clf()
plt.rcdefaults()
colormap = plt.cm.gist_ncar
#color_cycle = [colormap(i) for i in np.linspace(0, 0.9, len(flux_list))]
font_scale = 8
params = {#'backend': .pdf',
'axes.labelsize': font_scale,
'axes.titlesize': font_scale,
'text.fontsize': font_scale,
'legend.fontsize': font_scale * 3 / 4.0,
'xtick.labelsize': font_scale,
'ytick.labelsize': font_scale,
'font.weight': 500,
'axes.labelweight': 500,
'text.usetex': False,
#'figure.figsize': (8, 8 ),
#'axes.color_cycle': color_cycle # colors of different plots
}
plt.rcParams.update(params)
fig = plt.figure(figsize=(3,2))
imagegrid = ImageGrid(fig, (1,1,1),
nrows_ncols=(1,1),
ngrids=1,
cbar_mode="single",
cbar_location='right',
cbar_pad="3%",
cbar_size='6%',
axes_pad=0,
aspect=False,
label_mode='L',
share_all=False)
# Unravel the likelihoods if raveled
    if len(likelihoods.shape) == 1:
        # keep a handle on the raveled input before rebinding the name,
        # otherwise the loop below would read from the new NaN array
        likelihoods_raveled = likelihoods
        likelihoods = np.empty((velocity_centers.shape[0],
                                velocity_widths.shape[0]))
        likelihoods[:,:] = np.NaN
        count = 0
        try:
            for i, center in enumerate(velocity_centers):
                for j, width in enumerate(velocity_widths):
                    likelihoods[i,j] = likelihoods_raveled[count]
                    count += 1
        except IndexError:
            print(' plot_likelihoods: 0-d array input, cannot proceed')
image = np.ma.array(likelihoods, mask=np.isnan(likelihoods))
ax = imagegrid[0]
ax.set_xlabel('Velocity Width (km/s)')
ax.set_ylabel('Velocity Center (km/s)')
#ax.set_xticks(np.arange(0,velocity_widths.shape[0],1)[::5],
# velocity_centers[::5])
plt.rc('text', usetex=False)
im = ax.imshow(image, interpolation='nearest', origin='lower',
extent=[velocity_widths[0],velocity_widths[-1],
velocity_centers[0],velocity_centers[-1]],
cmap=plt.cm.gist_stern,
#cmap=plt.cm.gray,
#norm=matplotlib.colors.LogNorm(),
)
cb = ax.cax.colorbar(im)
#cb.set_clim(vmin=0.)
# Write label to colorbar
cb.set_label_text(r'log L')
fractions = np.array([0.95, 0.68])
levels = (1 + fractions * image.min())
cs = ax.contour(image, levels=levels, origin='lower',
extent=[velocity_widths[0],velocity_widths[-1],
velocity_centers[0],velocity_centers[-1]],
colors='k'
)
# Define a class that forces representation of float to look a certain way
# This remove trailing zero so '1.0' becomes '1'
class nf(float):
def __repr__(self):
str = '%.1f' % (self.__float__(),)
if str[-1]=='0':
return '%.0f' % self.__float__()
else:
return '%.1f' % self.__float__()
# Recast levels to new class
cs.levels = [nf(val) for val in fractions*100.0]
#fmt = {}
#for level, fraction in zip(cs.levels, fractions):
# fmt[level] = fraction
fmt = '%r %%'
ax.clabel(cs, cs.levels, fmt=fmt, fontsize=9, inline=1)
if filename is not None:
plt.savefig(filename,bbox_inches='tight')
if show:
plt.draw()
plt.show()
if returnimage:
return likelihoods
def plot_likelihoods_hist(global_props, filename=None, show=True,
returnimage=False, plot_axes=('centers', 'widths'),
contour_confs=None):
    ''' Plots a heat map of likelihood values as a function of velocity width
and velocity center.
'''
# Import external modules
import numpy as np
import math
from mpl_toolkits.axes_grid1 import ImageGrid
import pyfits as pf
import matplotlib.pyplot as plt
import matplotlib
from mpl_toolkits.axes_grid1 import make_axes_locatable
# Set up plot aesthetics
# ----------------------
plt.close;plt.clf()
plt.rcdefaults()
# Color map
cmap = plt.cm.gnuplot
# Color cycle, grabs colors from cmap
color_cycle = [cmap(i) for i in np.linspace(0, 0.8, 2)]
font_scale = 9
line_weight = 600
font_weight = 600
params = {
'axes.color_cycle': color_cycle, # colors of different plots
'axes.labelsize': font_scale,
'axes.titlesize': font_scale,
#'axes.weight': line_weight,
'axes.linewidth': 1.2,
'axes.labelweight': font_weight,
'legend.fontsize': font_scale*3/4,
'xtick.labelsize': font_scale,
'ytick.labelsize': font_scale,
'font.weight': font_weight,
'font.serif': 'computer modern roman',
'text.fontsize': font_scale,
'text.usetex': True,
'text.latex.preamble': r'\usepackage[T1]{fontenc}',
#'font.family': 'sans-serif',
'figure.figsize': (3.6, 3.6),
'figure.dpi': 600,
'backend' : 'pdf',
#'figure.titlesize': font_scale,
}
plt.rcParams.update(params)
pgf_with_pdflatex = {
"pgf.texsystem": "pdflatex",
"pgf.preamble": [
r"\usepackage[utf8x]{inputenc}",
r"\usepackage[T1]{fontenc}",
r"\usepackage{cmbright}",
]
}
plt.rcParams.update(pgf_with_pdflatex)
fig, ax_image = plt.subplots()
if plot_axes[0] == 'widths':
x_grid = global_props['vel_widths']
x_confint = (global_props['hi_velocity_width']['value'],
global_props['hi_velocity_width_error']['value'][0],
global_props['hi_velocity_width_error']['value'][1],
)
x_extent = x_grid[0], x_grid[-1]
ax_image.set_xlabel(r'Velocity Width [km/s]')
x_sum_axes = 1
y_pdf_label = r'Width PDF'
x_limits = (x_grid[0], x_grid[-1])
if plot_axes[1] == 'dgrs':
y_grid = global_props['dgrs']
y_confint = (global_props['dust2gas_ratio']['value'],
global_props['dust2gas_ratio_error']['value'][0],
global_props['dust2gas_ratio_error']['value'][1],
)
y_extent = y_grid[0], y_grid[-1]
ax_image.set_ylabel(r'DGR [10$^{-20}$ cm$^2$ mag]')
y_sum_axes = 0
x_pdf_label = r'DGR PDF'
y_limits = (y_grid[0], y_grid[-1])
# Create axes
sum_axes = np.array((x_sum_axes, y_sum_axes))
sum_axis = np.argmax(np.bincount(np.ravel(sum_axes)))
# Mask NaNs
likelihoods = np.sum(global_props['likelihoods'], axis=(2))
image = np.ma.array(likelihoods, mask=np.isnan(likelihoods))
# Create likelihood image
#image = np.sum(likelihoods, axis=sum_axis) / np.sum(likelihoods)
image = likelihoods / np.sum(likelihoods)
# Derive marginal distributions of both centers and widths
x_sum = np.sum(likelihoods, axis=x_sum_axes)
x_pdf = x_sum / np.sum(x_sum)
y_sum = np.sum(likelihoods, axis=y_sum_axes)
y_pdf = y_sum / np.sum(y_sum)
extent = np.ravel(np.array((x_extent, y_extent)))
#plt.rc('text', usetex=False)
im = ax_image.imshow(image.T, interpolation='nearest', origin='lower',
extent=extent,
#cmap=plt.cm.gist_stern,
#cmap=plt.cm.gray,
cmap=plt.cm.binary,
#norm=matplotlib.colors.LogNorm(),
aspect='auto',
)
show_pdfs = 1
if show_pdfs:
divider = make_axes_locatable(ax_image)
ax_pdf_x = divider.append_axes("top", 0.6, pad=0.1, sharex=ax_image)
ax_pdf_y = divider.append_axes("right", 0.6, pad=0.1,
sharey=ax_image)
# make some labels invisible
plt.setp(ax_pdf_x.get_xticklabels() + \
ax_pdf_y.get_yticklabels(),
visible=False)
ax_pdf_x.plot(x_grid,
x_pdf,
color='k',
drawstyle='steps-mid',
linewidth=2,
)
ax_pdf_y.plot(y_pdf,
y_grid,
color='k',
drawstyle='steps-mid',
linewidth=2,
)
#axHistx.axis["bottom"].major_ticklabels.set_visible(False)
# Tick marks on the pdf?
pdf_ticks = False
for tl in ax_pdf_x.get_xticklabels():
tl.set_visible(False)
if pdf_ticks:
wmax = x_pdf.max()
ticks = [0, 0.5*wmax, 1.0*wmax]
tick_labels = ['{0:.1f}'.format(ticks[0]),
'{0:.1f}'.format(ticks[1]),
'{0:.1f}'.format(ticks[2]),
]
ax_pdf_x.set_yticks(ticks)
ax_pdf_x.set_yticklabels(tick_labels)
else:
for tl in ax_pdf_x.get_yticklabels():
tl.set_visible(False)
ax_pdf_x.set_ylabel(y_pdf_label)
for tl in ax_pdf_y.get_yticklabels():
tl.set_visible(False)
if pdf_ticks:
cmax = y_pdf.max()
ticks = [0, 0.5*cmax, 1.0*cmax]
tick_labels = ['{0:.1f}'.format(ticks[0]),
'{0:.1f}'.format(ticks[1]),
'{0:.1f}'.format(ticks[2]),
]
ax_pdf_y.set_xticks(ticks)
ax_pdf_y.set_xticklabels(tick_labels)
else:
for tl in ax_pdf_y.get_xticklabels():
tl.set_visible(False)
ax_pdf_y.set_xlabel(x_pdf_label)
# Show confidence limits
if y_confint is not None:
ax_pdf_y.axhspan(y_confint[0] - y_confint[1],
y_confint[0] + y_confint[2],
color='k',
linewidth=1,
alpha=0.2)
ax_pdf_y.axhline(y_confint[0],
color='k',
linestyle='--',
linewidth=3,
alpha=1)
if x_confint is not None:
ax_pdf_x.axvspan(x_confint[0] - x_confint[1],
x_confint[0] + x_confint[2],
color='k',
linewidth=1,
alpha=0.2)
ax_pdf_x.axvline(x_confint[0],
color='k',
linestyle='--',
linewidth=3,
alpha=1)
#cb.set_clim(vmin=0.)
# Write label to colorbar
#cb.set_label_text(r'log L')
# Plot contours
if contour_confs is not None:
fractions = (1.0 - np.asarray(contour_confs))
levels = (fractions * image.max())
cs = ax_image.contour(image.T, levels=levels, origin='lower',
extent=extent,
colors='k'
)
# Define a class that forces representation of float to look a certain
# way This remove trailing zero so '1.0' becomes '1'
class nf(float):
def __repr__(self):
str = '%.1f' % (self.__float__(),)
if str[-1]=='0':
return '%.0f' % self.__float__()
else:
return '%.1f' % self.__float__()
# Recast levels to new class
cs.levels = [nf(val) for val in np.asarray(contour_confs)*100.0]
#fmt = {}
#for level, fraction in zip(cs.levels, fractions):
# fmt[level] = fraction
fmt = '%r %%'
ax_image.clabel(cs, cs.levels, fmt=fmt, fontsize=9, inline=1)
try:
ax_image.set_xlim(x_limits)
ax_image.set_ylim(y_limits)
except UnboundLocalError:
pass
if 0:
#if npix is not None or av_threshold is not None:
text = ''
if npix is not None:
text += r'N$_{\rm pix}$ = ' + \
'{0:.0f}'.format(npix)
if av_threshold is not None:
text += '\n'
if av_threshold is not None:
text += r'$A_V$ threshold = {0:.1f} mag'.format(av_threshold)
text += '\n'
text += r'DGR = {0:.2f} '.format(y_confint[0]) + \
r'$\times$ 10$^{-20}$ (cm$^2$ mag$^1$)'
text += '\n'
text += r'Velocity width = {0:.2f} '.format(x_confint[0]) + \
r'km/s'
ax_image.annotate(text,
xytext=(0.95, 0.95),
xy=(0.95, 0.95),
textcoords='axes fraction',
xycoords='axes fraction',
color='k',
fontsize=font_scale*0.75,
bbox=dict(boxstyle='round',
facecolor='w',
alpha=0.3),
horizontalalignment='right',
verticalalignment='top',
)
if filename is not None:
plt.draw()
plt.savefig(filename, bbox_inches='tight')
if show:
plt.draw()
plt.show()
if returnimage:
return likelihoods
def plot_av_image(av_image=None, header=None, title=None,
limits=None, savedir='./', filename=None, show=True):
# Import external modules
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
from mpl_toolkits.axes_grid1 import ImageGrid
import pyfits as pf
import matplotlib.pyplot as plt
import pywcsgrid2 as wcs
import pywcs
from pylab import cm # colormaps
from matplotlib.patches import Polygon
# Set up plot aesthetics
plt.clf()
plt.rcdefaults()
    colormap = plt.cm.gist_ncar
    # color_cycle and font_weight were undefined here; define them so the
    # rcParams dict below does not raise a NameError
    color_cycle = [colormap(i) for i in np.linspace(0, 0.9, 2)]
    font_scale = 15
    font_weight = 600
    params = {
        'axes.color_cycle': color_cycle, # colors of different plots
'axes.labelsize': font_scale,
'axes.titlesize': font_scale,
#'axes.weight': line_weight,
'axes.linewidth': 1.2,
'axes.labelweight': font_weight,
'legend.fontsize': font_scale*3/4,
'xtick.labelsize': font_scale,
'ytick.labelsize': font_scale,
'font.weight': font_weight,
'font.serif': 'computer modern roman',
'text.fontsize': font_scale,
'text.usetex': True,
'text.latex.preamble': r'\usepackage[T1]{fontenc}',
#'font.family': 'sans-serif',
'figure.figsize': (3.6, 3.6),
'figure.dpi': 600,
'backend' : 'pdf',
#'figure.titlesize': font_scale,
}
plt.rcParams.update(params)
pgf_with_pdflatex = {
"pgf.texsystem": "pdflatex",
"pgf.preamble": [
r"\usepackage[utf8x]{inputenc}",
r"\usepackage[T1]{fontenc}",
r"\usepackage{cmbright}",
]
}
plt.rcParams.update(pgf_with_pdflatex)
# Create figure instance
fig = plt.figure()
nrows_ncols=(1,1)
ngrids=1
imagegrid = ImageGrid(fig, (1,1,1),
nrows_ncols=nrows_ncols,
ngrids=ngrids,
cbar_mode="each",
cbar_location='right',
cbar_pad="2%",
cbar_size='3%',
axes_pad=1,
axes_class=(wcs.Axes,
dict(header=header)),
aspect=True,
label_mode='L',
share_all=True)
# create axes
ax = imagegrid[0]
cmap = cm.jet # colormap
# show the image
im = ax.imshow(av_image,
interpolation='nearest',origin='lower',
cmap=cmap,
#norm=matplotlib.colors.LogNorm()
vmin=0,
vmax=1.4
)
    # Aesthetics
ax.set_display_coord_system("fk5")
ax.set_ticklabel_type("hms", "dms")
ax.set_xlabel('Right Ascension (J2000)',)
ax.set_ylabel('Declination (J2000)',)
# colorbar
cb = ax.cax.colorbar(im)
cmap.set_bad(color='w')
# plot limits
if limits is not None:
ax.set_xlim(limits[0],limits[2])
ax.set_ylim(limits[1],limits[3])
# Write label to colorbar
cb.set_label_text(r'A$_V$ (Mag)',)
if title is not None:
fig.suptitle(title, fontsize=font_scale)
if filename is not None:
plt.savefig(savedir + filename, bbox_inches='tight')
if show:
fig.show()
def plot_mask_residuals(residuals=None, x_fit=None, y_fit=None,
residual_thres=None, filename=None, show=True, title=None):
# Import external modules
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
from scipy.integrate import simps as integrate
# Set up plot aesthetics
# ----------------------
plt.close;plt.clf()
plt.rcdefaults()
# Color map
cmap = plt.cm.gnuplot
# Color cycle, grabs colors from cmap
color_cycle = [cmap(i) for i in np.linspace(0, 0.8, 2)]
font_scale = 9
line_weight = 600
font_weight = 600
params = {
'axes.color_cycle': color_cycle, # colors of different plots
'axes.labelsize': font_scale,
'axes.titlesize': font_scale,
#'axes.weight': line_weight,
'axes.linewidth': 1.2,
'axes.labelweight': font_weight,
'legend.fontsize': font_scale*3/4,
'xtick.labelsize': font_scale,
'ytick.labelsize': font_scale,
'font.weight': font_weight,
'font.serif': 'computer modern roman',
'text.fontsize': font_scale,
'text.usetex': True,
'text.latex.preamble': r'\usepackage[T1]{fontenc}',
#'font.family': 'sans-serif',
'figure.figsize': (3.6, 3.6),
'figure.dpi': 600,
'backend' : 'pdf',
#'figure.titlesize': font_scale,
}
plt.rcParams.update(params)
pgf_with_pdflatex = {
"pgf.texsystem": "pdflatex",
"pgf.preamble": [
r"\usepackage[utf8x]{inputenc}",
r"\usepackage[T1]{fontenc}",
r"\usepackage{cmbright}",
]
}
plt.rcParams.update(pgf_with_pdflatex)
# Create figure instance
fig = plt.figure()
ax = fig.add_subplot(111)
counts, bin_edges = \
np.histogram(np.ravel(residuals[~np.isnan(residuals)]),
bins=1000,
)
bin_edges_ext = np.zeros(len(counts) + 1)
counts_ext = np.zeros(len(counts) + 1)
bin_edges_ext[0] = bin_edges[0] - (bin_edges[1] - bin_edges[0])
bin_edges_ext[1:] = bin_edges[:-1]
counts_ext[0] = 0
counts_ext[1:] = counts
# Normalize so area = 1
#counts_ext /= np.nansum(counts_ext) * (bin_edges_ext[2] - bin_edges_ext[1])
counts_ext = counts_ext / integrate(counts_ext, x=bin_edges_ext)
y_fit /= np.max(y_fit)
y_fit *= np.max(counts_ext)
print('max counts', np.max(counts_ext))
ax.plot(bin_edges_ext, counts_ext, drawstyle='steps-mid',
linewidth=1.5)
ax.plot(x_fit, y_fit,
linewidth=2,
alpha=0.6)
ax.set_xlim([np.nanmin(bin_edges_ext) - \
np.abs(0.8 * np.nanmin(bin_edges_ext)),4])
ax.set_ylim([-0.1, 1.1])
ax.axvline(residual_thres,
color='k',
linestyle='--',
linewidth=1.5)
ax.set_xlabel(r'Residual $A_V$ [mag]')
ax.set_ylabel('Normalized PDF')
if title is not None:
fig.suptitle(title, fontsize=font_scale)
if filename is not None:
plt.savefig(filename, bbox_inches='tight', dpi=600)
if show:
plt.show()
''' Calculations
'''
def calc_logL(model, data, data_error=None, weights=None):
'''
Calculates log likelihood
http://www.physics.utah.edu/~detar/phys6720/handouts/curve_fit/curve_fit/node2.html
'''
import numpy as np
if data_error is None:
data_error = np.std(data)
if weights is None:
weights = 1.0
logL = -np.sum((data - model)**2 / (2 * (data_error)**2))
return logL
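# calc_logL is the chi-squared form of the Gaussian log-likelihood,
# logL = -sum((d_i - m_i)^2 / (2 sigma_i^2)); identical model and data give
# the maximum logL of 0. Note the `weights` argument is accepted but is not
# currently applied in the sum.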
def threshold_area(x, y, area_fraction=0.68):
'''
Finds the limits of a 1D array which includes a given fraction of the
integrated data.
Parameters
----------
    x, y : array-like
        1D arrays of sample positions and corresponding function values.
    area_fraction : float
        Fraction of area.
    Returns
    -------
    limits : tuple
        Peak position with lower and upper errors spanning the given
        fraction of area: (x_peak, low_error, up_error).
'''
import numpy as np
from scipy.integrate import simps as integrate
# Check if size of data
if x.size == 1:
return x[0], 0, 0
# Step for lowering threshold
step = (np.max(y) - np.median(y)) / 10000.0
# initial threshold
threshold = np.max(y) - step
threshold_area = 0.0
# area under whole function
area = integrate(y, x)
# Stop when the area below the threshold is greater than the max area
while threshold_area < area * area_fraction and threshold > 0:
threshold_indices = np.where(y > threshold)[0]
try:
bounds_indices = (threshold_indices[0], threshold_indices[-1])
except IndexError:
bounds_indices = ()
try:
threshold_area = integrate(y[bounds_indices[0]:bounds_indices[1]],
x[bounds_indices[0]:bounds_indices[1]])
threshold_area += threshold * (x[bounds_indices[1]] - \
x[bounds_indices[0]])
except IndexError:
threshold_area = 0
threshold -= step
if threshold < 0:
bounds_indices = (0, len(x) - 1)
x_peak = x[y == y.max()][0]
low_error, up_error = x_peak - x[bounds_indices[0]], \
x[bounds_indices[1]] - x_peak
return (x_peak, low_error, up_error)
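# Sanity check under stated assumptions: for a finely sampled unit Gaussian,
# threshold_area(x, y, area_fraction=0.68) should return bounds near +/- 1
# sigma about the peak, since the routine lowers a threshold to find a
# highest-density interval.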
def write_mle_tofits(filename='', vel_widths=None, dgrs=None,
likelihoods=None, clobber=False):
from astropy.io import fits
print('Writing likelihood grid to file:')
print(filename)
header = fits.Header()
header['NAXIS'] = 2
header['CTYPE1'] = 'WIDTHS'
header['CTYPE2'] = 'DGR'
header['CRPIX1'] = 0
header['CRPIX2'] = 0
header['CRVAL1'] = vel_widths[0]
header['CRVAL2'] = dgrs[0]
try:
header['CDELT1'] = vel_widths[1] - vel_widths[0]
except IndexError:
header['CDELT1'] = 1
try:
header['CDELT2'] = dgrs[1] - dgrs[0]
except IndexError:
header['CDELT2'] = 1
fits.writeto(filename,
likelihoods,
header,
clobber=clobber)
def gauss(x, width, amp, x0):
import numpy as np
return amp * np.exp(-(x - x0)**2 / (2 * width**2))
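# Unnormalized Gaussian; e.g. gauss(0.0, 1.0, 2.0, 0.0) returns the peak
# amplitude 2.0.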
def get_residual_mask(residuals, resid_width_scale=3.0, plot_progress=False,
results_filename=None):
'''
'''
import numpy as np
from scipy.optimize import curve_fit, minimize
# Fit the rising portion of the residuals
residuals_crop = residuals[(residuals < 0) & \
#(residuals > -1.5) & \
~np.isnan(residuals)]
counts, bin_edges = np.histogram(np.ravel(residuals_crop),
bins=100,
)
p0=(2, np.nanmax(counts), 0)
if 0:
fit_params = curve_fit(gauss,
bin_edges[:-1],
counts,
p0=p0,
maxfev=1000000,
)[0]
elif 1:
from lmfit import minimize, Parameters
# Set parameter limits and initial guesses
params = Parameters()
params.add('width',
value=p0[0],
min=0.1,
max=10,
)
params.add('amp',
value=p0[1],
min=0,
max=2 * np.nanmax(counts),
)
params.add('x0',
value=p0[2],
min=-4,
max=4,
)
def norm(params, bin_edges, counts):
width = params['width'].value
amp = params['amp'].value
x0 = params['x0'].value
model = gauss(bin_edges, width, amp, x0)
norm = np.sum((counts - model)**2)
return norm
# Perform the fit!
result = minimize(norm,
params,
args=(bin_edges[:-1], counts),
method='lbfgsb')
fit_params = (params['width'].value, params['amp'].value,
params['x0'].value)
else:
bounds = ((0, 10), (0, 5 * np.nanmax(counts)), (-10, 10))
fit_params = minimize(gauss,
counts,
method='L-BFGS-B',
bounds=bounds,)
# Include only residuals within 3 sigma
residual_thres = resid_width_scale * np.abs(fit_params[0]) + fit_params[2]
mask = residuals > residual_thres
import matplotlib.pyplot as plt
plt.clf(); plt.close();
x_fit = np.linspace(np.nanmin(residuals),
np.nanmax(residuals),
1000)
y_fit = gauss(x_fit, *fit_params)
plt.plot(bin_edges[:-1], counts)
plt.plot(x_fit, y_fit)
plt.savefig('/usr/users/ezbc/Desktop/residuals.png')
if results_filename is not None:
x_fit = np.linspace(np.nanmin(residuals),
np.nanmax(residuals),
1000)
y_fit = gauss(x_fit, *fit_params)
y_fit / np.nanmax(residuals)
print('\nSaving residual mask PDF figure to\n' + results_filename)
plot_mask_residuals(residuals=residuals,
x_fit=x_fit,
y_fit=y_fit,
residual_thres=residual_thres,
filename=results_filename,
show=plot_progress)
plot_mask_residuals(residuals=residuals,
x_fit=x_fit,
y_fit=y_fit,
residual_thres=residual_thres,
filename=results_filename.replace('.pdf', '.png'),
show=plot_progress)
return mask
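# With the default resid_width_scale=3.0, the returned mask flags pixels
# whose residual exceeds roughly three fitted noise widths above the fitted
# offset, i.e. residuals inconsistent with the white-noise Gaussian.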
def iterate_residual_masking(
nhi_image=None,
nhi_image_error=None,
av_data=None,
av_data_error=None,
init_mask=None,
vel_range=None,
dgrs=None,
intercepts=None,
threshold_delta_dgr=None,
resid_width_scale=3.0,
plot_progress=False,
results_filename=None,
verbose=False,
):
'''
Returns
-------
av_model : numpy array
mask : numpy array
dgr : float
'''
import numpy as np
# Mask out nans
mask = (np.isnan(av_data) | \
np.isnan(av_data_error) | \
(av_data_error == 0) | \
np.isnan(nhi_image) | \
np.isnan(nhi_image_error) | \
(nhi_image_error == 0))
# Apply initial mask to exclude throughout process
if init_mask is not None:
mask += init_mask
# solve for DGR using linear least squares
print('\nBeginning iterative DGR calculations + masking...')
# Iterate masking pixels which are correlated and rederiving a linear least
# squares solution for the DGR
# -------------------------------------------------------------------------
use_intercept = True
delta_dgr = 1e10
dgr = 1e10
iteration = 0
while delta_dgr > threshold_delta_dgr:
if 0:
N = len(np.ravel(nhi_image[~mask]))
if use_intercept:
A = np.array((np.ones(N),
np.ravel(nhi_image[~mask] / \
nhi_image_error[~mask]),))
else:
A = np.array((np.ravel(nhi_image[~mask] / \
nhi_image_error[~mask]),))
b = np.array((np.ravel(av_data[~mask] / av_data_error[~mask]),))
A = np.matrix(A).T
b = np.matrix(b).T
a = (np.linalg.pinv(A) * b)
if use_intercept:
intercept = a[0, 0]
dgr_new = a[1, 0]
else:
dgr_new = a[0, 0]
intercept = 0
else:
results = calc_likelihoods(
nhi_image=nhi_image[~mask],
av_image=av_data[~mask],
av_image_error=av_data_error[~mask],
#image_weights=bin_weights[~mask],
#vel_center=vel_center_masked,
vel_widths=np.arange(0,1,1),
dgrs=dgrs,
intercepts=intercepts,
results_filename='',
return_likelihoods=True,
likelihood_filename=None,
clobber=False,
verbose=False
)
# Unpack output of likelihood calculation
(vel_range_confint, width_confint, dgr_confint, intercepts_confint,
likelihoods, width_likelihood, dgr_likelihood,
intercept_likelihood, width_max, dgr_max, intercept_max,
vel_range_max) = results
dgr_new = dgr_max
intercept = intercept_max
# Create model with the DGR
if verbose:
print('Iteration {0:.0f} results:'.format(iteration))
print('\tDGR = {0:.2} 10^20 cm^2 mag'.format(dgr_new))
print('\tIntercept = {0:.2f} mag'.format(intercept))
print('')
av_image_model = nhi_image * dgr_new + intercept
#if dgr == 1e10:
# residuals = av_data - av_image_model
#else:
# residuals = av_data - av_image_model + intercept
residuals = av_data - av_image_model
residuals[mask] = np.nan
if 0:
import matplotlib.pyplot as plt
plt.imshow(residuals, origin='lower')
plt.colorbar(cmap=plt.cm.gnuplot)
plt.show()
# Include only residuals which are white noise
if iteration == 0:
plot_filename = results_filename
else:
plot_filename = None
mask_new = get_residual_mask(residuals,
resid_width_scale=resid_width_scale,
plot_progress=plot_progress,
results_filename=plot_filename)
# Mask non-white noise, i.e. correlated residuals.
mask[mask_new] = 1
if verbose:
npix = mask.size - np.sum(mask)
print('\tNumber of non-masked pixels = {0:.0f}'.format(npix))
# Reset while loop conditions
delta_dgr = np.abs(dgr - dgr_new)
dgr = dgr_new
iteration += 1
# Plot results
if 0:
mask_new = get_residual_mask(residuals,
resid_width_scale=resid_width_scale,
plot_progress=plot_progress,
results_filename=results_filename)
# Create model of Av
av_model = dgr * nhi_image
av_model[mask] = np.nan
return (av_model, mask, dgr)
def calc_likelihoods(
hi_cube=None,
hi_vel_axis=None,
nhi_image=None,
av_image=None,
av_image_error=None,
image_weights=None,
vel_center=None,
vel_widths=None,
dgrs=None,
intercepts=None,
plot_results=False,
results_filename='',
return_likelihoods=True,
likelihood_filename=None,
clobber=False,
conf=0.68,
threshold_delta_dgr=0.0005,
verbose=False,
):
'''
Parameters
----------
Returns
-------
hi_vel_range : tuple
Lower and upper bound of HI velocity range in km/s which provides the
        best correlated N(HI) distribution with Av.
    likelihoods : array-like, optional
        Array of likelihoods corresponding to each
permutation through the velocity centers and velocity widths.
'''
import numpy as np
from myimage_analysis import calculate_nhi
from os import path
from astropy.io import fits
from mystats import calc_symmetric_error
# Check if likelihood grid should be derived
if likelihood_filename is not None:
if not path.isfile(likelihood_filename):
perform_mle = True
write_mle = True
elif clobber:
perform_mle = True
write_mle = True
else:
perform_mle = False
write_mle = False
# If no filename provided, do not read file and do not write file
else:
write_mle = False
perform_mle = True
if perform_mle:
        # calculate the log likelihood for each velocity
# range
likelihoods = np.zeros((len(vel_widths),
len(dgrs),
len(intercepts)))
# Progress bar parameters
total = float(likelihoods.size)
count = 0
for j, vel_width in enumerate(vel_widths):
# Construct N(HI) image outside of DGR loop, then apply
# DGRs in loop
# use the hi cube and vel range if no nhi image provided
if nhi_image is None:
vel_range = np.array((vel_center - vel_width / 2.,
vel_center + vel_width / 2.))
nhi_image = calculate_nhi(cube=hi_cube,
velocity_axis=hi_vel_axis,
velocity_range=vel_range,
return_nhi_error=False)
# Cycle through DGR to estimate error
for k, dgr in enumerate(dgrs):
for m, intercept in enumerate(intercepts):
# Create model of Av with N(HI) and DGR
av_image_model = nhi_image * dgr + intercept
logL = calc_logL(av_image_model,
av_image,
data_error=av_image_error,
weights=image_weights)
likelihoods[j, k, m] = logL
#print 'logL =', logL
# Shows progress each 10%
count += 1
abs_step = int((total * 1)/10) or 10
if count and not count % abs_step:
                        print("\t{0:.0%} processed".format(count/total))
nhi_image = None
# Load file of likelihoods
elif not perform_mle:
print('Reading likelihood grid file:')
print(likelihood_filename)
hdu = fits.open(likelihood_filename)
likelihoods = hdu[0].data
if len(vel_widths) != likelihoods.shape[0] or \
len(dgrs) != likelihoods.shape[1]:
            raise ValueError('Specified parameter grid not the same as in ' + \
                    'loaded data likelihoods.')
likelihoods = np.ma.array(likelihoods,
mask=(likelihoods != likelihoods))
# Normalize the log likelihoods
likelihoods -= likelihoods.max()
# Convert to likelihoods
likelihoods = np.exp(likelihoods)
# Normalize the likelihoods
likelihoods = likelihoods / np.nansum(likelihoods)
# Derive marginal distributions of both centers and widths
intercept_likelihood = np.sum(likelihoods, axis=(0, 1)) / \
np.sum(likelihoods)
width_likelihood = np.sum(likelihoods, axis=(1, 2)) / \
np.sum(likelihoods)
dgr_likelihood = np.sum(likelihoods, axis=(0, 2)) / \
np.sum(likelihoods)
# Derive confidence intervals of parameters
width_confint = calc_symmetric_error(vel_widths,
width_likelihood,
alpha=1.0 - conf)
dgr_confint = calc_symmetric_error(dgrs,
dgr_likelihood,
alpha=1.0 - conf)
intercept_confint = calc_symmetric_error(intercepts,
intercept_likelihood,
alpha=1.0 - conf)
# Get values of best-fit model parameters
max_loc = np.where(likelihoods == np.max(likelihoods))
width_max = vel_widths[max_loc[0][0]]
dgr_max = dgrs[max_loc[1][0]]
intercept_max = intercepts[max_loc[2][0]]
if verbose:
print('\nVelocity widths = ' + \
'{0:.2f} +{1:.2f}/-{2:.2f} km/s'.format(width_confint[0],
width_confint[2],
np.abs(width_confint[1])))
print('\nDGRs = ' + \
'{0:.2f} +{1:.2f}/-{2:.2f} 10^20 cm^2 mag'.format(dgr_confint[0],
dgr_confint[2],
np.abs(dgr_confint[1])))
print('\nIntercepts = ' + \
'{0:.2f} +{1:.2f}/-{2:.2f} 10^20 cm^2 mag'.format(intercept_confint[0],
intercept_confint[2],
np.abs(intercept_confint[1])))
# Write PDF
if vel_center is None:
vel_center = 0.0
upper_lim = (np.nanmean(vel_center) + width_confint[0]/2.)
lower_lim = (np.nanmean(vel_center) - width_confint[0]/2.)
upper_lim_error = width_confint[2]**2
lower_lim_error = width_confint[1]**2
vel_range_confint = (lower_lim, upper_lim, lower_lim_error,
upper_lim_error)
vel_range_max = (vel_center - width_max/2.0, vel_center + width_max/2.0)
if not return_likelihoods:
return vel_range_confint, dgr_confint
else:
return (vel_range_confint, width_confint, dgr_confint,
intercept_confint, likelihoods,
width_likelihood, dgr_likelihood,
intercept_likelihood, width_max, dgr_max, intercept_max,
vel_range_max)
def rebin_image(image, bin_size):
''' From stack overflow
http://stackoverflow.com/questions/8090229/resize-with-averaging-or-rebin-a-numpy-2d-array
'''
import numpy as np
shape = (image.shape[0] / bin_size, image.shape[1] / bin_size)
# Crop image for binning
image = image[:-image.shape[0] % shape[0], :-image.shape[1] % shape[1]]
# Rebin image
sh = shape[0],image.shape[0]//shape[0],shape[1],image.shape[1]//shape[1]
return image.reshape(sh).mean(-1).mean(1)
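# e.g. rebin_image(np.ones((10, 10)), 3) crops to 9x9 and averages 3x3
# blocks, returning a 3x3 array of ones.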
def bin_image(image, in_ext='', out_ext='_bin', width=1, clobber=True,
bin_dim=(0, 1), image_dir='./', verbose=False):
    ''' Bins and smooths an image with MIRIAD imbin and imsmooth.
    Filenames must end in '.fits'; leave the extension out of in_ext and
    out_ext. width is in degrees.
    '''
from mirpy import fits, smooth, imbin
import os
# Change directories so that string is not too long for miriad
current_dir = os.getcwd()
os.chdir(image_dir)
if not check_file(image + in_ext + '.mir',
clobber=clobber,
verbose=verbose):
fits(image + in_ext + '.fits',
out=image + in_ext + '.mir',
op='xyin')
# Determine size of beam to smooth with
conv_beam = (width**2 - (5.0/60.0)**2)
# Smooth the image
if not check_file(image + in_ext + '_smooth.mir',
clobber=clobber,
verbose=verbose):
smooth(image + in_ext + '.mir',
out=image + in_ext + '_smooth.mir',
fwhm=conv_beam,
pa=0,
scale=0.0)
if not check_file(image + in_ext + '_smooth.fits',
clobber=clobber,
verbose=verbose):
fits(image + in_ext + '_smooth.mir',
out=image + in_ext + '_smooth.fits',
op='xyout')
# Determine number of pixels to bin
binsize = width * 60.0 / 5.0
if bin_dim == (0, 1):
bins = 4*(binsize,)
elif bin_dim == (1, 2):
bins = (binsize, binsize, binsize, binsize, 1, 1)
if not check_file(image + out_ext + '.mir',
clobber=clobber,
verbose=verbose):
imbin(image + in_ext + '.mir',
out=image + out_ext + '.mir',
bin=bins,
options='sum')
if not check_file(image + out_ext + '.fits',
clobber=clobber,
verbose=verbose):
fits(image + out_ext + '.mir',
out=image + out_ext + '.fits',
op='xyout')
# Go back to original working directory
os.chdir(current_dir)
return binsize
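# With the 5 arcmin pixels assumed above, width = 1.0 deg gives
# binsize = 1.0 * 60 / 5 = 12 pixels per bin along each axis.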
def check_file(filename, clobber=False, verbose=False):
import os
exists = False
if os.path.isfile(filename) or os.path.isdir(filename):
exists = True
if verbose:
print('\tImage {:s} exists'.format(filename))
if clobber:
if verbose:
print('\tDeleting image {:s}'.format(filename))
os.system('rm -rf {:s}'.format(filename))
exists = False
return exists
''' DS9 Region and Coordinate Functions
'''
def convert_limit_coordinates(prop_dict,
coords=('region_limit', 'co_noise_limits', 'plot_limit'), header=None):
# Initialize pixel keys
for coord in coords:
prop_dict[coord].update({'pixel': []})
if coord in ('region_limit',
'plot_limit',
'region_limit_bin',
'plot_limit_bin'):
limit_wcs = prop_dict[coord]['wcs']
for limits in limit_wcs:
# convert centers to pixel coords
limit_pixels = get_pix_coords(ra=limits[0],
dec=limits[1],
header=header)[:2].tolist()
prop_dict[coord]['pixel'].append(limit_pixels[0])
prop_dict[coord]['pixel'].append(limit_pixels[1])
elif coord == 'co_noise_limits':
region_limits = prop_dict[coord]['wcs']
# Cycle through each region, convert WCS limits to pixels
for region in region_limits:
region_pixels = []
for limits in region:
# convert centers to pixel coords
limit_pixels = get_pix_coords(ra=limits[0],
dec=limits[1],
header=header)[:2].tolist()
region_pixels.append(limit_pixels)
# Append individual regions back to CO noise
prop_dict[coord]['pixel'].append(region_pixels)
return prop_dict
def get_sub_image(image, indices):
return image[indices[1]:indices[3],
indices[0]:indices[2]]
def get_pix_coords(ra=None, dec=None, header=None):
''' Ra and dec in (hrs,min,sec) and (deg,arcmin,arcsec), or Ra in degrees
and dec in degrees.
'''
import pywcsgrid2 as wcs
import pywcs
# convert to degrees if ra and dec are array-like
try:
if len(ra) == 3 and len(dec) == 3:
ra_deg, dec_deg = hrs2degs(ra=ra, dec=dec)
else:
raise ValueError('RA and Dec must be in (hrs,min,sec) and' + \
' (deg,arcmin,arcsec) or in degrees.')
except TypeError:
ra_deg, dec_deg = ra, dec
wcs_header = pywcs.WCS(header)
pix_coords = wcs_header.wcs_sky2pix([[ra_deg, dec_deg, 0]], 0)[0]
return pix_coords
def hrs2degs(ra=None, dec=None):
''' Ra and dec tuples in hrs min sec and deg arcmin arcsec.
'''
ra_deg = 15*(ra[0] + ra[1]/60. + ra[2]/3600.)
dec_deg = dec[0] + dec[1]/60. + dec[2]/3600.
return (ra_deg, dec_deg)
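# e.g. hrs2degs(ra=(4, 30, 0), dec=(16, 30, 0)) returns (67.5, 16.5).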
def load_ds9_region(props, filename=None, header=None):
import pyregion as pyr
# region[0] in following format:
# [64.26975, 29.342033333333333, 1.6262027777777777, 3.32575, 130.0]
# [ra center, dec center, width, height, rotation angle]
regions = pyr.open(filename)
props['regions'] = {}
for region in regions:
# Cores defined in following format: 'tag={L1495A}'
tag = region.comment
region_name = tag[tag.find('text={')+6:tag.find('}')].lower()
# Format vertices to be 2 x N array
poly_verts = []
for i in xrange(0, len(region.coord_list)/2):
poly_verts.append((region.coord_list[2*i],
region.coord_list[2*i+1]))
poly_verts_pix = []
for i in xrange(0, len(poly_verts)):
poly_verts_pix.append(get_pix_coords(ra=poly_verts[i][0],
dec=poly_verts[i][1],
header=header)[:-1][::-1].tolist())
props['regions'][region_name] = {}
props['regions'][region_name]['poly_verts'] = {}
props['regions'][region_name]['poly_verts']['wcs'] = poly_verts
props['regions'][region_name]['poly_verts']['pixel'] = poly_verts_pix
return props
def run_likelihood_analysis(av_data_type='planck', region=None,
vel_range=None, resid_width_scale=3.0):
# Import external modules
# -----------------------
import numpy as np
from os import system,path
import mygeometry as myg
from mycoords import make_velocity_axis
import json
from myimage_analysis import calculate_nhi, calculate_noise_cube, bin_image
#from astropy.io import fits
import pyfits as fits
import matplotlib.pyplot as plt
# Set parameters
# --------------
# Check if likelihood file already written, rewrite?
clobber = 1
# Confidence of parameter errors
conf = 0.68
# Confidence of contour levels
contour_confs = (0.95,)
# Name of HI noise cube
noise_cube_filename = 'california_hi_galfa_cube_regrid_planckres_noise'
# Threshold for converging DGR
threshold_delta_dgr = 0.00005
# Name of property files results are written to
global_property_file = 'aldobaran_global_properties'
# Likelihood axis resolutions
vel_widths = np.arange(1, 75, 2*0.16667)
dgrs = np.arange(0.001, 0.8, 1e-3)
intercepts = np.arange(0, 1, 1)
# Velocity range over which to integrate HI for deriving the mask
if vel_range is None:
vel_range = (2.2,7.6)
vel_range = (1.7,7.7)
vel_range = (-5, 15)
# Bin width in degrees
bin_width_deg = 1.0
# Clobber the binned images and remake them?
clobber_bin_images = True
# Use single velocity center for entire image?
single_vel_center = True
# Filetype extensions for figures
figure_types = ('png', 'pdf')
# define directory locations
# --------------------------
output_dir = '/d/bip3/ezbc/aldobaran/data/python_output/nhi_av/'
figure_dir = \
'/d/bip3/ezbc/aldobaran/figures/'
av_dir = '/d/bip3/ezbc/california/data/av/'
hi_dir = '/d/bip3/ezbc/california/data/hi/'
co_dir = '/d/bip3/ezbc/california/data/co/'
core_dir = '/d/bip3/ezbc/aldobaran/data/python_output/core_properties/'
property_dir = '/d/bip3/ezbc/aldobaran/data/python_output/'
region_dir = '/d/bip3/ezbc/multicloud/data/python_output/'
likelihood_dir = '/d/bip3/ezbc/aldobaran/data/python_output/nhi_av/'
# Load data
# ---------
# Adjust filenames
#noise_cube_filename += bin_string
likelihood_filename = 'aldobaran_likelihood_{0:s}_bin'.format(av_data_type)
results_filename = 'aldobaran_likelihood_{0:s}_bin'.format(av_data_type)
# load Planck Av and GALFA HI images, on same grid
if av_data_type == 'k09':
print('\nLoading K+09 2MASS data...')
av_data, av_header = fits.getdata(av_dir + \
'california_av_k09_regrid_planckres.fits',
header=True)
av_data_error = 0.1 * np.ones(av_data.shape)
else:
print('\nLoading Planck data...')
av_data, av_header = fits.getdata(av_dir + \
'california_av_planck_5arcmin.fits',
header=True)
av_data_error, av_error_header = fits.getdata(av_dir + \
'california_av_error_planck_5arcmin.fits',
header=True)
hi_data, hi_header = fits.getdata(hi_dir + \
'california_hi_galfa_cube_regrid_planckres.fits',
header=True)
# Load global properties
with open(property_dir + global_property_file + '.txt', 'r') as f:
global_props = json.load(f)
# Prepare data products
# ---------------------
# Name correct region of cloud
if region == 1:
region_name = 'aldobaran1'
elif region == 2:
region_name = 'aldobaran2'
else:
region_name = 'aldobaran'
global_property_file = global_property_file.replace('aldobaran', region_name)
# Change WCS coords to pixel coords of images
global_props = convert_limit_coordinates(global_props, header=av_header)
# make the velocity axes
hi_vel_axis = make_velocity_axis(hi_header)
# Load the HI noise cube if it exists, else make it
if not path.isfile(hi_dir + noise_cube_filename + '.fits'):
noise_cube = calculate_noise_cube(cube=hi_data,
velocity_axis=hi_vel_axis,
velocity_noise_range=[90,110], header=hi_header, Tsys=30.,
filename=hi_dir + noise_cube_filename + '.fits')
else:
noise_cube, noise_header = fits.getdata(hi_dir +
noise_cube_filename + '.fits',
header=True)
# Load cloud division regions from ds9
global_props = load_ds9_region(global_props,
filename=region_dir + 'multicloud_divisions.reg',
header=av_header)
# Derive relevant region
region_vertices = \
global_props['regions'][region_name]['poly_verts']['pixel']
# block off region
region_mask = np.logical_not(myg.get_polygon_mask(av_data,
region_vertices))
if 0:
import matplotlib.pyplot as plt
plt.imshow(np.ma.array(av_data, mask=region_mask), origin='lower')
plt.colorbar()
plt.show()
print('\nRegion size = ' + \
'{0:.0f} pix'.format(region_mask[region_mask == 1].size))
# Derive mask by excluding correlated residuals
# ---------------------------------------------
nhi_image, nhi_image_error = calculate_nhi(cube=hi_data,
velocity_axis=hi_vel_axis,
velocity_range=vel_range,
noise_cube=noise_cube,
velocity_noise_range=[90, 110],
Tsys=30.0,
return_nhi_error=True,
)
if 0:
vel_center = np.zeros(hi_data.shape[1:])
for i in xrange(0, hi_data.shape[1]):
for j in xrange(0, hi_data.shape[2]):
hi_spectrum = hi_data[:, i, j]
hi_spectrum[np.isnan(hi_spectrum)] = 0.0
if np.nansum(hi_spectrum) > 0:
vel_center[i,j] = \
np.array((np.average(hi_vel_axis,
weights=hi_spectrum**2),))[0]
else:
vel_center[i,j] = np.nan
vel_range = (vel_center - 5, vel_center + 5)
nhi_image1, nhi_image_error = calculate_nhi(cube=hi_data,
velocity_axis=hi_vel_axis,
velocity_range=vel_range,
noise_cube=noise_cube,
velocity_noise_range=[90, 110],
Tsys=30.0,
return_nhi_error=True,
)
hi_spectrum = np.sum(hi_data, axis=(1,2))
vel_center = np.array((np.average(hi_vel_axis,
weights=hi_spectrum**2),))[0]
vel_range = (vel_center - 5, vel_center + 5)
nhi_image2, nhi_image_error = calculate_nhi(cube=hi_data,
velocity_axis=hi_vel_axis,
velocity_range=vel_range,
noise_cube=noise_cube,
velocity_noise_range=[90, 110],
Tsys=30.0,
return_nhi_error=True,
)
import matplotlib.pyplot as plt
plt.close(); plt.clf()
plt.imshow(nhi_image1 - nhi_image2, origin='lower left')
plt.colorbar()
plt.show()
print('\nDeriving mask for correlated residuals...')
av_model, mask, dgr = iterate_residual_masking(
nhi_image=nhi_image,
nhi_image_error=nhi_image_error,
av_data=av_data,
av_data_error=av_data_error,
vel_range=vel_range,
dgrs=dgrs,
intercepts=intercepts,
threshold_delta_dgr=threshold_delta_dgr,
resid_width_scale=resid_width_scale,
init_mask=region_mask,
verbose=1,
plot_progress=0,
results_filename=figure_dir + 'likelihood/'\
'aldobaran_residual_pdf.pdf'
)
# Combine region mask with new mask
#mask += np.logical_not(region_mask)
mask += region_mask
mask = mask.astype('bool')
# Write full resolution mask to parameters
global_props['mask'] = mask.tolist()
if 1:
import matplotlib.pyplot as plt
plt.imshow(np.ma.array(av_data, mask=mask), origin='lower')
plt.show()
# Bin the masked images
# ---------------------
print('\nBinning masked images...')
# Mask the data with nans
av_data[mask] = np.nan
av_data_error[mask] = np.nan
hi_data[:, mask] = np.nan
if not check_file(av_dir + 'california_av_planck_5arcmin_masked.fits',
clobber=clobber_bin_images):
fits.writeto(av_dir + 'california_av_planck_5arcmin_masked.fits',
av_data,
av_header)
if not check_file(av_dir + 'california_av_error_planck_5arcmin_masked.fits',
clobber=clobber_bin_images):
fits.writeto(av_dir + 'california_av_error_planck_5arcmin_masked.fits',
av_data_error,
av_header)
if not check_file(hi_dir + \
'california_hi_galfa_cube_regrid_planckres_masked.fits',
clobber=clobber_bin_images):
fits.writeto(hi_dir + \
'california_hi_galfa_cube_regrid_planckres_masked.fits',
hi_data,
hi_header)
if clobber_bin_images:
# Define number of pixels in each bin
binsize = bin_width_deg * 60.0 / 5.0
# Bin the images, retain only one bin_weight image since they are all
# the same
# -------------------------------------------------------------------
# Av image
av_data_bin, av_header_bin, bin_weights = \
bin_image(av_data,
binsize=(binsize, binsize),
header=av_header,
func=np.nanmean,
return_weights=True)
if not check_file(av_dir + 'california_av_planck_5arcmin_bin.fits',
clobber=clobber_bin_images):
fits.writeto(av_dir + 'california_av_planck_5arcmin_bin.fits',
av_data_bin,
av_header_bin)
if not check_file(av_dir + 'california_av_planck_5arcmin_bin_weights.fits',
clobber=clobber_bin_images):
fits.writeto(av_dir + 'california_av_planck_5arcmin_bin_weights.fits',
bin_weights,
av_header_bin)
# Av image error
# Errors add in quadrature
# mean = sum(a_i) / n
# error on mean = sqrt(sum(a_i**2 / n**2))
noise_func = lambda x: np.nansum(x**2)**0.5 / x[~np.isnan(x)].size
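# Quick check of the formula: binning four pixels that each carry a 0.2 mag
# error gives sqrt(4 * 0.2**2) / 4 = 0.1 mag for the binned pixel.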
av_data_error_bin, av_header_bin = \
bin_image(av_data_error,
binsize=(binsize, binsize),
header=av_header,
func=noise_func,)
if not check_file(av_dir + 'california_av_error_planck_5arcmin_bin.fits',
clobber=clobber_bin_images):
fits.writeto(av_dir + 'california_av_error_planck_5arcmin_bin.fits',
av_data_error_bin,
av_header_bin)
# Hi image
hi_data_bin, hi_header_bin = \
bin_image(hi_data,
binsize=(binsize, binsize),
header=hi_header,
func=np.nanmean)
if not check_file(hi_dir + \
'california_hi_galfa_cube_regrid_planckres_bin.fits',
clobber=clobber_bin_images):
fits.writeto(hi_dir + \
'california_hi_galfa_cube_regrid_planckres_bin.fits',
hi_data_bin,
hi_header_bin)
# Load data
# ---------
bin_string = '_bin'
# Adjust filenames
noise_cube_filename += bin_string
likelihood_filename = '{0:s}_likelihood_{1:s}_bin'.format(region_name,
av_data_type)
results_filename = '{0:s}_likelihood_{1:s}_bin'.format(region_name,
av_data_type)
av_data, av_header = fits.getdata(av_dir + \
'california_av_planck_5arcmin' + bin_string + '.fits',
header=True)
av_data_error, av_error_header = fits.getdata(av_dir + \
'california_av_error_planck_5arcmin' + bin_string + '.fits',
header=True)
bin_weights = fits.getdata(av_dir + \
'california_av_planck_5arcmin' + bin_string + \
'_weights.fits',)
hi_data, hi_header = fits.getdata(hi_dir + \
'california_hi_galfa_cube_regrid_planckres' + \
bin_string + '.fits',
header=True)
# Load the HI noise cube if it exists, else make it
if not path.isfile(hi_dir + noise_cube_filename + '.fits'):
noise_cube = calculate_noise_cube(cube=hi_data,
velocity_axis=hi_vel_axis,
velocity_noise_range=[90,110], header=hi_header, Tsys=30.,
filename=hi_dir + noise_cube_filename + '.fits')
else:
noise_cube, noise_header = fits.getdata(hi_dir +
noise_cube_filename + '.fits',
header=True)
# Prepare data products
# ---------------------
# Change WCS coords to pixel coords of images
global_props['region_limit_bin'] = global_props['region_limit'].copy()
global_props['plot_limit_bin'] = global_props['plot_limit'].copy()
global_props = convert_limit_coordinates(global_props,
header=av_header,
coords=('region_limit_bin',
'plot_limit_bin'))
# Derive relevant region
pix = global_props['region_limit_bin']['pixel']
region_vertices = ((pix[1], pix[0]),
(pix[1], pix[2]),
(pix[3], pix[2]),
(pix[3], pix[0])
)
# block off region
region_mask = np.logical_not(myg.get_polygon_mask(av_data,
region_vertices))
print('\nRegion size = ' + \
'{0:.0f} pix'.format(region_mask[region_mask == 1].size))
# Mask out the NaNs
mask = (np.isnan(av_data) & \
np.isnan(av_data_error) & \
np.isnan(np.sum(hi_data, axis=0)))
mask += region_mask
mask = mask.astype(bool)
# Derive center velocity from hi
# ------------------------------
if single_vel_center:
hi_spectrum = np.sum(hi_data[:, ~mask], axis=(1))
vel_center = np.array((np.average(hi_vel_axis,
weights=hi_spectrum**2),))[0]
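# The center is the first moment of the spatially summed spectrum, weighted
# by intensity squared so that the brightest emission dominates.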
print('\nVelocity center from HI = ' +\
'{0:.2f} km/s'.format(vel_center))
vel_center_masked = vel_center
else:
vel_center = np.zeros(hi_data.shape[1:])
for i in range(0, hi_data.shape[1]):
for j in range(0, hi_data.shape[2]):
hi_spectrum = hi_data[:, i, j]
hi_spectrum[np.isnan(hi_spectrum)] = 0.0
if np.nansum(hi_spectrum) > 0:
vel_center[i,j] = \
np.array((np.average(hi_vel_axis,
weights=hi_spectrum**2),))[0]
else:
vel_center[i,j] = np.nan
vel_center_masked = vel_center[~mask]
# Perform likelihood calculation of masked images
# -----------------------------------------------
# Define filename for plotting results
results_filename = figure_dir + 'likelihood/'+ results_filename
print('\nPerforming likelihood calculations with initial error ' + \
'estimate...')
results = calc_likelihoods(
hi_cube=hi_data[:, ~mask],
hi_vel_axis=hi_vel_axis,
av_image=av_data[~mask],
av_image_error=av_data_error[~mask],
#image_weights=bin_weights[~mask],
vel_center=vel_center_masked,
vel_widths=vel_widths,
dgrs=dgrs,
intercepts=intercepts,
results_filename='',
return_likelihoods=True,
likelihood_filename=None,
clobber=False,
conf=conf,
)
# Unpack output of likelihood calculation
(vel_range_confint, width_confint, dgr_confint, intercepts_confint,
likelihoods, width_likelihood, dgr_likelihood,
intercept_likelihood, width_max, dgr_max, intercept_max,
vel_range_max) = results
print('\nHI velocity integration range:')
print('%.1f to %.1f km/s' % (vel_range_confint[0],
vel_range_confint[1]))
vel_range_max = (vel_center - width_max / 2.0,
vel_center + width_max / 2.0)
# Calculate chi^2 for best fit models
# ----------------------------------
nhi_image_temp = calculate_nhi(cube=hi_data,
velocity_axis=hi_vel_axis,
velocity_range=vel_range_max,
noise_cube=noise_cube,
return_nhi_error=False)
av_image_model = nhi_image_temp * dgr_max + intercept_max
# count number of pixels used in analysis
npix = mask[~mask].size
# finally calculate chi^2
#chisq = np.sum((av_data[~mask] - av_image_model[~mask])**2 / \
# av_data_error[~mask]**2) / av_data[~mask].size
print('\nTotal number of pixels in analysis, after masking = ' + \
'{0:.0f}'.format(npix))
#print('\nReduced chi^2 = {0:.1f}'.format(chisq))
# Write results to global properties
global_props['dust2gas_ratio'] = {}
global_props['dust2gas_ratio_error'] = {}
global_props['intercept'] = {}
global_props['intercept_error'] = {}
global_props['hi_velocity_width'] = {}
global_props['hi_velocity_width_error'] = {}
global_props['dust2gas_ratio_max'] = {}
global_props['intercept_max'] = {}
global_props['hi_velocity_center'] = {}
global_props['hi_velocity_width_max'] = {}
global_props['hi_velocity_range_max'] = {}
global_props['av_threshold'] = {}
global_props['co_threshold'] = {}
global_props['hi_velocity_width']['value'] = width_confint[0]
global_props['hi_velocity_width']['unit'] = 'km/s'
global_props['hi_velocity_width_error']['value'] = width_confint[1:]
global_props['hi_velocity_width_error']['unit'] = 'km/s'
global_props['hi_velocity_range'] = vel_range_confint[0:2]
global_props['hi_velocity_range_error'] = vel_range_confint[2:]
global_props['dust2gas_ratio']['value'] = dgr_confint[0]
global_props['dust2gas_ratio_error']['value'] = dgr_confint[1:]
global_props['dust2gas_ratio_max']['value'] = dgr_max
global_props['intercept_max']['value'] = intercept_max
global_props['intercept']['value'] = intercepts_confint[0]
global_props['intercept_error']['value'] = intercepts_confint[1:]
global_props['hi_velocity_center']['value'] = vel_center.tolist()
#global_props['hi_velocity_width_max']['value'] = width_max
#global_props['hi_velocity_range_max']['value'] = vel_range_max
global_props['hi_velocity_range_conf'] = conf
global_props['width_likelihood'] = width_likelihood.tolist()
global_props['dgr_likelihood'] = dgr_likelihood.tolist()
global_props['vel_centers'] = vel_center.tolist()
global_props['vel_widths'] = vel_widths.tolist()
global_props['dgrs'] = dgrs.tolist()
global_props['likelihoods'] = likelihoods.tolist()
global_props['av_threshold']['value'] = None
global_props['av_threshold']['unit'] = 'mag'
global_props['co_threshold']['value'] = None
global_props['co_threshold']['unit'] = 'K km/s'
#global_props['chisq'] = chisq
global_props['npix'] = npix
global_props['mask_bin'] = mask.tolist()
global_props['use_binned_image'] = True
global_props['residual_width_scale'] = resid_width_scale
global_props['threshold_delta_dgr'] = threshold_delta_dgr
# Write the file
print('\nWriting results to\n' + global_property_file + '_' + \
av_data_type + '_init.txt')
with open(property_dir + global_property_file + '_' + av_data_type + \
'_init.txt', 'w') as f:
json.dump(global_props, f, allow_nan=True)
# Plot likelihood space
for figure_type in figure_types:
print('\nWriting likelihood image to\n' + results_filename + \
'_init_wd.{0:s}'.format(figure_type))
plot_likelihoods_hist(global_props,
plot_axes=('widths', 'dgrs'),
show=0,
returnimage=False,
filename=results_filename + \
'_init_wd.{0:s}'.format(figure_type),
contour_confs=contour_confs)
# Rerun analysis with new error calculated
# Error should be calculated across entire image, not just atomic regions,
# in order to understand variation in DGR
# -------------------------------------------------------------------------
# Calculate new standard deviation, set global variable
# npix - 2 is the number of degrees of freedom
# see equation 15.1.6 in Numerical Recipes
std = np.sqrt(np.sum((av_data[~mask] - av_image_model[~mask])**2 \
/ (av_data[~mask].size - 2)))
#std = np.sqrt(np.sum((av_data - av_image_model)**2 \
# / (av_data.size - 2)))
av_data_error = std * np.ones(av_data_error.shape)
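# The map-based Av errors are replaced with a single uniform error equal to
# the rms scatter between the data and the best-fit model, so the second
# likelihood pass is driven by the systematic residuals rather than the
# per-pixel measurement noise.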
#av_image_error += np.std(av_data[~mask] - av_image_model[~mask])
print('\nSystematic error between model and data Av images:')
print('\tstd(model - data) = {0:.3f} mag'.format(av_data_error[0, 0]))
# Perform likelihood calculation of masked images
# -----------------------------------------------
print('\nPerforming likelihood calculations with scaled error ' + \
'estimate...')
results = calc_likelihoods(
hi_cube=hi_data[:, ~mask],
hi_vel_axis=hi_vel_axis,
av_image=av_data[~mask],
av_image_error=av_data_error[~mask],
image_weights=bin_weights[~mask],
vel_center=vel_center_masked,
vel_widths=vel_widths,
dgrs=dgrs,
intercepts=intercepts,
results_filename='',
return_likelihoods=True,
likelihood_filename=None,
clobber=False,
conf=conf,
)
# Unpack output of likelihood calculation
(vel_range_confint, width_confint, dgr_confint, intercept_confint,
likelihoods, width_likelihood, dgr_likelihood,
intercept_likelihood, width_max, dgr_max, intercept_max,
vel_range_max) = results
print('\nHI velocity integration range:')
print('%.1f to %.1f km/s' % (vel_range_confint[0],
vel_range_confint[1]))
# Calculate chi^2 for best fit models
# ----------------------------------
nhi_image_temp = \
calculate_nhi(cube=hi_data,
velocity_axis=hi_vel_axis,
velocity_range=vel_range_max,
noise_cube=noise_cube)
av_image_model = nhi_image_temp * dgr_max
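# NOTE: unlike the first-pass model above (nhi * dgr_max + intercept_max),
# this model omits the intercept term; if that is unintentional, append
# "+ intercept_max" here as well.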
# avoid NaNs
indices = ((av_image_model == av_image_model) & \
(av_data == av_data))
# add nan locations to the mask
mask[~indices] = 1
# count number of pixels used in analysis
npix = mask[~mask].size
# finally calculate chi^2
#chisq = np.sum((av_data[~mask] - av_image_model[~mask])**2 / \
# av_data_error[~mask]**2) / av_data[~mask].size
print('\nTotal number of pixels in analysis, after masking = ' + \
'{0:.0f}'.format(npix))
#print('\nReduced chi^2 = {0:.1f}'.format(chisq))
# Write results to global properties
global_props['dust2gas_ratio'] = {}
global_props['dust2gas_ratio_error'] = {}
global_props['intercept'] = {}
global_props['intercept_error'] = {}
global_props['hi_velocity_width'] = {}
global_props['hi_velocity_width_error'] = {}
global_props['dust2gas_ratio_max'] = {}
global_props['intercept_max'] = {}
global_props['hi_velocity_center'] = {}
global_props['hi_velocity_width_max'] = {}
global_props['hi_velocity_range_max'] = {}
global_props['av_threshold'] = {}
global_props['co_threshold'] = {}
global_props['hi_velocity_width']['value'] = width_confint[0]
global_props['hi_velocity_width']['unit'] = 'km/s'
global_props['hi_velocity_width_max']['value'] = width_max
global_props['hi_velocity_width_max']['unit'] = 'km/s'
global_props['hi_velocity_width_error']['value'] = width_confint[1:]
global_props['hi_velocity_width_error']['unit'] = 'km/s'
global_props['hi_velocity_range'] = vel_range_confint[0:2]
global_props['hi_velocity_range_error'] = vel_range_confint[2:]
global_props['dust2gas_ratio']['value'] = dgr_confint[0]
global_props['dust2gas_ratio_error']['value'] = dgr_confint[1:]
global_props['dust2gas_ratio_max']['value'] = dgr_max
global_props['intercept']['value'] = intercept_confint[0]
global_props['intercept_error']['value'] = intercept_confint[1:]
global_props['intercept_max']['value'] = intercept_max
global_props['hi_velocity_center']['value'] = vel_center.tolist()
#global_props['hi_velocity_width_max']['value'] = width_max
#global_props['hi_velocity_range_max']['value'] = vel_range_max
global_props['hi_velocity_range_conf'] = conf
global_props['width_likelihood'] = width_likelihood.tolist()
global_props['dgr_likelihood'] = dgr_likelihood.tolist()
global_props['intercept_likelihood'] = intercept_likelihood.tolist()
global_props['vel_centers'] = vel_center.tolist()
global_props['vel_widths'] = vel_widths.tolist()
global_props['dgrs'] = dgrs.tolist()
global_props['intercepts'] = intercepts.tolist()
global_props['likelihoods'] = likelihoods.tolist()
global_props['av_threshold']['value'] = None
global_props['av_threshold']['unit'] = 'mag'
global_props['co_threshold']['value'] = None
global_props['co_threshold']['unit'] = 'K km/s'
#global_props['chisq'] = chisq
global_props['npix'] = npix
global_props['mask_bin'] = mask.tolist()
global_props['use_binned_image'] = True
global_props['residual_width_scale'] = resid_width_scale
global_props['threshold_delta_dgr'] = threshold_delta_dgr
# Write the file
print('\nWriting results to\n' + global_property_file + '_' + \
av_data_type + '_scaled.txt')
with open(property_dir + global_property_file + '_' + av_data_type + \
'_scaled.txt', 'w') as f:
json.dump(global_props, f)
# Plot likelihood space
for figure_type in figure_types:
print('\nWriting likelihood image to\n' + results_filename + \
'_scaled_wd.{0:s}'.format(figure_type))
plot_likelihoods_hist(global_props,
plot_axes=('widths', 'dgrs'),
show=0,
returnimage=False,
filename=results_filename + \
'_scaled_wd.{0:s}'.format(figure_type),
contour_confs=contour_confs)
#return global_props['hi_velocity_range']
return global_props
'''
Main Script
'''
def main():
import numpy as np
from os import path
import json
from pandas import DataFrame
av_data_type = 'planck'
# threshold in velocity range difference
vel_range_diff_thres = 3.0 # km/s
property_dir = \
'/d/bip3/ezbc/aldobaran/data/python_output/residual_parameter_results/'
final_property_dir = '/d/bip3/ezbc/aldobaran/data/python_output/'
property_filename = 'aldobaran_global_properties_planck'
# Number of white noise standard deviations with which to fit the
# residuals in iterative masking
residual_width_scales = [3.0,]
regions = [None, ]
clobber_results = True
table_cols = ('dust2gas_ratio', 'hi_velocity_width',
'intercept', 'residual_width_scale')
n = len(residual_width_scales)
table_df = DataFrame({col:np.empty(n) for col in table_cols})
for region in regions:
# Grab correct region
if region == 1:
region_name = 'aldobaran1'
elif region == 2:
region_name = 'aldobaran2'
else:
region_name = 'aldobaran'
property_filename = 'aldobaran_global_properties_planck'
property_filename = property_filename.replace('aldobaran', region_name)
print('\nPerforming likelihood derivations for ' + region_name)
for i, residual_width_scale in enumerate(residual_width_scales):
iteration = 0
vel_range = (-20.0, 30.0)
vel_range_new = (-1.0, 1.0)
vel_range_diff = np.sum(np.abs(np.array(vel_range) - \
np.array(vel_range_new)))
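# The loop below repeats the full likelihood analysis, feeding each derived
# velocity range back in, until successive ranges agree to within
# vel_range_diff_thres (3 km/s summed over both range edges).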
while vel_range_diff > vel_range_diff_thres:
json_filename = property_dir + property_filename + '_' + \
av_data_type + \
'_residscale{0:.1f}'.format(residual_width_scale)\
+ '_iter{0:.0f}'.format(iteration) + '.txt'
exists = path.isfile(json_filename)
print('Writing iteration data file to ' + json_filename)
if exists and not clobber_results:
with open(json_filename, 'r') as f:
global_props = json.load(f)
else:
global_props = \
run_likelihood_analysis(av_data_type=av_data_type,
vel_range=vel_range,
region=region,
resid_width_scale=residual_width_scale)
vel_range_new = global_props['hi_velocity_range']
vel_range_diff = np.sum(np.abs(np.array(vel_range) - \
np.array(vel_range_new)))
if clobber_results:
with open(json_filename, 'w') as f:
json.dump(global_props, f)
print('\n\n\n Next iteration \n-------------------\n\n\n')
print('Velocity range difference =' + \
' {0:.1f}'.format(vel_range_diff))
vel_range = vel_range_new
iteration += 1
# Write important results to table
for col in table_df:
if col == 'residual_width_scale':
table_df[col][i] = global_props[col]
else:
table_df[col][i] = global_props[col]['value']
# Write the file
print('\nWriting results to\n' + property_filename + \
'_' + av_data_type + '_scaled.txt')
with open(final_property_dir + property_filename +\
'_' + av_data_type + '_scaled.txt', 'w') as f:
json.dump(global_props, f)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
cda6831f28cfe2770d2ced88e722a7cac36e772a | 468f54cf08a68fd3791f5266996eeb82331db338 | /pyisy/variables/variable.py | ce6f4ad83663bf37bb35e3dc1a63fa58b77a36d3 | [
"Apache-2.0"
] | permissive | OverloadUT/PyISY | 79d0b446f8c5358aa31743e8bd633b1d7f4762f2 | 69553057ceac57a6b4300c0070ee4ff163681750 | refs/heads/master | 2021-01-20T04:54:43.622920 | 2020-05-16T18:57:50 | 2020-05-16T18:57:50 | 89,751,436 | 0 | 0 | null | 2017-04-28T23:18:44 | 2017-04-28T23:18:44 | null | UTF-8 | Python | false | false | 5,988 | py | """Manage variables from the ISY."""
from ..constants import (
ATTR_INIT,
ATTR_LAST_CHANGED,
ATTR_LAST_UPDATE,
ATTR_SET,
ATTR_STATUS,
ATTR_TS,
PROTO_INT_VAR,
PROTO_STATE_VAR,
TAG_ADDRESS,
URL_VARIABLES,
VAR_INTEGER,
)
from ..helpers import EventEmitter, now
class Variable:
"""
Object representing a variable on the controller.
| variables: The variable manager object.
| vid: The variable ID.
| vtype: The variable type.
| vname: The variable name.
| init: The value the variable initializes to when the controller
starts.
| status: The current variable value.
| ts: The timestamp for the last time the variable was edited.
:ivar init: Watched property that represents the value the variable
initializes to when the controller boots.
:ivar last_edited: Watched property that indicates the last time the
variable was edited.
:ivar status: Watched property that represents the current value of the
variable.
"""
def __init__(self, variables, vid, vtype, vname, init, status, ts):
"""Initialize a Variable class."""
super(Variable, self).__init__()
self._id = vid
self._init = init
self._last_edited = ts
self._last_update = now()
self._last_changed = now()
self._name = vname
self._status = status
self._type = vtype
self._variables = variables
self.isy = variables.isy
self.status_events = EventEmitter()
def __str__(self):
"""Return a string representation of the variable."""
return f"Variable(type={self._type}, id={self._id}, value={self.status}, init={self.init})"
def __repr__(self):
"""Return a string representation of the variable."""
return str(self)
@property
def address(self):
"""Return the formatted Variable Type and ID."""
return f"{self._type}.{self._id}"
@property
def init(self):
"""Return the initial state."""
return self._init
@init.setter
def init(self, value):
"""Set the initial state and notify listeners."""
if self._init != value:
self._init = value
self._last_changed = now()
self.status_events.notify(self.status_feedback)
return self._init
@property
def last_changed(self):
"""Return the UTC Time of the last status change for this node."""
return self._last_changed
@property
def last_edited(self):
"""Return the last edit time."""
return self._last_edited
@last_edited.setter
def last_edited(self, value):
"""Set the last edited time."""
if self._last_edited != value:
self._last_edited = value
return self._last_edited
@property
def last_update(self):
"""Return the UTC Time of the last update for this node."""
return self._last_update
@last_update.setter
def last_update(self, value):
"""Set the last update time."""
if self._last_update != value:
self._last_update = value
return self._last_update
@property
def protocol(self):
"""Return the protocol for this entity."""
return PROTO_INT_VAR if self._type == VAR_INTEGER else PROTO_STATE_VAR
@property
def name(self):
"""Return the Variable Name."""
return self._name
@property
def status(self):
"""Return the current node state."""
return self._status
@status.setter
def status(self, value):
"""Set the current node state and notify listeners."""
if self._status != value:
self._status = value
self._last_changed = now()
self.status_events.notify(self.status_feedback)
return self._status
@property
def status_feedback(self):
"""Return information for a status change event."""
return {
TAG_ADDRESS: self.address,
ATTR_STATUS: self._status,
ATTR_INIT: self._init,
ATTR_TS: self._last_edited,
ATTR_LAST_CHANGED: self._last_changed,
ATTR_LAST_UPDATE: self._last_update,
}
@property
def vid(self):
"""Return the Variable ID."""
return self._id
def update(self, wait_time=0):
"""
Update the object with the variable's parameters from the controller.
| wait_time: Seconds to wait before updating.
"""
self._last_update = now()
self._variables.update(wait_time)
def set_init(self, val):
"""
Set the initial value for the variable after the controller boots.
| val: The value to have the variable initialize to.
"""
if val is None:
raise ValueError("Variable init must be an integer. Got None.")
self.set_value(val, True)
def set_value(self, val, init=False):
"""
Set the value of the variable.
| val: The value to set the variable to.
"""
if val is None:
raise ValueError("Variable value must be an integer. Got None.")
req_url = self.isy.conn.compile_url(
[
URL_VARIABLES,
ATTR_INIT if init else ATTR_SET,
str(self._type),
str(self._id),
str(val),
]
)
if not self.isy.conn.request(req_url):
self.isy.log.warning(
"ISY could not set variable%s: %s.%s",
" init value" if init else "",
str(self._type),
str(self._id),
)
return
self.isy.log.debug(
"ISY set variable%s: %s.%s",
" init value" if init else "",
str(self._type),
str(self._id),
)
if not self.isy.auto_update:
self.update()
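# Example usage (hypothetical accessor and variable IDs, shown only to
# illustrate the API):
#     var = isy.variables[1][5]   # e.g. integer variable with ID 5
#     var.set_value(42)           # set the current value
#     var.set_init(0)             # value restored when the controller boots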
| [
"[email protected]"
] | |
ee16236cee149923cbec2f4caed7e7a2a579a99e | 67c77918ba8bb55b8508a252d7bddbbaa1cd414e | /Bioinformatics_Textbook_Track/02-BA1B.py | d7b475d32f515903acd3a28076de07f5db946532 | [] | no_license | anoubhav/Rosalind-Solutions | 52406c4677d9daa68814141c9bf22d19ad896a8b | d7b591fb4850fa2f5c1b146eafabe77945c94d34 | refs/heads/master | 2021-08-07T06:25:47.056356 | 2020-04-10T10:52:20 | 2020-04-10T10:52:20 | 144,815,045 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | from collections import defaultdict
def MostFreqKmer(dna, k):
""" Finds the most frequent k-mers in a string
(str, int) -> (list of str) """
l = len(dna)
kmer_count = defaultdict(int)
for i in range(l - k + 1):
kmer_count[dna[i:i+k]] += 1
temp = max(kmer_count.values())
return ' '.join([k for k, v in kmer_count.items() if v == temp])
if __name__ == '__main__':
dna = input()
k = int(input())
print(MostFreqKmer(dna, k))
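# Example: for dna = "ACGTTGCATGTCGCATGATGCATGAGAGCT" and k = 4, the most
# frequent 4-mers are GCAT and CATG (three occurrences each), so on
# Python 3.7+ (insertion-ordered dicts) this prints "GCAT CATG".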
| [
"[email protected]"
] | |
98d3e0a2b55395492fd6f642061fd76ad9b3a125 | 887371724feac0c71d6112e8a00e436a9ad3aa92 | /horizon/openstack_dashboard/dashboards/project/loadbalancers/tables.py | 1d08eb4ad69fbd25abcd1026708bc58501a2eb95 | [
"Apache-2.0"
] | permissive | sreenathmenon/openstackAuth | eb6e4eb2f8c2f619d8c59e0c0a79a2f6c177740f | f2a5f641103ebd4750de210ec1f3df5600f3ba19 | refs/heads/master | 2021-01-20T21:34:41.243654 | 2017-08-30T14:25:48 | 2017-08-30T14:25:48 | 101,769,285 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 20,014 | py | # Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import shortcuts
from django import template
from django.template import defaultfilters as filters
from django.utils import http
from django.utils.http import urlencode
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import conf
from horizon import exceptions
from horizon import messages
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.access_and_security.floating_ips \
import workflows
from openstack_dashboard import policy
class AddPoolLink(tables.LinkAction):
name = "addpool"
verbose_name = _("Add Pool")
url = "horizon:project:loadbalancers:addpool"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_pool"),)
class AddVipLink(tables.LinkAction):
name = "addvip"
verbose_name = _("Add VIP")
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_vip"),)
def get_link_url(self, pool):
base_url = reverse("horizon:project:loadbalancers:addvip",
kwargs={'pool_id': pool.id})
return base_url
def allowed(self, request, datum=None):
if datum and datum.vip_id:
return False
return True
class AddMemberLink(tables.LinkAction):
name = "addmember"
verbose_name = _("Add Member")
url = "horizon:project:loadbalancers:addmember"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_member"),)
class AddMonitorLink(tables.LinkAction):
name = "addmonitor"
verbose_name = _("Add Monitor")
url = "horizon:project:loadbalancers:addmonitor"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_health_monitor"),)
class DeleteVipLink(policy.PolicyTargetMixin, tables.Action):
name = "deletevip"
preempt = True
verbose_name = _("Delete VIP")
policy_rules = (("network", "delete_vip"),)
classes = ('btn-danger',)
def get_help_text(self, vip_id):
return _("Deleting VIP %s from this pool cannot be undone.") % vip_id
def allowed(self, request, datum=None):
if datum and datum.vip_id:
self.help_text = self.get_help_text(datum.vip_id)
return True
return False
def single(self, table, request, obj_id):
try:
vip_id = api.lbaas.pool_get(request, obj_id).vip_id
except Exception as e:
exceptions.handle(request,
_('Unable to locate VIP to delete. %s')
% e)
if vip_id is not None:
try:
api.lbaas.vip_delete(request, vip_id)
messages.success(request, _('Deleted VIP %s') % vip_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete VIP. %s') % e)
class DeletePoolLink(policy.PolicyTargetMixin, tables.DeleteAction):
name = "deletepool"
policy_rules = (("network", "delete_pool"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Pool",
u"Delete Pools",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled deletion of Pool",
u"Scheduled deletion of Pools",
count
)
def allowed(self, request, datum=None):
if datum and datum.vip_id:
return False
return True
def delete(self, request, obj_id):
try:
api.lbaas.pool_delete(request, obj_id)
messages.success(request, _('Deleted pool %s') % obj_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete pool. %s') % e)
class DeleteMonitorLink(policy.PolicyTargetMixin,
tables.DeleteAction):
name = "deletemonitor"
policy_rules = (("network", "delete_health_monitor"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Monitor",
u"Delete Monitors",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled deletion of Monitor",
u"Scheduled deletion of Monitors",
count
)
def delete(self, request, obj_id):
try:
api.lbaas.pool_health_monitor_delete(request, obj_id)
messages.success(request, _('Deleted monitor %s') % obj_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete monitor. %s') % e)
class DeleteMemberLink(policy.PolicyTargetMixin, tables.DeleteAction):
name = "deletemember"
policy_rules = (("network", "delete_member"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Member",
u"Delete Members",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled deletion of Member",
u"Scheduled deletion of Members",
count
)
def delete(self, request, obj_id):
try:
api.lbaas.member_delete(request, obj_id)
messages.success(request, _('Deleted member %s') % obj_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete member. %s') % e)
class UpdatePoolLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "updatepool"
verbose_name = _("Edit Pool")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_pool"),)
def get_link_url(self, pool):
base_url = reverse("horizon:project:loadbalancers:updatepool",
kwargs={'pool_id': pool.id})
return base_url
class UpdateVipLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "updatevip"
verbose_name = _("Edit VIP")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_vip"),)
def get_link_url(self, pool):
base_url = reverse("horizon:project:loadbalancers:updatevip",
kwargs={'vip_id': pool.vip_id})
return base_url
def allowed(self, request, datum=None):
if datum and not datum.vip_id:
return False
return True
class UpdateMemberLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "updatemember"
verbose_name = _("Edit Member")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_member"),)
def get_link_url(self, member):
base_url = reverse("horizon:project:loadbalancers:updatemember",
kwargs={'member_id': member.id})
return base_url
class UpdateMonitorLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "updatemonitor"
verbose_name = _("Edit Monitor")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_health_monitor"),)
def get_link_url(self, monitor):
base_url = reverse("horizon:project:loadbalancers:updatemonitor",
kwargs={'monitor_id': monitor.id})
return base_url
class AddPMAssociationLink(policy.PolicyTargetMixin,
tables.LinkAction):
name = "addassociation"
verbose_name = _("Associate Monitor")
url = "horizon:project:loadbalancers:addassociation"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_pool_health_monitor"),)
def allowed(self, request, datum=None):
try:
tenant_id = request.user.tenant_id
monitors = api.lbaas.pool_health_monitor_list(request,
tenant_id=tenant_id)
for m in monitors:
if m.id not in datum['health_monitors']:
return True
except Exception:
exceptions.handle(request,
_('Failed to retrieve health monitors.'))
return False
class DeletePMAssociationLink(policy.PolicyTargetMixin,
tables.LinkAction):
name = "deleteassociation"
verbose_name = _("Disassociate Monitor")
url = "horizon:project:loadbalancers:deleteassociation"
classes = ("ajax-modal", "btn-danger")
icon = "remove"
policy_rules = (("network", "delete_pool_health_monitor"),)
def allowed(self, request, datum=None):
if datum and not datum['health_monitors']:
return False
return True
class AddVIPFloatingIP(policy.PolicyTargetMixin, tables.LinkAction):
"""Add floating ip to VIP
This class is extremely similar to AssociateIP from
the instances page
"""
name = "associate"
verbose_name = _("Associate Floating IP")
url = "horizon:project:access_and_security:floating_ips:associate"
classes = ("ajax-modal",)
icon = "link"
policy_rules = (("compute", "network:associate_floating_ip"),)
def allowed(self, request, pool):
if not api.network.floating_ip_supported(request):
return False
if api.network.floating_ip_simple_associate_supported(request):
return False
if hasattr(pool, "vip") and pool.vip:
vip = pool.vip
return not (hasattr(vip, "fip") and vip.fip)
return True
def get_link_url(self, datum):
base_url = reverse(self.url)
next_url = self.table.get_full_url()
params = {
workflows.IPAssociationWorkflow.redirect_param_name: next_url}
if hasattr(datum, "vip") and datum.vip:
vip = datum.vip
params['port_id'] = vip.port_id
params = urlencode(params)
return "?".join([base_url, params])
class RemoveVIPFloatingIP(policy.PolicyTargetMixin, tables.Action):
"""Remove floating IP from VIP
This class is extremely similar to the project instance table
SimpleDisassociateIP feature, but just different enough to not
be able to share much code
"""
name = "disassociate"
preempt = True
icon = "unlink"
verbose_name = _("Disassociate Floating IP")
classes = ("btn-danger", "btn-disassociate",)
policy_rules = (("compute", "network:disassociate_floating_ip"),)
def allowed(self, request, pool):
if not api.network.floating_ip_supported(request):
return False
if not conf.HORIZON_CONFIG["simple_ip_management"]:
return False
if hasattr(pool, "vip") and pool.vip:
vip = pool.vip
return (hasattr(vip, "fip") and vip.fip)
return False
def single(self, table, request, pool_id):
try:
pool = api.lbaas.pool_get(request, pool_id)
fips = api.network.tenant_floating_ip_list(request)
vip_fips = [fip for fip in fips
if fip.port_id == pool.vip.port_id]
if not vip_fips:
messages.info(request, _("No floating IPs to disassociate."))
else:
api.network.floating_ip_disassociate(request,
vip_fips[0].id)
messages.success(request,
_("Successfully disassociated "
"floating IP: %s") % fip.ip)
except Exception:
exceptions.handle(request,
_("Unable to disassociate floating IP."))
return shortcuts.redirect(request.get_full_path())
class UpdatePoolsRow(tables.Row):
ajax = True
def get_data(self, request, pool_id):
pool = api.lbaas.pool_get(request, pool_id)
try:
vip = api.lbaas.vip_get(request, pool.vip_id)
pool.vip = vip
except Exception:
pass
try:
subnet = api.neutron.subnet_get(request, pool.subnet_id)
pool.subnet_name = subnet.cidr
except Exception:
pool.subnet_name = pool.subnet_id
return pool
STATUS_CHOICES = (
("Active", True),
("Down", True),
("Error", False),
)
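# In Horizon status_choices, the boolean marks whether a status is a
# settled state (True) or an error state (False); statuses absent from the
# tuple (e.g. the Pending_* values) evaluate as indeterminate, so AJAX row
# updates keep polling rows in those states.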
STATUS_DISPLAY_CHOICES = (
("Active", pgettext_lazy("Current status of a Pool",
u"Active")),
("Down", pgettext_lazy("Current status of a Pool",
u"Down")),
("Error", pgettext_lazy("Current status of a Pool",
u"Error")),
("Created", pgettext_lazy("Current status of a Pool",
u"Created")),
("Pending_Create", pgettext_lazy("Current status of a Pool",
u"Pending Create")),
("Pending_Update", pgettext_lazy("Current status of a Pool",
u"Pending Update")),
("Pending_Delete", pgettext_lazy("Current status of a Pool",
u"Pending Delete")),
("Inactive", pgettext_lazy("Current status of a Pool",
u"Inactive")),
)
ADMIN_STATE_DISPLAY_CHOICES = (
("UP", pgettext_lazy("Admin state of a Load balancer", u"UP")),
("DOWN", pgettext_lazy("Admin state of a Load balancer", u"DOWN")),
)
def get_vip_name(pool):
if hasattr(pool, "vip") and pool.vip:
template_name = 'project/loadbalancers/_pool_table_vip_cell.html'
context = {"vip": pool.vip, }
return template.loader.render_to_string(template_name, context)
else:
return None
def get_subnet(pool):
if hasattr(pool, "subnet") and pool.subnet:
template_name = 'project/loadbalancers/_pool_table_subnet_cell.html'
context = {"subnet": pool.subnet}
return template.loader.render_to_string(template_name, context)
else:
return None
class PoolsTable(tables.DataTable):
METHOD_DISPLAY_CHOICES = (
("round_robin", pgettext_lazy("load balancing method",
u"Round Robin")),
("least_connections", pgettext_lazy("load balancing method",
u"Least Connections")),
("source_ip", pgettext_lazy("load balancing method",
u"Source IP")),
)
name = tables.Column("name_or_id",
verbose_name=_("Name"),
link="horizon:project:loadbalancers:pooldetails")
description = tables.Column('description', verbose_name=_("Description"))
provider = tables.Column('provider', verbose_name=_("Provider"),
filters=(lambda v: filters.default(v, _('N/A')),))
subnet_name = tables.Column(get_subnet, verbose_name=_("Subnet"))
protocol = tables.Column('protocol', verbose_name=_("Protocol"))
method = tables.Column('lb_method',
verbose_name=_("LB Method"),
display_choices=METHOD_DISPLAY_CHOICES)
status = tables.Column('status',
verbose_name=_("Status"),
status=True,
status_choices=STATUS_CHOICES,
display_choices=STATUS_DISPLAY_CHOICES)
vip_name = tables.Column(get_vip_name, verbose_name=_("VIP"))
admin_state = tables.Column("admin_state",
verbose_name=_("Admin State"),
display_choices=ADMIN_STATE_DISPLAY_CHOICES)
class Meta(object):
name = "poolstable"
verbose_name = _("Pools")
status_columns = ["status"]
row_class = UpdatePoolsRow
table_actions = (AddPoolLink, DeletePoolLink)
row_actions = (UpdatePoolLink, AddVipLink, UpdateVipLink,
DeleteVipLink, AddPMAssociationLink,
DeletePMAssociationLink, DeletePoolLink,
AddVIPFloatingIP, RemoveVIPFloatingIP)
def get_pool_link(member):
return reverse("horizon:project:loadbalancers:pooldetails",
args=(http.urlquote(member.pool_id),))
def get_member_link(member):
return reverse("horizon:project:loadbalancers:memberdetails",
args=(http.urlquote(member.id),))
class UpdateMemberRow(tables.Row):
ajax = True
def get_data(self, request, member_id):
member = api.lbaas.member_get(request, member_id)
try:
pool = api.lbaas.pool_get(request, member.pool_id)
member.pool_name = pool.name
except Exception:
member.pool_name = member.pool_id
return member
class MembersTable(tables.DataTable):
address = tables.Column('address',
verbose_name=_("IP Address"),
link=get_member_link,
attrs={'data-type': "ip"})
protocol_port = tables.Column('protocol_port',
verbose_name=_("Protocol Port"))
weight = tables.Column('weight',
verbose_name=_("Weight"))
pool_name = tables.Column('pool_name',
verbose_name=_("Pool"), link=get_pool_link)
status = tables.Column('status',
verbose_name=_("Status"),
status=True,
status_choices=STATUS_CHOICES,
display_choices=STATUS_DISPLAY_CHOICES)
admin_state = tables.Column("admin_state",
verbose_name=_("Admin State"),
display_choices=ADMIN_STATE_DISPLAY_CHOICES)
class Meta(object):
name = "memberstable"
verbose_name = _("Members")
status_columns = ["status"]
row_class = UpdateMemberRow
table_actions = (AddMemberLink, DeleteMemberLink)
row_actions = (UpdateMemberLink, DeleteMemberLink)
def get_monitor_details(monitor):
if monitor.type in ('HTTP', 'HTTPS'):
return ("%(http_method)s %(url_path)s => %(codes)s" %
{'http_method': monitor.http_method,
'url_path': monitor.url_path,
'codes': monitor.expected_codes})
else:
return _("-")
class MonitorsTable(tables.DataTable):
monitor_type = tables.Column(
"type", verbose_name=_("Monitor Type"),
link="horizon:project:loadbalancers:monitordetails")
delay = tables.Column("delay", verbose_name=_("Delay"))
timeout = tables.Column("timeout", verbose_name=_("Timeout"))
max_retries = tables.Column("max_retries", verbose_name=_("Max Retries"))
details = tables.Column(get_monitor_details, verbose_name=_("Details"))
admin_state = tables.Column("admin_state",
verbose_name=_("Admin State"),
display_choices=ADMIN_STATE_DISPLAY_CHOICES)
class Meta(object):
name = "monitorstable"
verbose_name = _("Monitors")
table_actions = (AddMonitorLink, DeleteMonitorLink)
row_actions = (UpdateMonitorLink, DeleteMonitorLink)
| [
"[email protected]"
] | |
1e254ef6262f6a2353a21e1b62b99ba344188ff8 | 19a4365d81507587ef09488edc7850c2227e7165 | /994.py | 96554ca2d2b849066c85334c4f0f81e9c29e95a0 | [] | no_license | akauntotesuto888/Leetcode-Lintcode-Python | 80d8d9870b3d81da7be9c103199dad618ea8739a | e2fc7d183d4708061ab9b610b3b7b9e2c3dfae6d | refs/heads/master | 2023-08-07T12:53:43.966641 | 2021-09-17T19:51:09 | 2021-09-17T19:51:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 872 | py | class Solution:
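from typing import List  # required for the List[...] annotations below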
def orangesRotting(self, grid: List[List[int]]) -> int:
rotten = []
m, n = len(grid), len(grid[0])
for i in range(m):
for j in range(n):
if grid[i][j] == 2:
rotten.append((i, j))
step = -1
while rotten:
new_rotten = []
for x, y in rotten:
for dx, dy in [(-1, 0), (1, 0), (0, 1), (0, -1)]:
new_x, new_y = x+dx, y+dy
if 0 <= new_x < m and 0 <= new_y < n and grid[new_x][new_y] == 1:
grid[new_x][new_y] = 2
new_rotten.append((new_x, new_y))
rotten = new_rotten
step += 1
for i in range(m):
for j in range(n):
if grid[i][j] == 1: return -1
return 0 if step == -1 else step | [
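# Multi-source BFS: every initially rotten orange seeds the frontier and
# each frontier generation is one minute, so time and space are O(m*n).
# Example: [[2,1,1],[1,1,0],[0,1,1]] -> 4.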
"[email protected]"
] | |
bf2bae30cb193ff8b262ba23d6ec0d870c3220ac | d51cf2fe640f3bd5f4c4247ffaa198a30a58d96a | /mongo_db_course/01_data_extraction/16_extract_airports_quiz.py | a968065384c5c95507f14c724185a35f039ff185 | [] | no_license | AlexSkrn/sqlite_bash_scripts | 44c6dbfc83b0e6be6126ede06e9ffb9d5805bae9 | 6f1d8c6581ace44a6d77c736cee6d5cdd40001b6 | refs/heads/master | 2021-07-09T08:43:48.193623 | 2021-04-08T19:44:29 | 2021-04-08T19:44:29 | 238,682,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,002 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Complete the 'extract_airports()' function so that it returns a list of airport
codes, excluding any combinations like "All".
Refer to the 'options.html' file in the tab above for a stripped down version
of what is actually on the website. The test() assertions are based on the
given file.
"""
from bs4 import BeautifulSoup
html_page = "options.html"
def extract_airports(page):
data = []
with open(page, "r") as html:
soup = BeautifulSoup(html, "lxml")
airport_list = soup.find(id='AirportList')
for option in airport_list.find_all('option'):
val = option['value']
if not val.startswith('All'):
data.append(val)
return data
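# Each qualifying element looks like <option value="ATL">...</option>; the
# startswith('All') test skips the combination entries (values beginning
# with "All") that the problem statement says to exclude.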
def test():
data = extract_airports(html_page)
assert len(data) == 15
assert "ATL" in data
assert "ABR" in data
if __name__ == "__main__":
test()
| [
"[email protected]"
] | |
2d5b87286cdfe5d088153553ecc12ce5664f7f4a | 8ddda8fb6e5853126dcdafa3281c75071ada45c1 | /vyperlogix/gds/space.py | 52a4dad46eb96d4deecf17860057fc4ee2adde8b | [
"CC0-1.0"
] | permissive | raychorn/chrome_gui | a48f3f9d931922a018e894f891ccd952476cd1ee | f1fade70b61af12ee43c55c075aa9cfd32caa962 | refs/heads/master | 2022-12-19T19:46:04.656032 | 2020-10-08T14:45:14 | 2020-10-08T14:45:14 | 299,167,534 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,226 | py | '''
This module provides a function that constructs a list containing
the sizes of directories under a specified directory.
Copyright (C) 2002 GDS Software
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of
the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
MA 02111-1307 USA
See http://www.gnu.org/licenses/licenses.html for more details.
'''
import os
__version__ = "$Id: space.py,v 1.4 2002/08/21 12:41:49 donp Exp $"
listG = []
def GetTotalFileSize(dummy_param, directory, list_of_files):
'''Given a list of files and the directory they're in, add the
total size and directory name to the global list listG.
'''
global listG
currdir = os.getcwd()
os.chdir(directory)
total_size = 0
if len(list_of_files) != 0:
for file in list_of_files:
if file == ".." or file == ".": continue
size = os.stat(file)[6]
total_size = total_size + size
listG.append([total_size, directory])
os.chdir(currdir)
def GetSize(directory):
'''Returns a list of the form [ [a, b], [c, d], ... ] where
a, c, ... are the number of total bytes in the directory and
b, d, ... are the directory names. The indicated directory
is recursively descended and the results are sorted by directory
size with the largest directory at the beginning of the list.
'''
global listG
listG = []
os.path.walk(directory, GetTotalFileSize, "")
listG.sort()
listG.reverse()
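# listG now holds [total_bytes, directory] pairs sorted largest-first,
# ready for ShowBiggestDirectories() to format.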
def ShowBiggestDirectories(directory):
GetSize(directory)
# Get total number of bytes
total_size = 0
for dir in listG:
total_size = total_size + dir[0]
if total_size != 0:
print "For directory '%s': " % directory,
print "[total space = %.1f MB]" % (total_size / 1e6)
print " % MB Directory"
print "------ ----- " + "-" * 50
not_shown_count = 0
for dir in listG:
percent = 100.0 * dir[0] / total_size
dir[1] = string.replace(dir[1], "\\\\", "/")
if percent >= 0.1:
print "%6.1f %5d %s" % (percent, int(dir[0]/1e6), dir[1])
else:
not_shown_count = not_shown_count + 1
if not_shown_count > 0:
if not_shown_count > 1:
print " [%d directories not shown]" % not_shown_count
else:
print " [%d directory not shown]" % not_shown_count
if __name__ == '__main__':
import sys
name = sys.argv[0]
sys.argv = sys.argv[1:]
if len(sys.argv) == 0:
sys.argv.append(".")
ShowBiggestDirectories(sys.argv[0])
| [
"[email protected]"
] |