blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
616
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 777
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 149
values | src_encoding
stringclasses 26
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 3
10.2M
| extension
stringclasses 188
values | content
stringlengths 3
10.2M
| authors
sequencelengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
71d9ef0d70866a4e18c09969ee810ff780a8146c | b434d3432decfd039b0a5b57a567478deefa4d32 | /backend/app/payments/__init__.py | 3aa807b7976475b0ab98c405cfaf156801e9924c | [] | no_license | elcolono/marryday | b895bca21337e0f3092f8eda4cac59b23c41b4b7 | 89e3caf5c2b0260d15259c6d72bc85d1f3dac81c | refs/heads/main | 2023-08-27T03:09:25.797531 | 2021-09-18T12:40:44 | 2021-09-18T12:40:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51 | py | default_app_config = "payments.apps.PaymentsConfig" | [
"[email protected]"
] | |
fef6b5cbd6467df66736475fcd841be9bc0cc929 | 84c4514c0d9588026f1f203c2d351df226170f75 | /python/itertools/permutations.py | bfacc64c73bf1bbc3b0ce55bba4154f974d6fe6c | [] | no_license | hiromichinomata/hackerrank | eafc1a902353f6bdac508f67cfa7eebdbfb2811f | bffca0f56c92b752706b5a9fb4c814f44ea5d14e | refs/heads/master | 2022-12-01T15:39:25.811250 | 2020-08-08T01:44:10 | 2020-08-08T01:44:10 | 264,445,214 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | from itertools import permutations
# HackerRank itertools.permutations: read a string and a length K, then
# print every K-length permutation of the string's characters, one per
# line.  Sorting the characters first yields lexicographic output order.
letters, size = input().strip().split()
for perm in permutations(sorted(letters), int(size)):
    print("".join(perm))
| [
"[email protected]"
] | |
16db4fc999d70029f8e94677713d54ff4f1cca36 | f4335e8e7d3010506f570167bbba18156d3a4674 | /stubs/django/core/management/commands/diffsettings.pyi | 1bf6f90fade7e0b8e54afff184eba3267ee5ee24 | [] | no_license | rtpg/typehangar | 133686ea45ad6187b768290aeebda9cbcae25586 | 790d057497c4791a38f9e3e009b07935b4a12f45 | refs/heads/master | 2021-01-19T04:49:17.940793 | 2017-01-16T13:54:14 | 2017-01-16T13:54:14 | 69,260,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | pyi | # Stubs for django.core.management.commands.diffsettings (Python 3.5)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from typing import Any
from django.core.management.base import BaseCommand
# Signature stub: flattens a settings module into a dict; `omittable`
# controls whether default-valued settings are included.
def module_to_dict(module, omittable: Any = ...): ...
# Stub for the `manage.py diffsettings` management command.
class Command(BaseCommand):
    help = ... # type: str
    requires_system_checks = ... # type: bool
    def add_arguments(self, parser): ...
    def handle(self, **options): ...
| [
"[email protected]"
] | |
e468552fe67dcb111020cfc2ebd9623c74e0c240 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03720/s960059730.py | c3987b6c50c512aecd596e019b24702590445f5d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,311 | py | import sys, re
from math import ceil, floor, sqrt, pi, factorial, gcd
from copy import deepcopy
from collections import Counter, deque
from heapq import heapify, heappop, heappush
from itertools import accumulate, product, combinations, combinations_with_replacement
from bisect import bisect, bisect_left, bisect_right
from functools import reduce
from decimal import Decimal, getcontext
# input = sys.stdin.readline
# Competitive-programming stdin helpers: i_* read ints, s_* read strings.
def i_input(): return int(input())
def i_map(): return map(int, input().split())
def i_list(): return list(i_map())
def i_row(N): return [i_input() for _ in range(N)]
def i_row_list(N): return [i_list() for _ in range(N)]
def s_input(): return input()
def s_map(): return input().split()
def s_list(): return list(s_map())
# NOTE(review): s_row returns the *function object* N times — probably
# meant s_input() with parentheses; unused by main() below, so harmless here.
def s_row(N): return [s_input for _ in range(N)]
def s_row_str(N): return [s_list() for _ in range(N)]
def s_row_list(N): return [list(s_input()) for _ in range(N)]
def lcm(a, b): return a * b // gcd(a, b)
sys.setrecursionlimit(10 ** 6)
INF = float('inf')
MOD = 10 ** 9 + 7
# Module-level accumulators shared with main().
num_list = []
str_list = []
def main():
    """Read an undirected graph (n vertices, m edges) from stdin and
    print the degree of each vertex 1..n, one per line.
    """
    n, m = i_map()
    for _ in range(m):
        a, b = i_map()
        # Record both endpoints; a vertex's degree is its occurrence count.
        num_list.append(a)
        num_list.append(b)
    num_counter = Counter(num_list)
    # Counter returns 0 for vertices that appear in no edge.
    for i in range(1,n+1):
        print(num_counter[i])
if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
cc23354f1ac1be52b795119e99c44df6f9b9a574 | 0b793bce2da8c3d09b7956c0672ddbffd46feaed | /hackerrank/algorithm/lonly_integer.py | 49cc044edcb98b61afa115495f50c34b58c36815 | [
"MIT"
] | permissive | knuu/competitive-programming | c6c4e08fb231937d988bdc5a60a8ad6b31b97616 | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | refs/heads/master | 2021-01-17T09:39:02.647688 | 2020-11-07T03:17:22 | 2020-11-07T03:17:22 | 27,886,732 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | from collections import Counter
# HackerRank "Lonely Integer": the first line (element count) is parsed
# but unused; print each value that occurs exactly once in the second line.
int(input())
frequency = Counter(input().split())
for value, occurrences in frequency.items():
    if occurrences == 1:
        print(value)
| [
"[email protected]"
] | |
e8f56efacae6ebed48b265ae2ae07847dcfaeb1d | 9b87fc7054bedaef1bbfe2842bfca12d5585119b | /nicegui/elements/custom_example.py | ab8af2bcd42916f997d1d55803d71709488c011e | [
"MIT"
] | permissive | TrendingTechnology/nicegui | cb08287c9b0cab7ae1a831ee623a056d8ecdee43 | 68fa24456497683417d2e613ec573673deacd7f7 | refs/heads/main | 2023-06-20T06:11:52.914008 | 2021-07-22T05:09:40 | 2021-07-22T05:09:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | from .custom_view import CustomView
from .element import Element
class CustomExampleView(CustomView):
    """View backing the CustomExample element: holds an integer `value`
    and accumulates numbers sent from the frontend via 'onAdd' events."""
    def __init__(self, on_change):
        # Register the 'custom_example' component with an initial value of 0.
        super().__init__('custom_example', __file__, value=0)
        self.on_change = on_change
        # Only 'onAdd' events from the frontend are accepted.
        self.allowed_events = ['onAdd']
        self.initialize(temp=False, onAdd=self.handle_add)
    def handle_add(self, msg):
        # Frontend sent a number: add it to the running total, then notify
        # the optional on_change callback with the new total.
        self.options.value += msg.number
        if self.on_change is not None:
            return self.on_change(self.options.value)
        return False
class CustomExample(Element):
    """Demo NiceGUI element wrapping CustomExampleView: a counter that can
    be incremented from Python (add) or from the frontend ('onAdd')."""
    def __init__(self, *, on_change=None):
        super().__init__(CustomExampleView(on_change))
    def add(self, number: str):
        # Increment server-side and fire the same change callback the
        # frontend path uses.  NOTE(review): annotation says str but the
        # value is added numerically — presumably int is intended; confirm.
        self.view.options.value += number
        self.view.on_change(self.view.options.value)
| [
"[email protected]"
] | |
25e4a10195a5b94ecb830ef0b1d184c9feda747f | 58ffe83eb9828668c13242c6f98238f08655f561 | /app/notebooks/problang/transcript_utils.py | cdb2f5a88761411c4cf30c48af9b83fd05e1dcf8 | [
"Apache-2.0"
] | permissive | DanFu09/esper | f9dcc47cd5677dee8dffb1e066d69332471a0d6c | ccc5547de3637728b8aaab059b6781baebc269ec | refs/heads/master | 2020-04-04T21:31:43.549572 | 2020-01-16T01:14:13 | 2020-01-16T01:14:13 | 156,289,533 | 4 | 0 | Apache-2.0 | 2018-12-14T03:01:02 | 2018-11-05T22:05:07 | Jupyter Notebook | UTF-8 | Python | false | false | 7,054 | py | import numpy as np
import torch
from torch.utils.data import Dataset
import requests
from query.models import Video
from timeit import default_timer as now
from esper.prelude import pcache
import random
SEGMENT_SIZE = 200
SEGMENT_STRIDE = 100
# Thin wrappers around the local transcript-index service on port 8111.
def video_list():
    """Return the list of videos known to the index service."""
    r = requests.get('http://localhost:8111/videos')
    return r.json()
def get_doc(item):
    """Fetch the transcript document (word list) for one video/phrase."""
    r = requests.post('http://localhost:8111/getdoc', json={'phrases': [item]})
    return r.json()
def doc_len():
    """Return a mapping of document name -> word count."""
    r = requests.get('http://localhost:8111/doclen')
    return r.json()
def compute_vectors(docs, vocabulary, window_size, stride):
    """Ask the service to precompute windowed count vectors (no result)."""
    requests.post('http://localhost:8111/computevectors', json={
        'vocabulary': vocabulary,
        'docs': docs,
        'window_size': window_size,
        'stride': stride
    })
def find_segments(docs, lexicon, threshold, window_size, stride):
    """Search documents for windows scoring above `threshold` on `lexicon`."""
    r = requests.post('http://localhost:8111/findsegments', json={
        'lexicon': lexicon,
        'threshold': threshold,
        'window_size': window_size,
        'merge_overlaps': False,
        'stride': stride,
        'docs': docs
    })
    return r.json()
def small_video_sample():
    """Return the first 10 videos (by ascending id) that both exist in the
    database and have a transcript available from the index service.

    Ids with no Video row or no transcript are skipped silently.
    """
    videos = []
    id = 1
    while len(videos) < 10:
        try:
            v = Video.objects.get(id=id)
            # get_doc raises if no transcript exists; used only as a probe.
            get_doc(v)
            videos.append(v)
        except Exception:
            pass
        id += 1
    return videos
def word_counts():
    """Return corpus-wide word -> frequency mapping from the index service."""
    r = requests.get('http://localhost:8111/wordcounts')
    return r.json()
# Words occurring fewer than this many times corpus-wide are dropped.
VOCAB_THRESHOLD = 100
def load_vocab():
    """Build the sorted vocabulary of words above VOCAB_THRESHOLD."""
    counts = word_counts()
    print('Full vocabulary size: {}'.format(len(counts)))
    vocabulary = sorted([word for (word, count) in counts.items() if count > VOCAB_THRESHOLD])
    print('Filtered vocabulary size: {}'.format(len(vocabulary)))
    return vocabulary
# Cached via pcache so the service is only queried once per machine.
vocabulary = pcache.get('vocabulary', load_vocab)
vocab_size = len(vocabulary)
class SegmentTextDataset(Dataset):
    """Dataset of fixed-size transcript segments.

    Each item is a window of `segment_size` words taken every
    `segment_stride` words from each document in `docs`.  Documents and
    built segments are fetched lazily and memoized in-process.

    Fix vs. original: `_text_to_vector` referenced `defaultdict`, which is
    not imported anywhere in this module, so any use with a vocabulary
    raised NameError.  Replaced with `collections.Counter` (missing keys
    count as 0, matching the defaultdict(int) intent).
    """
    def __init__(self, docs, vocabulary=None, segment_size=SEGMENT_SIZE, segment_stride=SEGMENT_STRIDE, use_cuda=False):
        self._segment_size = segment_size
        self._use_cuda = use_cuda
        self._vocabulary = vocabulary
        self._doc_names = docs
        self._doc_lens = doc_len()
        # Number of full windows per document.
        self._num_segs = np.array([
            len(range(0, self._doc_lens[doc]-segment_size+1, segment_stride))
            for doc in self._doc_names
        ])
        # Flat index -> (doc index, word offset, window index within doc).
        self._back_index = [
            (i, j, k)
            for i, doc in enumerate(self._doc_names)
            for k, j in enumerate(range(0, self._doc_lens[doc]-segment_size+1, segment_stride))
        ]
        # (doc name, word offset) -> flat index.
        self._forward_index = {
            (self._doc_names[i], j): k
            for k, (i, j, _) in enumerate(self._back_index)
        }
        self._docs = {}   # doc index -> fetched word list
        self._segs = {}   # (doc index, word offset) -> built segment dict
    def segment_index(self, doc, word):
        """Return the flat dataset index of the window starting at `word`."""
        return self._forward_index[(doc, word)]
    def _text_to_vector(self, words):
        """Normalized bag-of-words vector over the fixed vocabulary."""
        from collections import Counter
        counts = Counter(words)
        t = torch.tensor([counts[word] for word in self._vocabulary], dtype=torch.float32)
        t /= torch.sum(t)
        return t
    def __len__(self):
        return self._num_segs.sum()
    def __getitem__(self, idx):
        (i, j, _) = self._back_index[idx]
        if not (i, j) in self._segs:
            # Fetch and cache the whole document on first touch.
            if not i in self._docs:
                self._docs[i] = get_doc(self._doc_names[i])
            seg = self._docs[i][j:j+self._segment_size]
            data = {
                'document_idx': i,
                'segment_idx': j,
            }
            # Vectors are only built when a vocabulary was supplied.
            if self._vocabulary is not None:
                data['vector'] = self._text_to_vector(seg)
                if self._use_cuda:
                    data['vector'] = data['vector'].cuda()
            data['segment'] = ' '.join(seg)
            self._segs[(i, j)] = data
        return self._segs[(i, j)]
import mmap
class SegmentVectorDataset(Dataset):
    """Segment count-vectors backed by a precomputed binary file.

    The file at /app/data/segvectors.bin is assumed to hold one uint8
    count per vocabulary word, for every segment of every document, laid
    out contiguously in document order — TODO confirm against the writer.
    """
    def __init__(self, docs, vocab_size, segment_size=SEGMENT_SIZE, segment_stride=SEGMENT_STRIDE, use_cuda=False, inmemory=False):
        # Text dataset is used only for its segment indexing (_num_segs,
        # _back_index), not its contents.
        self._ds = SegmentTextDataset(docs, segment_size=segment_size, segment_stride=segment_stride)
        self._doc_names = docs
        self._vocab_size = vocab_size
        self._use_cuda = use_cuda
        self._inmemory = inmemory
        self._file_handle = open('/app/data/segvectors.bin', 'r+b')
        self._file = mmap.mmap(self._file_handle.fileno(), 0)
        self._byte_offsets = []
        if self._inmemory:
            # Pull the whole file into a bytes object for faster slicing.
            self._buffer = self._file.read()
        # Compute prefix sum of document offsets
        for i, doc in enumerate(self._doc_names):
            # dlen of the *previous* doc; unused (and harmlessly wraps to
            # the last doc via i-1 == -1) on the first iteration.
            dlen = self._ds._num_segs[i-1] * self._vocab_size
            if i == 0:
                self._byte_offsets.append(0)
            else:
                self._byte_offsets.append(self._byte_offsets[i - 1] + dlen)
    def _byte_offset(self, idx):
        # Byte position of segment `idx`: doc base + window offset.
        # NOTE(review): multiplies the *word* offset j, not the window
        # index k, by vocab_size — correct only if stride == vector rows;
        # verify against how segvectors.bin was written.
        (i, _, j) = self._ds._back_index[idx]
        return self._byte_offsets[i] + j * self._vocab_size
    def __len__(self):
        return len(self._ds)
    def __getitem__(self, idx):
        offset = self._byte_offset(idx)
        if self._inmemory:
            byts = self._buffer[offset:offset+self._vocab_size]
        else:
            self._file.seek(offset)
            byts = self._file.read(self._vocab_size)
        assert len(byts) == self._vocab_size, \
            'Invalid read at index {}, offset {}. Expected {} bytes, got {}'.format(idx, offset, self._vocab_size, len(byts))
        # uint8 counts -> float tensor normalized to sum to 1.
        npbuf = np.frombuffer(byts, dtype=np.uint8)
        tbuf = torch.from_numpy(npbuf).float()
        tbuf /= torch.sum(tbuf)
        if self._use_cuda:
            tbuf = tbuf.cuda()
        return tbuf, idx
class LabeledSegmentDataset(Dataset):
    """Wraps an unlabeled segment dataset with (segment index, label)
    pairs, yielding (vector, one-hot label, segment index) triples."""
    def __init__(self, unlabeled_dataset, labels, categories):
        self._ds = unlabeled_dataset
        self._labels = labels          # list of (segment index, class id)
        self._categories = categories  # number of classes for one-hot
    def __len__(self):
        return len(self._labels)
    def __getitem__(self, idx):
        (seg_idx, label) = self._labels[idx]
        # One-hot encode the integer class id.
        label = torch.tensor([1 if label == i else 0 for i in range(self._categories)], dtype=torch.float32)
        if self._ds._use_cuda:
            label = label.cuda()
        tbuf, _ = self._ds[seg_idx]
        return tbuf, label, seg_idx
def label_widget(dataset, indices, done_callback):
    """Jupyter widget for hand-labeling transcript segments.

    Shows one segment at a time; typing 'y' (positive) or anything else
    (negative) advances to the next segment.  Pressing Finished calls
    done_callback with the accumulated (segment index, label) list.
    """
    from IPython.display import display, clear_output
    from ipywidgets import Text, HTML, Button
    labels = []
    i = 0
    transcript = HTML(dataset[indices[0]]['segment'])
    box = Text(placeholder='y/n')
    def on_submit(text):
        nonlocal i
        label = 1 if text.value == 'y' else 0
        labels.append((indices[i], label))
        i += 1
        # NOTE(review): after labeling the last index this raises
        # IndexError on indices[i] — confirm whether callers rely on
        # pressing Finished before exhausting the list.
        transcript.value = dataset[indices[i]]['segment']
        box.value = ''
    box.on_submit(on_submit)
    finished = False
    btn_finished = Button(description='Finished')
    def on_click(b):
        done_callback(labels)
    btn_finished.on_click(on_click)
    display(transcript)
    display(box)
    display(btn_finished)
| [
"[email protected]"
] | |
6310996c29f82720e743d2c1c5d7c036e79d4a73 | d93c91e904470b46e04a4eadb8c459f9c245bb5a | /banglore_scrape/proptiger/proptiger/spiders/proptigerresale.py | 47b05e9f213ad8c5615011068e0591a29f338475 | [] | no_license | nbourses/scrappers | 3de3cd8a5408349b0ac683846b9b7276156fb08a | cde168a914f83cd491dffe85ea24aa48f5840a08 | refs/heads/master | 2021-03-30T15:38:29.096213 | 2020-03-25T03:23:56 | 2020-03-25T03:23:56 | 63,677,541 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,099 | py | import scrapy
from proptiger.items import ProptigerItem
from scrapy.spiders import Spider
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.selector import Selector
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
import json
from scrapy.selector import XmlXPathSelector
import lxml.etree as etree
from urlparse import urljoin
import urllib
import time
from datetime import datetime as dt
class PropRentSpider(Spider):
    """Scrapes Bangalore resale listings from proptiger.com's JSON API.

    Each request fetches a 15-row page; parse() emits one ProptigerItem
    per listing and schedules the next page until totalCount is reached.
    """
    name = "proptigerresaleBangalore"
    # URL-encoded selector: bookingStatusId == 1, cityId == 2 (Bangalore),
    # paged 15 rows at a time starting from offset 0.
    start_urls = ['https://www.proptiger.com/data/v2/entity/resale-listing?selector={%22filters%22:{%22and%22:[{%22equal%22:{%22bookingStatusId%22:1}},{%22equal%22:{%22cityId%22:2}}]},%22paging%22:{%22start%22:0,%22rows%22:15}}']
    allowed_domains = ["www.proptiger.com"]
    # NOTE(review): rules only apply to CrawlSpider; this class extends
    # Spider, so they are inert — confirm intent.
    rules = (Rule(LinkExtractor(deny=(), allow=('http://www.proptiger.com/'), ), callback='parse', follow=True, ),)
    custom_settings = {
        'DEPTH_LIMIT': 10000,
        'DOWNLOAD_DELAY': 2
    }
    def parse(self, response):
        """Parse one JSON page of listings and paginate."""
        jr = response.body
        jd = json.loads(jr)
        handle_http_list = [500]  # NOTE(review): unused
        path = jd["data"]
        base_url = "https://www.proptiger.com/"  # NOTE(review): unused
        max_page = int(jd["totalCount"])
        # Current page offset is embedded in the request URL's
        # %22start%22:<n> fragment; extracted by splitting on ':' and ','.
        cur_page = int(response.url.split(':')[-2].split(',')[0])
        cur_page1 = cur_page + 15
        page_num =str(cur_page1)
        # Next-page URL: same selector with the start offset advanced.
        url = 'https://www.proptiger.com/data/v2/entity/resale-listing?selector={{%22filters%22:{{%22and%22:[{{%22equal%22:{{%22bookingStatusId%22:1}}}},{{%22equal%22:{{%22cityId%22:2}}}}]}},%22paging%22:{{%22start%22:{x},%22rows%22:15}}}}'.format(x=str(cur_page1))
        for i in range(0,len(path)):
            if (i+cur_page) == (max_page):
                break
            item = ProptigerItem()
            item['data_id'] = path[i]['propertyId']
            # Each field below falls back to 'None'/'0' when absent,
            # since the API omits keys rather than sending nulls.
            try:
                item['listing_by'] = path[i]['companySeller']['company']['type']
            except:
                item['listing_by'] = 'None'
            try:
                item['name_lister'] = path[i]['companySeller']['user']['fullName']
            except:
                item['name_lister'] = 'None'
            try:
                item['mobile_lister'] = path[i]['companySeller']['user']['contactNumbers'][0]['contactNumber']
            except:
                item['mobile_lister'] = 'None'
            try:
                item['price_per_sqft'] = path[i]['currentListingPrice']['pricePerUnitArea']
            except:
                item['price_per_sqft'] = '0'
            try:
                item['Selling_price'] = str(path[i]['currentListingPrice']['price'])
            except:
                item['Selling_price'] = '0'
            item['Monthly_Rent'] = '0'
            # Timestamps arrive as epoch milliseconds; convert to UTC strings.
            try:
                dt1 = int(path[i]['currentListingPrice']['createdAt'] * 0.001)
                item['listing_date'] = time.strftime('%m/%d/%Y %H:%M:%S', time.gmtime(dt1))
            except:
                item['listing_date'] = '0'
            try:
                dt2 = int(path[i]['currentListingPrice']['updatedAt'] * 0.001)
                item['updated_date'] = time.strftime('%m/%d/%Y %H:%M:%S',time.gmtime(dt2))
            except:
                item['updated_date'] = '0'
            try:
                item['lat'] = path[i]['latitude']
            except:
                item['lat'] = '0'
            try:
                item['longt'] = path[i]['longitude']
            except:
                item['longt'] = '0'
            try:
                item['txn_type'] = path[i]['listingCategory']
            except:
                item['txn_type'] = 'None'
            try:
                item['config_type'] = str(path[i]['property']['bedrooms']) + 'BHK'
            except:
                item['config_type'] = 'None'
            try:
                item['property_type'] = path[i]['property']['unitType']
            except:
                item['property_type'] = 'None'
            try:
                item['Bua_sqft'] = str(path[i]['property']['size'])
            except:
                item['Bua_sqft'] = '0'
            try:
                item['carpet_area'] = str(path[i]['property']['carpetArea'])
            except:
                item['carpet_area'] = '0'
            try:
                item['areacode'] = path[i]['property']['project']['localityId']
            except:
                item['areacode'] = 'None'
            try:
                item['city'] = path[i]['property']['project']['locality']['suburb']['city']['label']
            except:
                item['city'] = 'None'
            try:
                item['locality'] = path[i]['property']['project']['locality']['suburb']['label']
            except:
                item['locality'] = 'None'
            try:
                item['sublocality'] = path[i]['property']['project']['locality']['label']
            except:
                item['sublocality'] = 'None'
            try:
                item['Building_name'] = path[i]['property']['project']['locality']['newsTag']
            except:
                item['Building_name'] = 'None'
            try:
                dt3 = int(path[i]['property']['project']['launchDate'] * 0.001)
                item['Launch_date'] = str(time.strftime('%m/%d/%Y %H:%M:%S',time.gmtime(dt3)))
            except:
                item['Launch_date'] = '0'
            try:
                item['address'] = path[i]['property']['project']['address']
            except:
                item['address'] = 'None'
            try:
                dt4 = int(path[i]['property']['project']['possessionDate'] * 0.001)
                item['Possession'] = str(time.strftime('%m/%d/%Y %H:%M:%S',time.gmtime(dt4)))
            except:
                item['Possession'] = '0'
            try:
                item['Status'] = path[i]['property']['project']['projectStatus']
            except:
                item['Status'] = 'None'
            try:
                item['platform'] = path[i]['listingSourceDomain']
            except:
                item['platform'] = 'None'
            item['management_by_landlord'] = 'None'
            item['google_place_id'] = 'None'
            item['age'] = 'None'
            if item['Selling_price'] == '0' and item['Monthly_Rent'] == '0':
                item['price_on_req'] = 'true'
            else:
                item['price_on_req'] = 'false'
            item['Details'] = path[i]['property']['project']['description']
            item['scraped_time'] = dt.now().strftime('%m/%d/%Y %H:%M:%S')
            # Data-quality flags: quality4 grades price/area/name/geo
            # completeness (1 / 0.5 / 0); quality1-3 flag listing,
            # project-date, and lister-contact completeness respectively.
            if (((not item['Monthly_Rent'] == '0') and (not item['Bua_sqft']=='0') and (not item['Building_name']=='None') and (not item['lat']=='0')) or ((not item['Selling_price'] == '0') and (not item['Bua_sqft']=='0') and (not item['Building_name']=='None') and (not item['lat']=='0')) or ((not item['price_per_sqft'] == '0') and (not item['Bua_sqft']=='0') and (not item['Building_name']=='None') and (not item['lat']=='0'))):
                item['quality4'] = 1
            elif (((not item['price_per_sqft'] == '0') and (not item['Building_name']=='None') and (not item['lat']=='0')) or ((not item['Selling_price'] == '0') and (not item['Bua_sqft']=='0') and (not item['lat']=='0')) or ((not item['Monthly_Rent'] == '0') and (not item['Bua_sqft']=='0') and (not item['lat']=='0')) or ((not item['Selling_price'] == '0') and (not item['Bua_sqft']=='0') and (not item['Building_name']=='None')) or ((not item['Monthly_Rent'] == '0') and (not item['Bua_sqft']=='0') and (not item['Building_name']=='None'))):
                item['quality4'] = 0.5
            else:
                item['quality4'] = 0
            if ((not item['Building_name'] == 'None') and (not item['listing_date'] == '0') and (not item['txn_type'] == 'None') and (not item['property_type'] == 'None') and ((not item['Selling_price'] == '0') or (not item['Monthly_Rent'] == '0'))):
                item['quality1'] = 1
            else:
                item['quality1'] = 0
            if ((not item['Launch_date'] == '0') and (not item['Possession'] == '0')):
                item['quality2'] = 1
            else:
                item['quality2'] = 0
            if ((not item['mobile_lister'] == 'None') or (not item['listing_by'] == 'None') or (not item['name_lister'] == 'None')):
                item['quality3'] = 1
            else:
                item['quality3'] = 0
            yield item
        # Schedule the next 15-row page while rows remain.
        if (cur_page+15) < ( max_page):
            yield Request(url, callback=self.parse)
"[email protected]"
] | |
8d12ea6102055c34798e687b5a6532f7642b276f | 1311696a180047135c825ffa283f9ac9750d4236 | /tests/data/stubs-ok/micropython-linux-1_12/websocket.py | 84603dedea90d09964895308d20f7dfc0ad0c2bf | [
"MIT"
] | permissive | Josverl/micropython-stubber | 71103afa842da02d5ad074b541d9bff7243ce23f | 68fe9113f4b4e611bb4c3d19f79c8ba0e7111f5e | refs/heads/main | 2023-08-31T00:51:22.200348 | 2023-05-31T07:48:54 | 2023-05-31T07:48:54 | 177,823,007 | 135 | 8 | NOASSERTION | 2023-09-11T21:25:19 | 2019-03-26T16:00:53 | Python | UTF-8 | Python | false | false | 546 | py | """
Module: 'websocket' on micropython-linux-1.12
"""
# MCU: {'ver': '1.12', 'port': 'linux', 'arch': 'x64', 'sysname': 'unknown', 'release': '1.12.0', 'name': 'micropython', 'mpy': 2821, 'version': '1.12.0', 'machine': 'unknown', 'build': '', 'nodename': 'unknown', 'platform': 'linux', 'family': 'micropython'}
# Stubber: 1.3.6
class websocket:
    """Auto-generated stream-interface stub for MicroPython's websocket
    module (linux port, v1.12); methods were captured without self/args."""
    def close():
        pass
    def ioctl():
        pass
    def read():
        pass
    def readinto():
        pass
    def readline():
        pass
    def write():
        pass
| [
"[email protected]"
] | |
91fe8bdac939808480646276789f56bc2fd0c450 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_227/ch149_2020_04_13_20_21_26_194548.py | 50b04d13c34b8c1459a9db8abfd23816a3214e2e | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 926 | py | salario_bruto=float(input("Qual o seu salário bruto? "))
numero_dependentes=int(input("Qual o seu número de dependentes? "))
# INSS (2020 brackets).  Fixes two bugs in the original:
#  * the first bracket tested `salario_bruto <= 1.045` (decimal-comma typo
#    for 1045), so every salary up to 1045 fell through all brackets into
#    the else and was charged the 671.12 ceiling;
#  * the `>= x.01 and <= y` bracket bounds left gaps (e.g. 1045.00–1045.01);
#    chained upper bounds cover the whole range with no gaps.
if salario_bruto <= 1045.00:
    INSS = salario_bruto * 0.075
elif salario_bruto <= 2089.60:
    INSS = salario_bruto * 0.09
elif salario_bruto <= 3134.40:
    INSS = salario_bruto * 0.12
elif salario_bruto <= 6101.06:
    INSS = salario_bruto * 0.14
else:
    # Contribution ceiling above the top bracket.
    INSS = 671.12
# IRRF base: gross salary minus INSS minus R$189.59 per dependent.
base_de_calculo = salario_bruto - INSS - (numero_dependentes * 189.59)
# IRRF monthly table: rate applied to the base minus a fixed deduction.
# The original double-counted 2826.65 in two brackets and left gaps;
# chained upper bounds make each value fall in exactly one bracket.
if base_de_calculo <= 1903.98:
    IRRF = 0
elif base_de_calculo <= 2826.65:
    IRRF = (base_de_calculo * 0.075) - 142.80
elif base_de_calculo <= 3751.05:
    IRRF = (base_de_calculo * 0.15) - 354.80
elif base_de_calculo <= 4664.68:
    IRRF = (base_de_calculo * 0.225) - 636.13
else:
    IRRF = (base_de_calculo * 0.275) - 869.36
print(IRRF)
"[email protected]"
] | |
96acef88bac936107e4d65c64c0f6929293a8933 | 888f519f9831cc8e172a81693dc318514d0b45fe | /bnv-ufo/particles.py | 4eb0deeafa342ddc420fe665bbdef60477465531 | [] | no_license | mattbellis/generate_private_MC_from_LHE_files | ca69093c19b16f79291f97c8dc4863f5dc4b73d5 | 16f4099a91488e4e030ceec62efbb157351d3793 | refs/heads/master | 2021-06-08T02:18:48.233276 | 2021-04-24T15:21:50 | 2021-04-24T15:21:50 | 151,903,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,968 | py | # This file was automatically created by FeynRules $Revision: 845 $
# Mathematica version: 8.0 for Linux x86 (64-bit) (November 7, 2010)
# Date: Tue 7 Feb 2012 13:57:29
from __future__ import division
from object_library import all_particles, Particle
import parameters as Param
ve = Particle(pdg_code = 12,
name = 've',
antiname = 've~',
spin = 2,
color = 1,
mass = Param.ZERO,
width = Param.ZERO,
texname = 've',
antitexname = 've',
charge = 0,
LeptonNumber = 1,
GhostNumber = 0)
ve__tilde__ = ve.anti()
vm = Particle(pdg_code = 14,
name = 'vm',
antiname = 'vm~',
spin = 2,
color = 1,
mass = Param.ZERO,
width = Param.ZERO,
texname = 'vm',
antitexname = 'vm',
charge = 0,
LeptonNumber = 1,
GhostNumber = 0)
vm__tilde__ = vm.anti()
vt = Particle(pdg_code = 16,
name = 'vt',
antiname = 'vt~',
spin = 2,
color = 1,
mass = Param.ZERO,
width = Param.ZERO,
texname = 'vt',
antitexname = 'vt',
charge = 0,
LeptonNumber = 1,
GhostNumber = 0)
vt__tilde__ = vt.anti()
e__minus__ = Particle(pdg_code = 11,
name = 'e-',
antiname = 'e+',
spin = 2,
color = 1,
mass = Param.Me,
width = Param.ZERO,
texname = 'e-',
antitexname = 'e-',
charge = -1,
LeptonNumber = 1,
GhostNumber = 0)
e__plus__ = e__minus__.anti()
m__minus__ = Particle(pdg_code = 13,
name = 'm-',
antiname = 'm+',
spin = 2,
color = 1,
mass = Param.MM,
width = Param.ZERO,
texname = 'm-',
antitexname = 'm-',
charge = -1,
LeptonNumber = 1,
GhostNumber = 0)
m__plus__ = m__minus__.anti()
tt__minus__ = Particle(pdg_code = 15,
name = 'tt-',
antiname = 'tt+',
spin = 2,
color = 1,
mass = Param.MTA,
width = Param.ZERO,
texname = 'tt-',
antitexname = 'tt-',
charge = -1,
LeptonNumber = 1,
GhostNumber = 0)
tt__plus__ = tt__minus__.anti()
u = Particle(pdg_code = 2,
name = 'u',
antiname = 'u~',
spin = 2,
color = 3,
mass = Param.MU,
width = Param.ZERO,
texname = 'u',
antitexname = 'u',
charge = 2/3,
LeptonNumber = 0,
GhostNumber = 0)
u__tilde__ = u.anti()
c = Particle(pdg_code = 4,
name = 'c',
antiname = 'c~',
spin = 2,
color = 3,
mass = Param.MC,
width = Param.ZERO,
texname = 'c',
antitexname = 'c',
charge = 2/3,
LeptonNumber = 0,
GhostNumber = 0)
c__tilde__ = c.anti()
t = Particle(pdg_code = 6,
name = 't',
antiname = 't~',
spin = 2,
color = 3,
mass = Param.MT,
width = Param.WT,
texname = 't',
antitexname = 't',
charge = 2/3,
LeptonNumber = 0,
GhostNumber = 0)
t__tilde__ = t.anti()
d = Particle(pdg_code = 1,
name = 'd',
antiname = 'd~',
spin = 2,
color = 3,
mass = Param.MD,
width = Param.ZERO,
texname = 'd',
antitexname = 'd',
charge = -1/3,
LeptonNumber = 0,
GhostNumber = 0)
d__tilde__ = d.anti()
s = Particle(pdg_code = 3,
name = 's',
antiname = 's~',
spin = 2,
color = 3,
mass = Param.MS,
width = Param.ZERO,
texname = 's',
antitexname = 's',
charge = -1/3,
LeptonNumber = 0,
GhostNumber = 0)
s__tilde__ = s.anti()
b = Particle(pdg_code = 5,
name = 'b',
antiname = 'b~',
spin = 2,
color = 3,
mass = Param.MB,
width = Param.ZERO,
texname = 'b',
antitexname = 'b',
charge = -1/3,
LeptonNumber = 0,
GhostNumber = 0)
b__tilde__ = b.anti()
ghA = Particle(pdg_code = 9000001,
name = 'ghA',
antiname = 'ghA~',
spin = -1,
color = 1,
mass = Param.ZERO,
width = Param.ZERO,
texname = 'ghA',
antitexname = 'ghA',
charge = 0,
LeptonNumber = 0,
GhostNumber = 1)
ghA__tilde__ = ghA.anti()
ghZ = Particle(pdg_code = 9000002,
name = 'ghZ',
antiname = 'ghZ~',
spin = -1,
color = 1,
mass = Param.MZ,
width = Param.ZERO,
texname = 'ghZ',
antitexname = 'ghZ',
charge = 0,
LeptonNumber = 0,
GhostNumber = 1)
ghZ__tilde__ = ghZ.anti()
ghWp = Particle(pdg_code = 9000003,
name = 'ghWp',
antiname = 'ghWp~',
spin = -1,
color = 1,
mass = Param.MW,
width = Param.ZERO,
texname = 'ghWp',
antitexname = 'ghWp',
charge = 1,
LeptonNumber = 0,
GhostNumber = 1)
ghWp__tilde__ = ghWp.anti()
ghWm = Particle(pdg_code = 9000004,
name = 'ghWm',
antiname = 'ghWm~',
spin = -1,
color = 1,
mass = Param.MW,
width = Param.ZERO,
texname = 'ghWm',
antitexname = 'ghWm',
charge = -1,
LeptonNumber = 0,
GhostNumber = 1)
ghWm__tilde__ = ghWm.anti()
ghG = Particle(pdg_code = 9000005,
name = 'ghG',
antiname = 'ghG~',
spin = -1,
color = 8,
mass = Param.ZERO,
width = Param.ZERO,
texname = 'ghG',
antitexname = 'ghG',
charge = 0,
LeptonNumber = 0,
GhostNumber = 1)
ghG__tilde__ = ghG.anti()
A = Particle(pdg_code = 22,
name = 'A',
antiname = 'A',
spin = 3,
color = 1,
mass = Param.ZERO,
width = Param.ZERO,
texname = 'A',
antitexname = 'A',
charge = 0,
LeptonNumber = 0,
GhostNumber = 0)
Z = Particle(pdg_code = 23,
name = 'Z',
antiname = 'Z',
spin = 3,
color = 1,
mass = Param.MZ,
width = Param.WZ,
texname = 'Z',
antitexname = 'Z',
charge = 0,
LeptonNumber = 0,
GhostNumber = 0)
W__plus__ = Particle(pdg_code = 24,
name = 'W+',
antiname = 'W-',
spin = 3,
color = 1,
mass = Param.MW,
width = Param.WW,
texname = 'W+',
antitexname = 'W+',
charge = 1,
LeptonNumber = 0,
GhostNumber = 0)
W__minus__ = W__plus__.anti()
G = Particle(pdg_code = 21,
name = 'G',
antiname = 'G',
spin = 3,
color = 8,
mass = Param.ZERO,
width = Param.ZERO,
texname = 'G',
antitexname = 'G',
charge = 0,
LeptonNumber = 0,
GhostNumber = 0)
H = Particle(pdg_code = 25,
name = 'H',
antiname = 'H',
spin = 1,
color = 1,
mass = Param.MH,
width = Param.WH,
texname = '\\phi',
antitexname = '\\phi',
charge = 0,
LeptonNumber = 0,
GhostNumber = 0)
phi0 = Particle(pdg_code = 250,
name = 'phi0',
antiname = 'phi0',
spin = 1,
color = 1,
mass = Param.MZ,
width = Param.ZERO,
texname = 'phi0',
antitexname = 'phi0',
GoldstoneBoson = True,
charge = 0,
LeptonNumber = 0,
GhostNumber = 0)
phi__plus__ = Particle(pdg_code = 251,
name = 'phi+',
antiname = 'phi-',
spin = 1,
color = 1,
mass = Param.MW,
width = Param.ZERO,
texname = '\\phi^+',
antitexname = '\\phi^+',
GoldstoneBoson = True,
charge = 1,
LeptonNumber = 0,
GhostNumber = 0)
phi__minus__ = phi__plus__.anti()
| [
"[email protected]"
] | |
06ee5f8ff46617f38f61ac547a3d6c951b8fb803 | 1a166165ab8287d01cbb377a13efdb5eff5dfef0 | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_virtual_wans_operations.py | fe0a31731f0cf1bff2e9d3a1a2d94cbec51a6e1a | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | manoj0806/azure-sdk-for-python | 7a14b202ff80f528abd068bf50334e91001a9686 | aab999792db1132232b2f297c76800590a901142 | refs/heads/master | 2023-04-19T16:11:31.984930 | 2021-04-29T23:19:49 | 2021-04-29T23:19:49 | 363,025,016 | 1 | 0 | MIT | 2021-04-30T04:23:35 | 2021-04-30T04:23:35 | null | UTF-8 | Python | false | false | 30,068 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualWANsOperations:
"""VirtualWANsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Standard autogenerated operation-group wiring: keep references to
        # the shared pipeline client, serializers, and client configuration.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
async def get(
    self,
    resource_group_name: str,
    virtual_wan_name: str,
    **kwargs
) -> "_models.VirtualWAN":
    """Retrieves the details of a VirtualWAN.

    :param resource_group_name: The resource group name of the VirtualWan.
    :type resource_group_name: str
    :param virtual_wan_name: The name of the VirtualWAN being retrieved.
    :type virtual_wan_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: VirtualWAN, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2018_07_01.models.VirtualWAN
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # NOTE: AutoRest-generated plumbing; the URL -> params -> headers ->
    # send -> error-map -> deserialize order is the generator's contract.
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualWAN"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    # Callers may extend/override the default status-code -> exception map.
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    accept = "application/json"

    # Construct URL
    url = self.get.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'VirtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Only 200 is a success for GET; anything else is mapped to an exception.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(_models.Error, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('VirtualWAN', pipeline_response)

    if cls:
        # Hand the caller's hook the raw response alongside the model.
        return cls(pipeline_response, deserialized, {})

    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{VirtualWANName}'}  # type: ignore
async def _create_or_update_initial(
    self,
    resource_group_name: str,
    virtual_wan_name: str,
    wan_parameters: "_models.VirtualWAN",
    **kwargs
) -> "_models.VirtualWAN":
    """Issue the initial PUT of the create-or-update long-running operation
    and return the first deserialized response. Polling to completion is
    handled by :meth:`begin_create_or_update`.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualWAN"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._create_or_update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'VirtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the VirtualWAN model as the PUT body.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(wan_parameters, 'VirtualWAN')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(_models.Error, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    # Both success codes carry the same VirtualWAN payload shape
    # (200 = updated existing, 201 = created new).
    if response.status_code == 200:
        deserialized = self._deserialize('VirtualWAN', pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize('VirtualWAN', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{VirtualWANName}'}  # type: ignore
async def begin_create_or_update(
    self,
    resource_group_name: str,
    virtual_wan_name: str,
    wan_parameters: "_models.VirtualWAN",
    **kwargs
) -> AsyncLROPoller["_models.VirtualWAN"]:
    """Creates a VirtualWAN resource if it doesn't exist else updates the existing VirtualWAN.

    :param resource_group_name: The resource group name of the VirtualWan.
    :type resource_group_name: str
    :param virtual_wan_name: The name of the VirtualWAN being created or updated.
    :type virtual_wan_name: str
    :param wan_parameters: Parameters supplied to create or update VirtualWAN.
    :type wan_parameters: ~azure.mgmt.network.v2018_07_01.models.VirtualWAN
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either VirtualWAN or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.VirtualWAN]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualWAN"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only send the initial PUT when starting fresh; with a continuation
    # token we resume polling an operation already in flight.
    if cont_token is None:
        raw_result = await self._create_or_update_initial(
            resource_group_name=resource_group_name,
            virtual_wan_name=virtual_wan_name,
            wan_parameters=wan_parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # These kwargs were consumed by the initial request and must not leak
    # into the polling method below.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Runs once the LRO finishes: deserialize the terminal response.
        deserialized = self._deserialize('VirtualWAN', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'VirtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
    }

    if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{VirtualWANName}'}  # type: ignore
async def _update_tags_initial(
    self,
    resource_group_name: str,
    virtual_wan_name: str,
    wan_parameters: "_models.TagsObject",
    **kwargs
) -> "_models.VirtualWAN":
    """Issue the initial PATCH of the update-tags long-running operation
    and return the first deserialized response. Polling to completion is
    handled by :meth:`begin_update_tags`.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualWAN"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._update_tags_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'VirtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Only the TagsObject (not a full VirtualWAN) travels in the PATCH body.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(wan_parameters, 'TagsObject')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # The generator emits both 200 and 201 as success codes here,
    # presumably mirroring the service's swagger -- both carry a VirtualWAN.
    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(_models.Error, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if response.status_code == 200:
        deserialized = self._deserialize('VirtualWAN', pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize('VirtualWAN', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{VirtualWANName}'}  # type: ignore
async def begin_update_tags(
    self,
    resource_group_name: str,
    virtual_wan_name: str,
    wan_parameters: "_models.TagsObject",
    **kwargs
) -> AsyncLROPoller["_models.VirtualWAN"]:
    """Updates a VirtualWAN tags.

    :param resource_group_name: The resource group name of the VirtualWan.
    :type resource_group_name: str
    :param virtual_wan_name: The name of the VirtualWAN being updated.
    :type virtual_wan_name: str
    :param wan_parameters: Parameters supplied to Update VirtualWAN tags.
    :type wan_parameters: ~azure.mgmt.network.v2018_07_01.models.TagsObject
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either VirtualWAN or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.VirtualWAN]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualWAN"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Skip the initial PATCH when resuming from a continuation token.
    if cont_token is None:
        raw_result = await self._update_tags_initial(
            resource_group_name=resource_group_name,
            virtual_wan_name=virtual_wan_name,
            wan_parameters=wan_parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # Consumed by the initial request; must not reach the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        deserialized = self._deserialize('VirtualWAN', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'VirtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
    }

    if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{VirtualWANName}'}  # type: ignore
async def _delete_initial(
    self,
    resource_group_name: str,
    virtual_wan_name: str,
    **kwargs
) -> None:
    """Issue the initial DELETE of the delete long-running operation.
    Returns nothing; polling to completion is handled by :meth:`begin_delete`.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    accept = "application/json"

    # Construct URL
    url = self._delete_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'VirtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Success codes for DELETE: 200, 202 (accepted, still in progress), 204.
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(_models.Error, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{VirtualWANName}'}  # type: ignore
async def begin_delete(
    self,
    resource_group_name: str,
    virtual_wan_name: str,
    **kwargs
) -> AsyncLROPoller[None]:
    """Deletes a VirtualWAN.

    :param resource_group_name: The resource group name of the VirtualWan.
    :type resource_group_name: str
    :param virtual_wan_name: The name of the VirtualWAN being deleted.
    :type virtual_wan_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Skip the initial DELETE when resuming from a continuation token.
    if cont_token is None:
        raw_result = await self._delete_initial(
            resource_group_name=resource_group_name,
            virtual_wan_name=virtual_wan_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # Consumed by the initial request; must not reach the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deletion yields no model; only the caller's hook (if any) runs.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'VirtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
    }

    if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{VirtualWANName}'}  # type: ignore
def list_by_resource_group(
    self,
    resource_group_name: str,
    **kwargs
) -> AsyncIterable["_models.ListVirtualWANsResult"]:
    """Lists all the VirtualWANs in a resource group.

    :param resource_group_name: The resource group name of the VirtualWan.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ListVirtualWANsResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.ListVirtualWANsResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListVirtualWANsResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # First page: build the URL from the operation's template.
            # Construct URL
            url = self.list_by_resource_group.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # Subsequent pages: the service supplies a complete nextLink URL.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Split a page response into (continuation token, items).
        deserialized = self._deserialize('ListVirtualWANsResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            error = self._deserialize(_models.Error, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    # Lazily fetches pages as the caller iterates.
    return AsyncItemPaged(
        get_next, extract_data
    )
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans'}  # type: ignore
def list(
    self,
    **kwargs
) -> AsyncIterable["_models.ListVirtualWANsResult"]:
    """Lists all the VirtualWANs in a subscription.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ListVirtualWANsResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.ListVirtualWANsResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListVirtualWANsResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # First page: subscription-scoped URL (no resource group).
            # Construct URL
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # Subsequent pages: the service supplies a complete nextLink URL.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Split a page response into (continuation token, items).
        deserialized = self._deserialize('ListVirtualWANsResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            error = self._deserialize(_models.Error, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    # Lazily fetches pages as the caller iterates.
    return AsyncItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualWans'}  # type: ignore
| [
"[email protected]"
] | |
1921637bf67204f6d4521f412444523581176738 | afb16c3188bf06af65ae0d998e114c72342bd8be | /note/demo/python_trace/demo2.py | 69e2891cccff56b373a8630dfd6f7efb23775614 | [] | no_license | onsunsl/onsunsl.github.io | aa75f399f1c647bc2e62314633bfe35187e59ad4 | 4ed2b1b9a2407afcbffdf304020d42b81c4c8cdc | refs/heads/master | 2023-05-26T12:33:11.167270 | 2023-04-01T10:18:05 | 2023-04-01T10:18:05 | 237,595,319 | 1 | 0 | null | 2023-05-23T20:13:11 | 2020-02-01T10:02:58 | Python | UTF-8 | Python | false | false | 490 | py | import os
from time import sleep
import signal
import sys
from traceback import extract_stack
def sigterm_handler(_signo, _stack_frame):
    """Handle SIGTERM: drop a marker file, then exit with status 0.

    Registered below via ``signal.signal(signal.SIGTERM, ...)``. Writing
    "./1.txt" lets an outside observer confirm the handler actually ran;
    ``sys.exit(0)`` raises SystemExit(0), which unwinds through the main
    loop's try/finally so cleanup code still executes.

    :param _signo: signal number delivered by the OS (unused).
    :param _stack_frame: interrupted stack frame (unused).
    """
    # Context manager guarantees the file is closed even if write() raises;
    # the original open/write/close sequence leaked the handle on error.
    with open("./1.txt", "w") as marker:
        marker.write("sigterm_handler")
    sys.exit(0)
# Install the handler: from now on SIGTERM runs sigterm_handler instead
# of the default immediate termination.
signal.signal(signal.SIGTERM, sigterm_handler)

try:
    # Print our PID/PPID so the reader can `kill <pid>` from another shell.
    print(os.getpid(), os.getppid())
    print("Hello")
    i = 0
    # Deliberate infinite loop: this demo only stops when a signal
    # (e.g. SIGTERM handled above, or Ctrl-C) interrupts it.
    while True:
        i += 1
        print("Iteration #%i" % i)
        sleep(1)
finally:
    # Runs on every exit path, including the SystemExit raised by the
    # SIGTERM handler -- demonstrating that cleanup still happens.
    print("Goodbye")
"[email protected]"
] | |
e3bcf5984f2cde90d396e03b2e11d82015d67e8c | 3cedc7c1519d3b013aad9ec4e6a6ee7834da7589 | /python_code/多线程开发/E_多线程使用共享数据.py | 65fc69e75f8ee5a199ae857933d77ea27bd7330c | [] | no_license | hzrg/songqin_course | 53437100669ee93d2ac5ecae5de938b1a4007d7f | 05e422ce34a42fd6d3819722a19252f8005e79ed | refs/heads/master | 2022-02-09T13:27:59.871400 | 2019-06-13T06:08:45 | 2019-06-13T06:08:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,631 | py | # coding=utf8
import threading
from time import sleep
# Alipay account balances keyed by user name -- shared mutable state that
# both worker threads below read and write WITHOUT any locking.
zhifubao = {
    'jcy' : 2000,
    'liming' : 5000,
    'wangan' : 15000,
    'zhaolei' : 6005000,
}
# Thread 1: DiDi ride payment -- deducts `amount` from the user's account.
def thread1_didi_pay(account,amount):
    # Read the current balance: first half of an unsynchronized
    # read-modify-write on the shared dict (this is the race).
    print('* t1: get balance from bank')
    balance = zhifubao[account]
    # sleep(2) stands in for ~2 seconds of processing, widening the window
    # in which thread 2 can update the same account behind our back.
    print('* t1: do something(like discount lookup) for 2 seconds')
    sleep(2)
    print('* t1: deduct')
    # Write back from the stale local copy, clobbering any concurrent update.
    zhifubao[account] = balance - amount
# Thread 2: Yu'e Bao interest -- credits `amount` of interest to the account.
def thread2_yuebao_interest(account,amount):
    # Same unsynchronized read-modify-write pattern as thread 1.
    print('$ t2: get balance from bank')
    balance = zhifubao[account]
    # sleep(1) stands in for ~1 second of processing; shorter than
    # thread 1's, so this thread finishes (and writes) first.
    print('$ t2: do something2.... for 1 seconds')
    sleep(1)
    print('$ t2: add')
    zhifubao[account] = balance + amount
# Run both workers concurrently against the SAME account to expose the race.
t1 = threading.Thread(target=thread1_didi_pay, args=('jcy',10))
t2 = threading.Thread(target=thread2_yuebao_interest, args=('jcy',10))
t1.start()
t2.start()
t1.join()
t2.join()
# Expected 2000 (-10 then +10 cancel out), but the race makes it 1990:
# see the note below.
print('finally, jcy balance is %s' % zhifubao['jcy'])
# English summary of the Chinese note below: the balance should end
# unchanged, but both threads start from the same shared value 2000.
# Thread 2 finishes first and stores 2010; thread 1 still holds the stale
# 2000, finishes later and stores 1990, overwriting 2010. The fix is to
# guard the read-modify-write with a lock.
"""
正常来说,金额应该不变的,但是由于使用共享数据,导致的问题,
2个线程同时start,同时使用的是共享的数据2000,第二个线程
先结束,变成2010,存回列表,但是第一个线程此时使用的还是开始的2000,
第一个线程结束后,就是1990,覆盖掉2010;
解决方法,加锁。
"""
"[email protected]"
] | |
4d2a3ab4f356b1581b21a231111a088874cc611e | afd2087e80478010d9df66e78280f75e1ff17d45 | /torch/onnx/_internal/diagnostics/infra/sarif/_suppression.py | c1dcb014809d994a4777917e5e1764388b48dff5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | pytorch/pytorch | 7521ac50c47d18b916ae47a6592c4646c2cb69b5 | a6f7dd4707ac116c0f5fb5f44f42429f38d23ab4 | refs/heads/main | 2023-08-03T05:05:02.822937 | 2023-08-03T00:40:33 | 2023-08-03T04:14:52 | 65,600,975 | 77,092 | 24,610 | NOASSERTION | 2023-09-14T21:58:39 | 2016-08-13T05:26:41 | Python | UTF-8 | Python | false | false | 1,249 | py | # DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Literal, Optional
from torch.onnx._internal.diagnostics.infra.sarif import _location, _property_bag
@dataclasses.dataclass
class Suppression(object):
    """A suppression that is relevant to a result.

    Generated from the SARIF JSON schema (see module header); each
    field's ``schema_property_name`` metadata maps the Python attribute
    back to its JSON property name during (de)serialization.
    """

    # Required by the schema: whether the suppression lives in the scanned
    # source itself ("inSource") or in an external mechanism ("external").
    kind: Literal["inSource", "external"] = dataclasses.field(
        metadata={"schema_property_name": "kind"}
    )
    # Unique (GUID) identifier for this suppression, if assigned.
    guid: Optional[str] = dataclasses.field(
        default=None, metadata={"schema_property_name": "guid"}
    )
    # Free-text reason the result was suppressed.
    justification: Optional[str] = dataclasses.field(
        default=None, metadata={"schema_property_name": "justification"}
    )
    # Location associated with the suppression -- presumably where the
    # in-source annotation sits; verify against the SARIF spec.
    location: Optional[_location.Location] = dataclasses.field(
        default=None, metadata={"schema_property_name": "location"}
    )
    # Arbitrary extra properties carried alongside the suppression.
    properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
        default=None, metadata={"schema_property_name": "properties"}
    )
    # Review status of the suppression.
    state: Optional[Literal["accepted", "underReview", "rejected"]] = dataclasses.field(
        default=None, metadata={"schema_property_name": "state"}
    )
# flake8: noqa
| [
"[email protected]"
] | |
7691802558073b399b3e21487c2b7faf90c162dc | b250b3f74b30ad29f65acab3040433473a259cc1 | /src/_23a.py | cdd79900dd2c709eacf9c37588896d815d22132b | [] | no_license | Abarn279/advent-of-code-2015 | 0cc6ce58ba443335fd9dcd451e327cec01fd3e96 | 8fbf0b2bc576556d5351d64b93c972a6f6ec8020 | refs/heads/master | 2021-06-28T09:11:28.905618 | 2020-11-30T22:02:10 | 2020-11-30T22:02:10 | 75,760,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,140 | py | reg = {'a':1, 'b':0}
# Puzzle input: the assembly listing for the two-register machine.
# Kept as one triple-quoted literal; split('\n') yields one instruction
# per entry plus a final empty string from the trailing newline, which
# the interpreter below simply steps past.
prog = '''jio a, +19
inc a
tpl a
inc a
tpl a
inc a
tpl a
tpl a
inc a
inc a
tpl a
tpl a
inc a
inc a
tpl a
inc a
inc a
tpl a
jmp +23
tpl a
tpl a
inc a
inc a
tpl a
inc a
inc a
tpl a
inc a
tpl a
inc a
tpl a
inc a
tpl a
inc a
inc a
tpl a
inc a
inc a
tpl a
tpl a
inc a
jio a, +8
inc b
jie a, +4
tpl a
inc a
jmp +2
hlf a
jmp -7
'''.split('\n')
# Interpreter for the six-instruction machine (AoC 2015 day 23).
# `reg` (register file) and `prog` (instruction list) are defined above.
i = 0  # instruction pointer
while i < len(prog):
    line = prog[i]
    inst = line[:3]
    if inst == 'hlf':
        # hlf r: halve register r. Registers are integers and hlf is only
        # applied to even values, so use floor division -- the original
        # `/ 2` silently turned registers into floats under Python 3.
        r = line.split(' ')[1]
        reg[r] = reg[r] // 2
    elif inst == 'tpl':
        # tpl r: triple register r.
        r = line.split(' ')[1]
        reg[r] = reg[r] * 3
    elif inst == 'inc':
        # inc r: increment register r by one.
        r = line.split(' ')[1]
        reg[r] = reg[r] + 1
    elif inst == 'jmp':
        # jmp offset: unconditional relative jump.
        o = line.split(' ')[1]
        i = i + int(o)
        continue
    elif inst == 'jie':
        # jie r, offset: jump if register r is even.
        inst, r, o = line.split(' ')
        r = r[:-1]  # strip the trailing comma from "a,"
        if reg[r] % 2 == 0:
            i = i + int(o)
            continue
    elif inst == 'jio':
        # jio r, offset: jump if register r is ONE (not "odd").
        inst, r, o = line.split(' ')
        r = r[:-1]
        if reg[r] == 1:
            i = i + int(o)
            continue
    # Fall through (including the trailing '' line) to the next instruction.
    i += 1
print(reg)
"[email protected]"
] | |
9491cccb3a1203f18678ca88d25a374d6c280612 | a06fd6b7b4e5fc2b1b5a46b4edd20a11f717a5ea | /netbox/extras/filters.py | d0a801b481f55cfc6f08e7f6c154b2c803fd170f | [
"Apache-2.0"
] | permissive | feiynagly/netbox | d9be722eaa5021cf39e82c19c3e4562dedd94254 | d364bbbaa6ee4f2a19015d07dd0de855628befb4 | refs/heads/master | 2022-12-04T04:41:29.052349 | 2021-05-11T07:13:56 | 2021-05-11T07:13:56 | 173,664,986 | 1 | 1 | Apache-2.0 | 2022-11-22T03:12:55 | 2019-03-04T03:10:07 | Python | UTF-8 | Python | false | false | 7,182 | py | import django_filters
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from taggit.models import Tag
from dcim.models import DeviceRole, Platform, Region, Site
from tenancy.models import Tenant, TenantGroup
from .constants import CF_FILTER_DISABLED, CF_FILTER_EXACT, CF_TYPE_BOOLEAN, CF_TYPE_SELECT
from .models import ConfigContext, CustomField, Graph, ExportTemplate, ObjectChange, TopologyMap
class CustomFieldFilter(django_filters.Filter):
    """
    Filter objects by the presence of a CustomFieldValue. The filter's name is used as the CustomField name.
    """

    def __init__(self, custom_field, *args, **kwargs):
        # Capture the field's type and filter logic up front; both drive
        # how filter() below interprets the incoming value.
        self.cf_type = custom_field.type
        self.filter_logic = custom_field.filter_logic
        super().__init__(*args, **kwargs)

    def filter(self, queryset, value):

        # Skip filter on empty value
        if value is None or not value.strip():
            return queryset

        # Selection fields get special treatment (values must be integers)
        if self.cf_type == CF_TYPE_SELECT:
            try:
                # Treat 0 as None
                if int(value) == 0:
                    # "No selection": match objects that have NO value for
                    # this custom field at all.
                    return queryset.exclude(
                        custom_field_values__field__name=self.field_name,
                    )
                # Match on exact CustomFieldChoice PK
                else:
                    return queryset.filter(
                        custom_field_values__field__name=self.field_name,
                        custom_field_values__serialized_value=value,
                    )
            except ValueError:
                # Non-integer input can never match a choice PK.
                return queryset.none()

        # Apply the assigned filter logic (exact or loose)
        if self.cf_type == CF_TYPE_BOOLEAN or self.filter_logic == CF_FILTER_EXACT:
            queryset = queryset.filter(
                custom_field_values__field__name=self.field_name,
                custom_field_values__serialized_value=value
            )
        else:
            # Loose logic: case-insensitive substring match.
            queryset = queryset.filter(
                custom_field_values__field__name=self.field_name,
                custom_field_values__serialized_value__icontains=value
            )

        return queryset
class CustomFieldFilterSet(django_filters.FilterSet):
    """
    FilterSet base class that, on instantiation, registers one
    CustomFieldFilter (named ``cf_<field name>``) for every CustomField
    applicable to the parent model whose filtering is not disabled.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Resolve the content type of the model this FilterSet targets,
        # then attach a dynamic filter for each applicable custom field.
        content_type = ContentType.objects.get_for_model(self._meta.model)
        applicable_fields = CustomField.objects.filter(
            obj_type=content_type
        ).exclude(
            filter_logic=CF_FILTER_DISABLED
        )
        for field in applicable_fields:
            self.filters[f'cf_{field.name}'] = CustomFieldFilter(
                field_name=field.name,
                custom_field=field,
            )
class GraphFilter(django_filters.FilterSet):
    """Filter Graphs by type and (exact) name."""

    class Meta:
        model = Graph
        fields = ['type', 'name']
class ExportTemplateFilter(django_filters.FilterSet):
    """Filter ExportTemplates by assigned content type and (exact) name."""

    class Meta:
        model = ExportTemplate
        fields = ['content_type', 'name']
class TagFilter(django_filters.FilterSet):
    """
    Filter Tags by name or slug, with a free-text 'q' search across both.
    """
    q = django_filters.CharFilter(
        method='search',
        label='Search',
    )

    class Meta:
        model = Tag
        fields = ['name', 'slug']

    def search(self, queryset, name, value):
        """Case-insensitive substring match against name or slug."""
        # A blank/whitespace-only term is a no-op.
        if value.strip():
            return queryset.filter(
                Q(name__icontains=value) | Q(slug__icontains=value)
            )
        return queryset
class TopologyMapFilter(django_filters.FilterSet):
    """
    Filter TopologyMaps by name, slug, or assigned site (by PK or by slug).
    """
    site_id = django_filters.ModelMultipleChoiceFilter(
        field_name='site',
        queryset=Site.objects.all(),
        label='Site',
    )
    site = django_filters.ModelMultipleChoiceFilter(
        field_name='site__slug',
        queryset=Site.objects.all(),
        to_field_name='slug',
        label='Site (slug)',
    )

    class Meta:
        model = TopologyMap
        fields = ['name', 'slug']
class ConfigContextFilter(django_filters.FilterSet):
    """
    Filter ConfigContexts by name or active state, by any of their scoping
    assignments (region, site, role, platform, tenant group, or tenant --
    each matchable by PK or by slug), or via the free-text 'q' search.
    """
    q = django_filters.CharFilter(
        method='search',
        label='Search',
    )
    region_id = django_filters.ModelMultipleChoiceFilter(
        field_name='regions',
        queryset=Region.objects.all(),
        label='Region',
    )
    region = django_filters.ModelMultipleChoiceFilter(
        field_name='regions__slug',
        queryset=Region.objects.all(),
        to_field_name='slug',
        label='Region (slug)',
    )
    site_id = django_filters.ModelMultipleChoiceFilter(
        field_name='sites',
        queryset=Site.objects.all(),
        label='Site',
    )
    site = django_filters.ModelMultipleChoiceFilter(
        field_name='sites__slug',
        queryset=Site.objects.all(),
        to_field_name='slug',
        label='Site (slug)',
    )
    role_id = django_filters.ModelMultipleChoiceFilter(
        field_name='roles',
        queryset=DeviceRole.objects.all(),
        label='Role',
    )
    role = django_filters.ModelMultipleChoiceFilter(
        field_name='roles__slug',
        queryset=DeviceRole.objects.all(),
        to_field_name='slug',
        label='Role (slug)',
    )
    platform_id = django_filters.ModelMultipleChoiceFilter(
        field_name='platforms',
        queryset=Platform.objects.all(),
        label='Platform',
    )
    platform = django_filters.ModelMultipleChoiceFilter(
        field_name='platforms__slug',
        queryset=Platform.objects.all(),
        to_field_name='slug',
        label='Platform (slug)',
    )
    tenant_group_id = django_filters.ModelMultipleChoiceFilter(
        field_name='tenant_groups',
        queryset=TenantGroup.objects.all(),
        label='Tenant group',
    )
    tenant_group = django_filters.ModelMultipleChoiceFilter(
        field_name='tenant_groups__slug',
        queryset=TenantGroup.objects.all(),
        to_field_name='slug',
        label='Tenant group (slug)',
    )
    tenant_id = django_filters.ModelMultipleChoiceFilter(
        field_name='tenants',
        queryset=Tenant.objects.all(),
        label='Tenant',
    )
    tenant = django_filters.ModelMultipleChoiceFilter(
        field_name='tenants__slug',
        queryset=Tenant.objects.all(),
        to_field_name='slug',
        label='Tenant (slug)',
    )

    class Meta:
        model = ConfigContext
        fields = ['name', 'is_active']

    def search(self, queryset, name, value):
        """Free-text search against name, description, and the raw JSON data."""
        # A blank/whitespace-only term leaves the queryset unfiltered.
        if not value.strip():
            return queryset
        return queryset.filter(
            Q(name__icontains=value) |
            Q(description__icontains=value) |
            Q(data__icontains=value)
        )
class ObjectChangeFilter(django_filters.FilterSet):
    """
    Filter ObjectChange records by user, action, changed object type, request
    ID, object representation, a time range, or the free-text 'q' search.
    """
    q = django_filters.CharFilter(
        method='search',
        label='Search',
    )
    # Exposes time_after / time_before range parameters.
    time = django_filters.DateTimeFromToRangeFilter()

    class Meta:
        model = ObjectChange
        fields = ['user', 'user_name', 'request_id', 'action', 'changed_object_type', 'object_repr']

    def search(self, queryset, name, value):
        """Free-text search against the recorded user name and object repr."""
        # A blank/whitespace-only term leaves the queryset unfiltered.
        if not value.strip():
            return queryset
        return queryset.filter(
            Q(user_name__icontains=value) |
            Q(object_repr__icontains=value)
        )
| [
"[email protected]"
] | |
5429336730859c3bf71a435d3f2b50b3bab5483e | 2d5c7eccef92f3cd71484607f0d595f7787053a2 | /p067.py | bbdc8a94b6df75d8be8acc1b1ae753f79e5bf803 | [
"MIT"
] | permissive | arpit0891/Project-Euler | 48a34f69e267d6bbcbf2ee30f3f88f4354df3afb | ab36b33c578578595bb518508fa2fe5862f4a044 | refs/heads/master | 2021-02-14T00:25:01.262715 | 2020-06-26T14:41:59 | 2020-06-26T14:41:59 | 244,749,988 | 1 | 3 | MIT | 2020-06-26T14:42:00 | 2020-03-03T21:56:34 | Python | UTF-8 | Python | false | false | 16,672 | py |
# We create a new blank triangle with the same dimensions as the original big triangle.
# For each cell of the big triangle, we consider the sub-triangle whose top is at this cell,
# calculate the maximum path sum when starting from this cell, and store the result
# in the corresponding cell of the blank triangle.
#
# If we start at a particular cell, what is the maximum path total? If the cell is at the
# bottom of the big triangle, then it is simply the cell's value. Otherwise the answer is
# the cell's value plus either {the maximum path total of the cell down and to the left}
# or {the maximum path total of the cell down and to the right}, whichever is greater.
# By computing the blank triangle's values from bottom up, the dependent values are always
# computed before they are utilized. This technique is known as dynamic programming.
def compute(tri=None):
    """Return the maximum top-to-bottom path sum of a triangle, as a string.

    Works bottom-up (dynamic programming): each cell is replaced by its value
    plus the larger of the two partial sums directly beneath it, so after the
    sweep the apex holds the maximum path total.

    Args:
        tri: Optional triangle as a list of rows of numbers (row i has i+1
            entries). Defaults to the module-level ``triangle``.

    Returns:
        str: the maximum path sum.

    Note:
        The previous version mutated ``triangle`` in place, so a second call
        returned a wrong answer. The input is now copied row-by-row, leaving
        both the argument and the global unmodified.
    """
    source = triangle if tri is None else tri
    rows = [row[:] for row in source]  # shallow per-row copy; rows hold numbers
    for i in reversed(range(len(rows) - 1)):
        for j in range(len(rows[i])):
            rows[i][j] += max(rows[i + 1][j], rows[i + 1][j + 1])
    return str(rows[0][0])
triangle = [ # Mutable
[59],
[73,41],
[52,40, 9],
[26,53, 6,34],
[10,51,87,86,81],
[61,95,66,57,25,68],
[90,81,80,38,92,67,73],
[30,28,51,76,81,18,75,44],
[84,14,95,87,62,81,17,78,58],
[21,46,71,58, 2,79,62,39,31, 9],
[56,34,35,53,78,31,81,18,90,93,15],
[78,53, 4,21,84,93,32,13,97,11,37,51],
[45, 3,81,79, 5,18,78,86,13,30,63,99,95],
[39,87,96,28, 3,38,42,17,82,87,58, 7,22,57],
[ 6,17,51,17, 7,93, 9, 7,75,97,95,78,87, 8,53],
[67,66,59,60,88,99,94,65,55,77,55,34,27,53,78,28],
[76,40,41, 4,87,16, 9,42,75,69,23,97,30,60,10,79,87],
[12,10,44,26,21,36,32,84,98,60,13,12,36,16,63,31,91,35],
[70,39, 6, 5,55,27,38,48,28,22,34,35,62,62,15,14,94,89,86],
[66,56,68,84,96,21,34,34,34,81,62,40,65,54,62, 5,98, 3, 2,60],
[38,89,46,37,99,54,34,53,36,14,70,26, 2,90,45,13,31,61,83,73,47],
[36,10,63,96,60,49,41, 5,37,42,14,58,84,93,96,17, 9,43, 5,43, 6,59],
[66,57,87,57,61,28,37,51,84,73,79,15,39,95,88,87,43,39,11,86,77,74,18],
[54,42, 5,79,30,49,99,73,46,37,50, 2,45, 9,54,52,27,95,27,65,19,45,26,45],
[71,39,17,78,76,29,52,90,18,99,78,19,35,62,71,19,23,65,93,85,49,33,75, 9, 2],
[33,24,47,61,60,55,32,88,57,55,91,54,46,57, 7,77,98,52,80,99,24,25,46,78,79, 5],
[92, 9,13,55,10,67,26,78,76,82,63,49,51,31,24,68, 5,57, 7,54,69,21,67,43,17,63,12],
[24,59, 6, 8,98,74,66,26,61,60,13, 3, 9, 9,24,30,71, 8,88,70,72,70,29,90,11,82,41,34],
[66,82,67, 4,36,60,92,77,91,85,62,49,59,61,30,90,29,94,26,41,89, 4,53,22,83,41, 9,74,90],
[48,28,26,37,28,52,77,26,51,32,18,98,79,36,62,13,17, 8,19,54,89,29,73,68,42,14, 8,16,70,37],
[37,60,69,70,72,71, 9,59,13,60,38,13,57,36, 9,30,43,89,30,39,15, 2,44,73, 5,73,26,63,56,86,12],
[55,55,85,50,62,99,84,77,28,85, 3,21,27,22,19,26,82,69,54, 4,13, 7,85,14, 1,15,70,59,89,95,10,19],
[ 4, 9,31,92,91,38,92,86,98,75,21, 5,64,42,62,84,36,20,73,42,21,23,22,51,51,79,25,45,85,53, 3,43,22],
[75,63, 2,49,14,12,89,14,60,78,92,16,44,82,38,30,72,11,46,52,90,27, 8,65,78, 3,85,41,57,79,39,52,33,48],
[78,27,56,56,39,13,19,43,86,72,58,95,39, 7, 4,34,21,98,39,15,39,84,89,69,84,46,37,57,59,35,59,50,26,15,93],
[42,89,36,27,78,91,24,11,17,41, 5,94, 7,69,51,96, 3,96,47,90,90,45,91,20,50,56,10,32,36,49, 4,53,85,92,25,65],
[52, 9,61,30,61,97,66,21,96,92,98,90, 6,34,96,60,32,69,68,33,75,84,18,31,71,50,84,63, 3, 3,19,11,28,42,75,45,45],
[61,31,61,68,96,34,49,39, 5,71,76,59,62,67, 6,47,96,99,34,21,32,47,52, 7,71,60,42,72,94,56,82,83,84,40,94,87,82,46],
[ 1,20,60,14,17,38,26,78,66,81,45,95,18,51,98,81,48,16,53,88,37,52,69,95,72,93,22,34,98,20,54,27,73,61,56,63,60,34,63],
[93,42,94,83,47,61,27,51,79,79,45, 1,44,73,31,70,83,42,88,25,53,51,30,15,65,94,80,44,61,84,12,77, 2,62, 2,65,94,42,14,94],
[32,73, 9,67,68,29,74,98,10,19,85,48,38,31,85,67,53,93,93,77,47,67,39,72,94,53,18,43,77,40,78,32,29,59,24, 6, 2,83,50,60,66],
[32, 1,44,30,16,51,15,81,98,15,10,62,86,79,50,62,45,60,70,38,31,85,65,61,64, 6,69,84,14,22,56,43, 9,48,66,69,83,91,60,40,36,61],
[92,48,22,99,15,95,64,43, 1,16,94, 2,99,19,17,69,11,58,97,56,89,31,77,45,67,96,12,73, 8,20,36,47,81,44,50,64,68,85,40,81,85,52, 9],
[91,35,92,45,32,84,62,15,19,64,21,66, 6, 1,52,80,62,59,12,25,88,28,91,50,40,16,22,99,92,79,87,51,21,77,74,77, 7,42,38,42,74,83, 2, 5],
[46,19,77,66,24,18, 5,32, 2,84,31,99,92,58,96,72,91,36,62,99,55,29,53,42,12,37,26,58,89,50,66,19,82,75,12,48,24,87,91,85, 2, 7, 3,76,86],
[99,98,84,93, 7,17,33,61,92,20,66,60,24,66,40,30,67, 5,37,29,24,96, 3,27,70,62,13, 4,45,47,59,88,43,20,66,15,46,92,30, 4,71,66,78,70,53,99],
[67,60,38, 6,88, 4,17,72,10,99,71, 7,42,25,54, 5,26,64,91,50,45,71, 6,30,67,48,69,82, 8,56,80,67,18,46,66,63, 1,20, 8,80,47, 7,91,16, 3,79,87],
[18,54,78,49,80,48,77,40,68,23,60,88,58,80,33,57,11,69,55,53,64, 2,94,49,60,92,16,35,81,21,82,96,25,24,96,18, 2, 5,49, 3,50,77, 6,32,84,27,18,38],
[68, 1,50, 4, 3,21,42,94,53,24,89, 5,92,26,52,36,68,11,85, 1, 4,42, 2,45,15, 6,50, 4,53,73,25,74,81,88,98,21,67,84,79,97,99,20,95, 4,40,46, 2,58,87],
[94,10, 2,78,88,52,21, 3,88,60, 6,53,49,71,20,91,12,65, 7,49,21,22,11,41,58,99,36,16, 9,48,17,24,52,36,23,15,72,16,84,56, 2,99,43,76,81,71,29,39,49,17],
[64,39,59,84,86,16,17,66, 3, 9,43, 6,64,18,63,29,68, 6,23, 7,87,14,26,35,17,12,98,41,53,64,78,18,98,27,28,84,80,67,75,62,10,11,76,90,54,10, 5,54,41,39,66],
[43,83,18,37,32,31,52,29,95,47, 8,76,35,11, 4,53,35,43,34,10,52,57,12,36,20,39,40,55,78,44, 7,31,38,26, 8,15,56,88,86, 1,52,62,10,24,32, 5,60,65,53,28,57,99],
[ 3,50, 3,52, 7,73,49,92,66,80, 1,46, 8,67,25,36,73,93, 7,42,25,53,13,96,76,83,87,90,54,89,78,22,78,91,73,51,69, 9,79,94,83,53, 9,40,69,62,10,79,49,47, 3,81,30],
[71,54,73,33,51,76,59,54,79,37,56,45,84,17,62,21,98,69,41,95,65,24,39,37,62, 3,24,48,54,64,46,82,71,78,33,67, 9,16,96,68,52,74,79,68,32,21,13,78,96,60, 9,69,20,36],
[73,26,21,44,46,38,17,83,65,98, 7,23,52,46,61,97,33,13,60,31,70,15,36,77,31,58,56,93,75,68,21,36,69,53,90,75,25,82,39,50,65,94,29,30,11,33,11,13,96, 2,56,47, 7,49, 2],
[76,46,73,30,10,20,60,70,14,56,34,26,37,39,48,24,55,76,84,91,39,86,95,61,50,14,53,93,64,67,37,31,10,84,42,70,48,20,10,72,60,61,84,79,69,65,99,73,89,25,85,48,92,56,97,16],
[ 3,14,80,27,22,30,44,27,67,75,79,32,51,54,81,29,65,14,19, 4,13,82, 4,91,43,40,12,52,29,99, 7,76,60,25, 1, 7,61,71,37,92,40,47,99,66,57, 1,43,44,22,40,53,53, 9,69,26,81, 7],
[49,80,56,90,93,87,47,13,75,28,87,23,72,79,32,18,27,20,28,10,37,59,21,18,70, 4,79,96, 3,31,45,71,81, 6,14,18,17, 5,31,50,92,79,23,47, 9,39,47,91,43,54,69,47,42,95,62,46,32,85],
[37,18,62,85,87,28,64, 5,77,51,47,26,30,65, 5,70,65,75,59,80,42,52,25,20,44,10,92,17,71,95,52,14,77,13,24,55,11,65,26,91, 1,30,63,15,49,48,41,17,67,47, 3,68,20,90,98,32, 4,40,68],
[90,51,58,60, 6,55,23,68, 5,19,76,94,82,36,96,43,38,90,87,28,33,83, 5,17,70,83,96,93, 6, 4,78,47,80, 6,23,84,75,23,87,72,99,14,50,98,92,38,90,64,61,58,76,94,36,66,87,80,51,35,61,38],
[57,95,64, 6,53,36,82,51,40,33,47,14, 7,98,78,65,39,58,53, 6,50,53, 4,69,40,68,36,69,75,78,75,60, 3,32,39,24,74,47,26,90,13,40,44,71,90,76,51,24,36,50,25,45,70,80,61,80,61,43,90,64,11],
[18,29,86,56,68,42,79,10,42,44,30,12,96,18,23,18,52,59, 2,99,67,46,60,86,43,38,55,17,44,93,42,21,55,14,47,34,55,16,49,24,23,29,96,51,55,10,46,53,27,92,27,46,63,57,30,65,43,27,21,20,24,83],
[81,72,93,19,69,52,48, 1,13,83,92,69,20,48,69,59,20,62, 5,42,28,89,90,99,32,72,84,17, 8,87,36, 3,60,31,36,36,81,26,97,36,48,54,56,56,27,16,91, 8,23,11,87,99,33,47, 2,14,44,73,70,99,43,35,33],
[90,56,61,86,56,12,70,59,63,32, 1,15,81,47,71,76,95,32,65,80,54,70,34,51,40,45,33, 4,64,55,78,68,88,47,31,47,68,87, 3,84,23,44,89,72,35, 8,31,76,63,26,90,85,96,67,65,91,19,14,17,86, 4,71,32,95],
[37,13, 4,22,64,37,37,28,56,62,86,33, 7,37,10,44,52,82,52, 6,19,52,57,75,90,26,91,24, 6,21,14,67,76,30,46,14,35,89,89,41, 3,64,56,97,87,63,22,34, 3,79,17,45,11,53,25,56,96,61,23,18,63,31,37,37,47],
[77,23,26,70,72,76,77, 4,28,64,71,69,14,85,96,54,95,48, 6,62,99,83,86,77,97,75,71,66,30,19,57,90,33, 1,60,61,14,12,90,99,32,77,56,41,18,14,87,49,10,14,90,64,18,50,21,74,14,16,88, 5,45,73,82,47,74,44],
[22,97,41,13,34,31,54,61,56,94, 3,24,59,27,98,77, 4, 9,37,40,12,26,87, 9,71,70, 7,18,64,57,80,21,12,71,83,94,60,39,73,79,73,19,97,32,64,29,41, 7,48,84,85,67,12,74,95,20,24,52,41,67,56,61,29,93,35,72,69],
[72,23,63,66, 1,11, 7,30,52,56,95,16,65,26,83,90,50,74,60,18,16,48,43,77,37,11,99,98,30,94,91,26,62,73,45,12,87,73,47,27, 1,88,66,99,21,41,95,80, 2,53,23,32,61,48,32,43,43,83,14,66,95,91,19,81,80,67,25,88],
[ 8,62,32,18,92,14,83,71,37,96,11,83,39,99, 5,16,23,27,10,67, 2,25,44,11,55,31,46,64,41,56,44,74,26,81,51,31,45,85,87, 9,81,95,22,28,76,69,46,48,64,87,67,76,27,89,31,11,74,16,62, 3,60,94,42,47, 9,34,94,93,72],
[56,18,90,18,42,17,42,32,14,86, 6,53,33,95,99,35,29,15,44,20,49,59,25,54,34,59,84,21,23,54,35,90,78,16,93,13,37,88,54,19,86,67,68,55,66,84,65,42,98,37,87,56,33,28,58,38,28,38,66,27,52,21,81,15, 8,22,97,32,85,27],
[91,53,40,28,13,34,91,25, 1,63,50,37,22,49,71,58,32,28,30,18,68,94,23,83,63,62,94,76,80,41,90,22,82,52,29,12,18,56,10, 8,35,14,37,57,23,65,67,40,72,39,93,39,70,89,40,34, 7,46,94,22,20, 5,53,64,56,30, 5,56,61,88,27],
[23,95,11,12,37,69,68,24,66,10,87,70,43,50,75, 7,62,41,83,58,95,93,89,79,45,39, 2,22, 5,22,95,43,62,11,68,29,17,40,26,44,25,71,87,16,70,85,19,25,59,94,90,41,41,80,61,70,55,60,84,33,95,76,42,63,15, 9, 3,40,38,12, 3,32],
[ 9,84,56,80,61,55,85,97,16,94,82,94,98,57,84,30,84,48,93,90,71, 5,95,90,73,17,30,98,40,64,65,89, 7,79, 9,19,56,36,42,30,23,69,73,72, 7, 5,27,61,24,31,43,48,71,84,21,28,26,65,65,59,65,74,77,20,10,81,61,84,95, 8,52,23,70],
[47,81,28, 9,98,51,67,64,35,51,59,36,92,82,77,65,80,24,72,53,22, 7,27,10,21,28,30,22,48,82,80,48,56,20,14,43,18,25,50,95,90,31,77, 8, 9,48,44,80,90,22,93,45,82,17,13,96,25,26, 8,73,34,99, 6,49,24, 6,83,51,40,14,15,10,25, 1],
[54,25,10,81,30,64,24,74,75,80,36,75,82,60,22,69,72,91,45,67, 3,62,79,54,89,74,44,83,64,96,66,73,44,30,74,50,37, 5, 9,97,70, 1,60,46,37,91,39,75,75,18,58,52,72,78,51,81,86,52, 8,97, 1,46,43,66,98,62,81,18,70,93,73, 8,32,46,34],
[96,80,82, 7,59,71,92,53,19,20,88,66, 3,26,26,10,24,27,50,82,94,73,63, 8,51,33,22,45,19,13,58,33,90,15,22,50,36,13,55, 6,35,47,82,52,33,61,36,27,28,46,98,14,73,20,73,32,16,26,80,53,47,66,76,38,94,45, 2, 1,22,52,47,96,64,58,52,39],
[88,46,23,39,74,63,81,64,20,90,33,33,76,55,58,26,10,46,42,26,74,74,12,83,32,43, 9, 2,73,55,86,54,85,34,28,23,29,79,91,62,47,41,82,87,99,22,48,90,20, 5,96,75,95, 4,43,28,81,39,81, 1,28,42,78,25,39,77,90,57,58,98,17,36,73,22,63,74,51],
[29,39,74,94,95,78,64,24,38,86,63,87,93, 6,70,92,22,16,80,64,29,52,20,27,23,50,14,13,87,15,72,96,81,22, 8,49,72,30,70,24,79,31,16,64,59,21,89,34,96,91,48,76,43,53,88, 1,57,80,23,81,90,79,58, 1,80,87,17,99,86,90,72,63,32,69,14,28,88,69],
[37,17,71,95,56,93,71,35,43,45, 4,98,92,94,84,96,11,30,31,27,31,60,92, 3,48, 5,98,91,86,94,35,90,90, 8,48,19,33,28,68,37,59,26,65,96,50,68,22, 7, 9,49,34,31,77,49,43, 6,75,17,81,87,61,79,52,26,27,72,29,50, 7,98,86, 1,17,10,46,64,24,18,56],
[51,30,25,94,88,85,79,91,40,33,63,84,49,67,98,92,15,26,75,19,82, 5,18,78,65,93,61,48,91,43,59,41,70,51,22,15,92,81,67,91,46,98,11,11,65,31,66,10,98,65,83,21, 5,56, 5,98,73,67,46,74,69,34, 8,30, 5,52, 7,98,32,95,30,94,65,50,24,63,28,81,99,57],
[19,23,61,36, 9,89,71,98,65,17,30,29,89,26,79,74,94,11,44,48,97,54,81,55,39,66,69,45,28,47,13,86,15,76,74,70,84,32,36,33,79,20,78,14,41,47,89,28,81, 5,99,66,81,86,38,26, 6,25,13,60,54,55,23,53,27, 5,89,25,23,11,13,54,59,54,56,34,16,24,53,44, 6],
[13,40,57,72,21,15,60, 8, 4,19,11,98,34,45, 9,97,86,71, 3,15,56,19,15,44,97,31,90, 4,87,87,76, 8,12,30,24,62,84,28,12,85,82,53,99,52,13,94, 6,65,97,86, 9,50,94,68,69,74,30,67,87,94,63, 7,78,27,80,36,69,41, 6,92,32,78,37,82,30, 5,18,87,99,72,19,99],
[44,20,55,77,69,91,27,31,28,81,80,27, 2, 7,97,23,95,98,12,25,75,29,47,71, 7,47,78,39,41,59,27,76,13,15,66,61,68,35,69,86,16,53,67,63,99,85,41,56, 8,28,33,40,94,76,90,85,31,70,24,65,84,65,99,82,19,25,54,37,21,46,33, 2,52,99,51,33,26, 4,87, 2, 8,18,96],
[54,42,61,45,91, 6,64,79,80,82,32,16,83,63,42,49,19,78,65,97,40,42,14,61,49,34, 4,18,25,98,59,30,82,72,26,88,54,36,21,75, 3,88,99,53,46,51,55,78,22,94,34,40,68,87,84,25,30,76,25, 8,92,84,42,61,40,38, 9,99,40,23,29,39,46,55,10,90,35,84,56,70,63,23,91,39],
[52,92, 3,71,89, 7, 9,37,68,66,58,20,44,92,51,56,13,71,79,99,26,37, 2, 6,16,67,36,52,58,16,79,73,56,60,59,27,44,77,94,82,20,50,98,33, 9,87,94,37,40,83,64,83,58,85,17,76,53, 2,83,52,22,27,39,20,48,92,45,21, 9,42,24,23,12,37,52,28,50,78,79,20,86,62,73,20,59],
[54,96,80,15,91,90,99,70,10, 9,58,90,93,50,81,99,54,38,36,10,30,11,35,84,16,45,82,18,11,97,36,43,96,79,97,65,40,48,23,19,17,31,64,52,65,65,37,32,65,76,99,79,34,65,79,27,55,33, 3, 1,33,27,61,28,66, 8, 4,70,49,46,48,83, 1,45,19,96,13,81,14,21,31,79,93,85,50, 5],
[92,92,48,84,59,98,31,53,23,27,15,22,79,95,24,76, 5,79,16,93,97,89,38,89,42,83, 2,88,94,95,82,21, 1,97,48,39,31,78, 9,65,50,56,97,61, 1, 7,65,27,21,23,14,15,80,97,44,78,49,35,33,45,81,74,34, 5,31,57, 9,38,94, 7,69,54,69,32,65,68,46,68,78,90,24,28,49,51,45,86,35],
[41,63,89,76,87,31,86, 9,46,14,87,82,22,29,47,16,13,10,70,72,82,95,48,64,58,43,13,75,42,69,21,12,67,13,64,85,58,23,98, 9,37,76, 5,22,31,12,66,50,29,99,86,72,45,25,10,28,19, 6,90,43,29,31,67,79,46,25,74,14,97,35,76,37,65,46,23,82, 6,22,30,76,93,66,94,17,96,13,20,72],
[63,40,78, 8,52, 9,90,41,70,28,36,14,46,44,85,96,24,52,58,15,87,37, 5,98,99,39,13,61,76,38,44,99,83,74,90,22,53,80,56,98,30,51,63,39,44,30,91,91, 4,22,27,73,17,35,53,18,35,45,54,56,27,78,48,13,69,36,44,38,71,25,30,56,15,22,73,43,32,69,59,25,93,83,45,11,34,94,44,39,92],
[12,36,56,88,13,96,16,12,55,54,11,47,19,78,17,17,68,81,77,51,42,55,99,85,66,27,81,79,93,42,65,61,69,74,14, 1,18,56,12, 1,58,37,91,22,42,66,83,25,19, 4,96,41,25,45,18,69,96,88,36,93,10,12,98,32,44,83,83, 4,72,91, 4,27,73, 7,34,37,71,60,59,31, 1,54,54,44,96,93,83,36, 4,45],
[30,18,22,20,42,96,65,79,17,41,55,69,94,81,29,80,91,31,85,25,47,26,43,49, 2,99,34,67,99,76,16,14,15,93, 8,32,99,44,61,77,67,50,43,55,87,55,53,72,17,46,62,25,50,99,73, 5,93,48,17,31,70,80,59, 9,44,59,45,13,74,66,58,94,87,73,16,14,85,38,74,99,64,23,79,28,71,42,20,37,82,31,23],
[51,96,39,65,46,71,56,13,29,68,53,86,45,33,51,49,12,91,21,21,76,85, 2,17,98,15,46,12,60,21,88,30,92,83,44,59,42,50,27,88,46,86,94,73,45,54,23,24,14,10,94,21,20,34,23,51, 4,83,99,75,90,63,60,16,22,33,83,70,11,32,10,50,29,30,83,46,11, 5,31,17,86,42,49, 1,44,63,28,60, 7,78,95,40],
[44,61,89,59, 4,49,51,27,69,71,46,76,44, 4, 9,34,56,39,15, 6,94,91,75,90,65,27,56,23,74, 6,23,33,36,69,14,39, 5,34,35,57,33,22,76,46,56,10,61,65,98, 9,16,69, 4,62,65,18,99,76,49,18,72,66,73,83,82,40,76,31,89,91,27,88,17,35,41,35,32,51,32,67,52,68,74,85,80,57, 7,11,62,66,47,22,67],
[65,37,19,97,26,17,16,24,24,17,50,37,64,82,24,36,32,11,68,34,69,31,32,89,79,93,96,68,49,90,14,23, 4, 4,67,99,81,74,70,74,36,96,68, 9,64,39,88,35,54,89,96,58,66,27,88,97,32,14, 6,35,78,20,71, 6,85,66,57, 2,58,91,72, 5,29,56,73,48,86,52, 9,93,22,57,79,42,12, 1,31,68,17,59,63,76, 7,77],
[73,81,14,13,17,20,11, 9, 1,83, 8,85,91,70,84,63,62,77,37, 7,47, 1,59,95,39,69,39,21,99, 9,87, 2,97,16,92,36,74,71,90,66,33,73,73,75,52,91,11,12,26,53, 5,26,26,48,61,50,90,65, 1,87,42,47,74,35,22,73,24,26,56,70,52, 5,48,41,31,18,83,27,21,39,80,85,26, 8,44, 2,71, 7,63,22, 5,52,19, 8,20],
[17,25,21,11,72,93,33,49,64,23,53,82, 3,13,91,65,85, 2,40, 5,42,31,77,42, 5,36, 6,54, 4,58, 7,76,87,83,25,57,66,12,74,33,85,37,74,32,20,69, 3,97,91,68,82,44,19,14,89,28,85,85,80,53,34,87,58,98,88,78,48,65,98,40,11,57,10,67,70,81,60,79,74,72,97,59,79,47,30,20,54,80,89,91,14, 5,33,36,79,39],
[60,85,59,39,60, 7,57,76,77,92, 6,35,15,72,23,41,45,52,95,18,64,79,86,53,56,31,69,11,91,31,84,50,44,82,22,81,41,40,30,42,30,91,48,94,74,76,64,58,74,25,96,57,14,19, 3,99,28,83,15,75,99, 1,89,85,79,50, 3,95,32,67,44, 8, 7,41,62,64,29,20,14,76,26,55,48,71,69,66,19,72,44,25,14, 1,48,74,12,98, 7],
[64,66,84,24,18,16,27,48,20,14,47,69,30,86,48,40,23,16,61,21,51,50,26,47,35,33,91,28,78,64,43,68, 4,79,51, 8,19,60,52,95, 6,68,46,86,35,97,27,58, 4,65,30,58,99,12,12,75,91,39,50,31,42,64,70, 4,46, 7,98,73,98,93,37,89,77,91,64,71,64,65,66,21,78,62,81,74,42,20,83,70,73,95,78,45,92,27,34,53,71,15],
[30,11,85,31,34,71,13,48, 5,14,44, 3,19,67,23,73,19,57, 6,90,94,72,57,69,81,62,59,68,88,57,55,69,49,13, 7,87,97,80,89, 5,71, 5, 5,26,38,40,16,62,45,99,18,38,98,24,21,26,62,74,69, 4,85,57,77,35,58,67,91,79,79,57,86,28,66,34,72,51,76,78,36,95,63,90, 8,78,47,63,45,31,22,70,52,48,79,94,15,77,61,67,68],
[23,33,44,81,80,92,93,75,94,88,23,61,39,76,22, 3,28,94,32, 6,49,65,41,34,18,23, 8,47,62,60, 3,63,33,13,80,52,31,54,73,43,70,26,16,69,57,87,83,31, 3,93,70,81,47,95,77,44,29,68,39,51,56,59,63, 7,25,70, 7,77,43,53,64, 3,94,42,95,39,18, 1,66,21,16,97,20,50,90,16,70,10,95,69,29, 6,25,61,41,26,15,59,63,35],
]
# Solve and print the answer when executed directly as a script.
if __name__ == "__main__":
    print(compute())
| [
"[email protected]"
] | |
b89827e7bd2186efac21f3de64db0f0df6ff1c32 | c2296f56df3b934f824be07338e14bccf7c0e34f | /url_classification/data/movie_reviews/__init__.py | b3a85173320bf97854087bfab6ecbd94c0f6812c | [] | no_license | jayceyxc/MachineLearning | b190c141be714f4ef7d8b79fab1d0cddc6b7cfcb | 793179dab920725866c4fac4d2bae8e1a570d122 | refs/heads/master | 2022-04-16T21:39:05.652266 | 2020-04-14T07:51:04 | 2020-04-14T07:51:04 | 140,239,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@version: 1.0
@author: ‘yuxuecheng‘
@contact: [email protected]
@software: PyCharm Community Edition
@file: __init__.py.py
@time: 2017/8/7 12:18
""" | [
"[email protected]"
] | |
bc4b65ebdb6ee14010eca2df7ef43ad79f259952 | ab1219ddcc33c6162baa670b5cf84e9e6780dba2 | /benchmarks/bp09/tsuji_gauges/setrun_1-5.py | b5d502247dccbfd60e1885c95e5bd651f7b5e2a9 | [] | no_license | dhanyaj17/geoclaw-group | 1672ff47992f4901cb81ac6aebaf58ae122ad466 | 6acc142ce0ec14ca00944e1d2b96cf7080ad3db4 | refs/heads/master | 2020-12-11T04:01:02.844249 | 2013-01-14T01:30:14 | 2013-01-14T01:30:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,042 | py | ## Randy: This run took about 4 hours, as it is set up now.
"""
Module to set up run time parameters for Clawpack.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
import os
from pyclaw import data
import numpy as np
#------------------------------
def setrun(claw_pkg='geoclaw'):
    """
    Define the parameters used for running Clawpack.

    Builds the ClawRunData object for this Okushiri (bp09) benchmark run:
    a 60x60 lon/lat coarse grid over the island, 5 AMR levels, and 7 output
    frames over 7 minutes of simulated time.

    INPUT:
        claw_pkg expected to be "geoclaw" for this setrun.

    OUTPUT:
        rundata - object of class ClawRunData
    """

    assert claw_pkg.lower() == 'geoclaw', "Expected claw_pkg = 'geoclaw'"

    ndim = 2
    rundata = data.ClawRunData(claw_pkg, ndim)

    #------------------------------------------------------------------
    # Problem-specific parameters to be written to setprob.data:
    #------------------------------------------------------------------
    #probdata = rundata.new_UserData(name='probdata',fname='setprob.data')

    #------------------------------------------------------------------
    # GeoClaw specific parameters:
    #------------------------------------------------------------------
    rundata = setgeo(rundata)   # Defined below

    #------------------------------------------------------------------
    # Standard Clawpack parameters to be written to claw.data:
    # (or to amr2ez.data for AMR)
    #------------------------------------------------------------------
    clawdata = rundata.clawdata  # initialized when rundata instantiated

    clawdata.restart = False  # Turn restart switch on or off

    # Set single grid parameters first.
    # See below for AMR parameters.

    # ---------------
    # Spatial domain:
    # ---------------

    # Number of space dimensions:
    clawdata.ndim = ndim

    # Lower and upper edge of computational domain (degrees lon/lat).
    # Earlier, larger domains are kept commented out for reference:
    # clawdata.xlower = 137.57 ##
    # clawdata.xupper = 141.41 ##
    # clawdata.ylower = 39.67 ##
    # clawdata.yupper = 44.15 ##
    # For OK08 grid:
    # clawdata.xlower = 138.5015 ##
    # clawdata.xupper = 140.541 ##
    # clawdata.ylower = 40.5215 ##
    # clawdata.yupper = 43.2988 ##
    clawdata.xlower = 139.05 ##
    clawdata.xupper = 140. ##
    clawdata.ylower = 41.6 ##
    clawdata.yupper = 42.55 ##

    # Number of grid cells (earlier resolutions kept for reference):
    # clawdata.mx = 36 ## 3.84 deg/36 cells = 384 sec/cell = 16*24 sec/cell
    # clawdata.my = 42 ## 4.48 deg/42 cells = 384 sec/cell = 16*24 sec/cell
    # clawdata.mx = 576 ## 3.84 deg/576 cells = 24 sec/cell
    # clawdata.my = 672 ## 4.48 deg/672 cells = 24 sec/cell
    # clawdata.mx = 84 ## 8*24 sec/cell
    # clawdata.my = 72 ## 8*24 sec/cell
    clawdata.mx = 60
    clawdata.my = 60

    # ---------------
    # Size of system:
    # ---------------

    # Number of equations in the system:
    clawdata.meqn = 3

    # Number of auxiliary variables in the aux array (initialized in setaux)
    clawdata.maux = 3

    # Index of aux array corresponding to capacity function, if there is one:
    clawdata.mcapa = 2

    # -------------
    # Initial time:
    # -------------

    clawdata.t0 = 0.0

    # -------------
    # Output times:
    #--------------

    # Specify at what times the results should be written to fort.q files.
    # Note that the time integration stops after the final output time.
    # The solution at initial time t0 is always written in addition.
    clawdata.outstyle = 1 ##

    if clawdata.outstyle==1:
        # Output nout frames at equally spaced times up to tfinal:
        # Note: Frame time intervals = (tfinal-t0)/nout
        clawdata.nout = 7 ## Number of frames (plus the t = 0.0 frame)
        clawdata.tfinal = 7*60 ## End run time in Seconds

    elif clawdata.outstyle == 2:
        # Specify a list of output times.
        from numpy import arange,linspace
        #clawdata.tout = list(arange(0,3600,360)) + list(3600*arange(0,21,0.5))
        # clawdata.tout = list(linspace(0,32000,9)) + \
        # list(linspace(32500,40000,16))
        clawdata.tout = list(linspace(0,4,2))
        clawdata.nout = len(clawdata.tout)

    elif clawdata.outstyle == 3:
        # Output every iout timesteps with a total of ntot time steps:
        iout = 1
        ntot = 1
        clawdata.iout = [iout, ntot]

    # ---------------------------------------------------
    # Verbosity of messages to screen during integration:
    # ---------------------------------------------------

    # The current t, dt, and cfl will be printed every time step
    # at AMR levels <= verbosity.  Set verbosity = 0 for no printing.
    # (E.g. verbosity == 2 means print only on levels 1 and 2.)
    clawdata.verbosity = 1

    # --------------
    # Time stepping:
    # --------------

    # if dt_variable==1: variable time steps used based on cfl_desired,
    # if dt_variable==0: fixed time steps dt = dt_initial will always be used.
    clawdata.dt_variable = 1

    # Initial time step for variable dt.
    # If dt_variable==0 then dt=dt_initial for all steps:
    clawdata.dt_initial = 0.016

    # Max time step to be allowed if variable dt used:
    clawdata.dt_max = 1e+99

    # Desired Courant number if variable dt used, and max to allow without
    # retaking step with a smaller dt:
    clawdata.cfl_desired = 0.75
    clawdata.cfl_max = 1.0

    # Maximum number of time steps to allow between output times:
    clawdata.max_steps = 50000

    # ------------------
    # Method to be used:
    # ------------------

    # Order of accuracy:  1 => Godunov,  2 => Lax-Wendroff plus limiters
    clawdata.order = 2

    # Transverse order for 2d or 3d (not used in 1d):
    clawdata.order_trans = 2

    # Number of waves in the Riemann solution:
    clawdata.mwaves = 3

    # List of limiters to use for each wave family:
    # Required:  len(mthlim) == mwaves
    clawdata.mthlim = [3,3,3]

    # Source terms splitting:
    #   src_split == 0 => no source term (src routine never called)
    #   src_split == 1 => Godunov (1st order) splitting used,
    #   src_split == 2 => Strang (2nd order) splitting used, not recommended.
    clawdata.src_split = 1

    # --------------------
    # Boundary conditions:
    # --------------------

    # Number of ghost cells (usually 2)
    clawdata.mbc = 2

    # Choice of BCs at xlower and xupper:
    #   0 => user specified (must modify bcN.f to use this option)
    #   1 => extrapolation (non-reflecting outflow)
    #   2 => periodic (must specify this at both boundaries)
    #   3 => solid wall for systems where q(2) is normal velocity
    clawdata.mthbc_xlower = 1  # Open Left BC
    clawdata.mthbc_xupper = 1  # Open Right BC
    clawdata.mthbc_ylower = 1  # Open Bottom BC
    clawdata.mthbc_yupper = 1  # Open Top BC

    # ---------------
    # AMR parameters:
    # ---------------

    # max number of refinement levels:
    mxnest = 5 ##

    clawdata.mxnest = -mxnest  # negative ==> anisotropic refinement in x,y,t

    # List of refinement ratios at each level (length at least mxnest-1)
    ## Levels           2 3 4 5
    clawdata.inratx = [2,4,4,6] ##
    clawdata.inraty = [2,4,4,6] ##
    clawdata.inratt = [2,4,4,2] ##

    # Specify type of each aux variable in clawdata.auxtype.
    # This must be a list of length maux, each element of which is one of:
    #   'center', 'capacity', 'xleft', or 'yleft'  (see documentation).
    clawdata.auxtype = ['center','capacity','yleft']

    clawdata.tol = -1.0      # negative ==> don't use Richardson estimator
    clawdata.tolsp = 0.5     # used in default flag2refine subroutine
                             # (Not used in geoclaw!)

    clawdata.kcheck = 3      # how often to regrid (every kcheck steps)
    clawdata.ibuff = 2       # width of buffer zone around flagged points

    clawdata.tchk = [33000., 35000.]  # when to checkpoint

    # More AMR parameters can be set -- see the defaults in pyclaw/data.py

    return rundata
# end of function setrun
# ----------------------
#-------------------
def setgeo(rundata):
#-------------------
"""
Set GeoClaw specific runtime parameters.
For documentation see ....
"""
try:
geodata = rundata.geodata
except:
print "*** Error, this rundata has no geodata attribute"
raise AttributeError("Missing geodata attribute")
# == setgeo.data values ==
geodata.variable_dt_refinement_ratios = True ## Overrides clawdata.inratt, above
geodata.igravity = 1
geodata.gravity = 9.81
geodata.icoordsys = 2
geodata.Rearth = 6367.5e3
geodata.icoriolis = 0
# == settsunami.data values ==
geodata.sealevel = 0.
geodata.drytolerance = 1.e-2
geodata.wavetolerance = 1.e-1 ##
geodata.depthdeep = 1.e6 ## Definition of "deep" water
geodata.maxleveldeep = 10 ## Restriction on the number of deep water levels
geodata.ifriction = 1 ## Friction switch. 0=off, 1=on
# geodata.coeffmanning =0.0
geodata.coeffmanning =.025
geodata.frictiondepth = 10.
#okushiri_dir = '/Users/FrankGonzalez/daily/modeling/tsunami-benchmarks/github/' \
#+ 'FrankGonzalez/geoclaw-group/benchmarks/bp09' ##
okushiri_dir = '..' ## this directory
# == settopo.data values ==
geodata.topofiles = []
# for topography, append lines of the form
# [topotype, minlevel, maxlevel, t1, t2, fname]
# geodata.topofiles.append([1, 1, 1, 0, 1.e10, \
# okushiri_dir + '/OK24.tt1']) ## 24-s, ~550-740 m Entire Domain (Dmitry's version of Kansai U.)
geodata.topofiles.append([1, 1, 1, 0, 1.e10, \
okushiri_dir + '/OK08.tt1']) ## 8-s, ~184-247 m Okushiri (Dmitry's version of Kansai U.)
geodata.topofiles.append([1, 1, 1, 0, 1.e10, \
okushiri_dir + '/OK03.tt1']) ## 2.67 s (8/3s), ~61-82 m Okushiri (Dmitry's version of Kansai U.)
geodata.topofiles.append([1, 1, 1, 0., 1.e10, \
okushiri_dir + '/AO15.tt1']) ## 0.53-0.89 s, ~16.5-20.4 m, Aonae (Dmitry's version of Kansai U.)
# geodata.topofiles.append([1, 1, 1, 0, 1.e10, \
# okushiri_dir + '/MO01.tt1']) ## 0.89 s, ~20-27 m, Monai (Dmitry's version of Kansai U.)
# geodata.topofiles.append([1, 1, 1, 0., 1.e10, \
# okushiri_dir + '/MB05.tt1']) ## 0.13-0.18 s, ~4 m Monai (Dmitry's version of Kansai U.)
# geodata.topofiles.append([-3, 1, 1, 0, 1.e10, \
# okushiri_dir + '/depth40_138.txt']) ## JODC 500 m
# geodata.topofiles.append([-3, 1, 1, 0, 1.e10, \
# okushiri_dir + '/depth40_140.txt']) ## JODC 500 m
# geodata.topofiles.append([-3, 1, 1, 0, 1.e10, \
# okushiri_dir + '/depth42_138.txt']) ## JODC 500 m
# geodata.topofiles.append([-3, 1, 1, 0, 1.e10, \
# okushiri_dir + '/depth42_140.txt']) ## JODC 500 m
# == setdtopo.data values ==
geodata.dtopofiles = []
# for moving topography, append lines of the form: (<= 1 allowed for now!)
# [topotype, minlevel,maxlevel,fname]
geodata.dtopofiles.append([1,2,3, okushiri_dir + '/HNO1993.txyz']) ## Dmitry N.'s version of Kansai U.
# == setqinit.data values ==
geodata.iqinit = 0
geodata.qinitfiles = []
# for qinit perturbations, append lines of the form: (<= 1 allowed for now!)
# [minlev, maxlev, fname]
#geodata.qinitfiles.append([1, 1, 'hump.xyz'])
# == setregions.data values ==
geodata.regions = []
# to specify regions of refinement append lines of the form
# [minlevel,maxlevel,t1,t2,x1,x2,y1,y2]
# Note: Level 1 = 24 s & Levels [2,3,4,5] = RF [3,3,3,8] => Res of 8 sec to 8/3 sec to 8/9 to 1/9 sec/cell
# Grid Limits
# Name x1 x2 y1 y2
# OK24 137.53666670 141.53000000 39.53666670 44.26333330
# HNO 138.50000000 140.55000000 40.51666670 43.30000000
# OK08 138.50111110 140.55222220 40.52111110 43.29888890
# OK03 139.38925930 139.66407410 41.99592590 42.27074070
# AO15 139.43419750 139.49987650 42.03118520 42.07251850
# MO01 139.41123460 139.43320990 42.07790120 42.14580250
# MB05 139.41385190 139.42639510 42.09458550 42.10343920
#geodata.regions.append([1, 1, 0., 1e9, 0.0, 360.0, -90.0, 90.0]) ## OK24: 24-s, ~550-740 m Entire Domain
geodata.regions.append([1, 2, 0., 1e9, 138.5, 139.7, 41.4, 43.3]) ## OK08: 8-s, ~184-247 m Okushiri
geodata.regions.append([1, 3, 0., 1e9, 139.39, 139.6, 42.0, 42.25]) ## OK03: 2.67 s (8/3s), ~61-82 m Okushiri
# geodata.regions.append([1, 4, 0., 1e9, 139.42, 139.57, 42.03, 42.23]) ## AO15: 0.53-8/9 s, ~16.5-20.4 m, Aonae
#geodata.regions.append([1, 4, 0., 1e9, 139.40, 139.46, 42.03, 42.22]) ## West coast Okushiri
geodata.regions.append([4, 4, 90., 1e9, 139.4, 139.432, 42.12, 42.2])
# == setgauges.data values ==
geodata.gauges = []
# for gauges append lines of the form [gaugeno, x, y, t1, t2]
geodata.gauges.append([1,139.429211710298,42.188181491811,0.0,1e9]) ## Tsuji Obs
geodata.gauges.append([3,139.411185686023,42.162762869034,0.0,1e9]) ## Tsuji Obs
geodata.gauges.append([5,139.418261206409,42.137404393442,0.0,1e9]) ## Tsuji Obs
# geodata.gauges.append([6,139.428035766149,42.093012384481,0.0,1e9]) ## Tsuji Obs
# geodata.gauges.append([7,139.426244998662,42.116554785296,0.0,1e9]) ## Tsuji Obs
# geodata.gauges.append([8,139.423714744650,42.100414145210,0.0,1e9]) ## Tsuji Obs
# geodata.gauges.append([9,139.428901803617,42.076636582137,0.0,1e9]) ## Tsuji Obs
# geodata.gauges.append([10,139.427853421935,42.065461519438,0.0,1e9]) ## Tsuji Obs
# geodata.gauges.append([11,139.451539852594,42.044696547058,0.0,1e9]) ## Tsuji Obs
# geodata.gauges.append([12,139.456528443496,42.051692262353,0.0,1e9]) ## Tsuji Obs
# geodata.gauges.append([13,139.456528443496,42.051692262353,0.0,1e9]) ## Tsuji Obs
#
# == setfixedgrids.data values ==
geodata.fixedgrids = []
for g in geodata.gauges:
xg = g[1]
yg = g[2]
xg1 = xg - 0.001
xg2 = xg + 0.002
yg1 = yg - 0.001
yg2 = yg + 0.002
nx = 31
ny = 31
gaugeno = g[0]
if gaugeno == 9:
xg2 = xg + 0.003
nx = 41
if gaugeno == 8:
xg1 = xg - 0.002
xg2 = xg + 0.001
yg1 = yg - 0.002
yg2 = yg + 0.001
geodata.fixedgrids.append([210.0,360.0,11,xg1,xg2,yg1,yg2,nx,ny,0,1])
geodata.regions.append([5, 5, 180., 1e9, xg1-0.002,xg2,yg1-0.001,yg2])
return rundata
# end of function setgeo
# ----------------------
if __name__ == '__main__':
    # Entry point: build the run-time parameter object and write out
    # every Clawpack *.data file it describes.
    import sys

    argv = sys.argv
    # An optional single command-line argument selects the setrun variant;
    # with no argument the default configuration is used.
    rundata = setrun(argv[1]) if len(argv) == 2 else setrun()
    rundata.write()
| [
"[email protected]"
] | |
5b790020d26b72bff49b53062649a522938f40a0 | 63bacb52d016cf7a237dacd79ba2861842c49ca9 | /zuora_client/models/amendment_rate_plan_charge_data.py | ae75c81b0fd08894b355d536105e55ada0a62f60 | [] | no_license | arundharumar-optimizely/zuora-client-python | ee9667956b32b64b456920ad6246e02528fe6645 | a529a01364e41844c91f39df300c85c8d332912a | refs/heads/master | 2020-07-05T23:09:20.081816 | 2019-07-30T21:46:47 | 2019-07-30T21:46:47 | 202,811,594 | 0 | 0 | null | 2019-08-16T23:26:52 | 2019-08-16T23:26:52 | null | UTF-8 | Python | false | false | 44,271 | py | # coding: utf-8
"""
Zuora API Reference
# Introduction Welcome to the reference for the Zuora REST API! <a href=\"http://en.wikipedia.org/wiki/REST_API\" target=\"_blank\">REST</a> is a web-service protocol that lends itself to rapid development by using everyday HTTP and JSON technology. The Zuora REST API provides a broad set of operations and resources that: * Enable Web Storefront integration from your website. * Support self-service subscriber sign-ups and account management. * Process revenue schedules through custom revenue rule models. * Enable manipulation of most objects in the Zuora Object Model. Want to share your opinion on how our API works for you? <a href=\"https://community.zuora.com/t5/Developers/API-Feedback-Form/gpm-p/21399\" target=\"_blank\">Tell us how you feel </a>about using our API and what we can do to make it better. ## Access to the API If you have a Zuora tenant, you can access the Zuora REST API via one of the following endpoints: | Tenant | Base URL for REST Endpoints | |-------------------------|-------------------------| |US Production | https://rest.zuora.com | |US API Sandbox | https://rest.apisandbox.zuora.com| |US Performance Test | https://rest.pt1.zuora.com | |EU Production | https://rest.eu.zuora.com | |EU Sandbox | https://rest.sandbox.eu.zuora.com | The Production endpoint provides access to your live user data. API Sandbox tenants are a good place to test code without affecting real-world data. If you would like Zuora to provision an API Sandbox tenant for you, contact your Zuora representative for assistance. **Note:** If you have a tenant in the Production Copy Environment, submit a request at <a href=\"http://support.zuora.com/\" target=\"_blank\">Zuora Global Support</a> to enable the Zuora REST API in your tenant and obtain the base URL for REST endpoints. 
If you do not have a Zuora tenant, go to <a href=\"https://www.zuora.com/resource/zuora-test-drive\" target=\"_blank\">https://www.zuora.com/resource/zuora-test-drive</a> and sign up for a Production Test Drive tenant. The tenant comes with seed data, including a sample product catalog. # API Changelog You can find the <a href=\"https://community.zuora.com/t5/Developers/API-Changelog/gpm-p/18092\" target=\"_blank\">Changelog</a> of the API Reference in the Zuora Community. # Authentication ## OAuth v2.0 Zuora recommends that you use OAuth v2.0 to authenticate to the Zuora REST API. Currently, OAuth is not available in every environment. See [Zuora Testing Environments](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/D_Zuora_Environments) for more information. Zuora recommends you to create a dedicated API user with API write access on a tenant when authenticating via OAuth, and then create an OAuth client for this user. See <a href=\"https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users/Create_an_API_User\" target=\"_blank\">Create an API User</a> for how to do this. By creating a dedicated API user, you can control permissions of the API user without affecting other non-API users. If a user is deactivated, all of the user's OAuth clients will be automatically deactivated. Authenticating via OAuth requires the following steps: 1. Create a Client 2. Generate a Token 3. Make Authenticated Requests ### Create a Client You must first [create an OAuth client](https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users#Create_an_OAuth_Client_for_a_User) in the Zuora UI. To do this, you must be an administrator of your Zuora tenant. This is a one-time operation. You will be provided with a Client ID and a Client Secret. Please note this information down, as it will be required for the next step. **Note:** The OAuth client will be owned by a Zuora user account. 
If you want to perform PUT, POST, or DELETE operations using the OAuth client, the owner of the OAuth client must have a Platform role that includes the \"API Write Access\" permission. ### Generate a Token After creating a client, you must make a call to obtain a bearer token using the [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) operation. This operation requires the following parameters: - `client_id` - the Client ID displayed when you created the OAuth client in the previous step - `client_secret` - the Client Secret displayed when you created the OAuth client in the previous step - `grant_type` - must be set to `client_credentials` **Note**: The Client ID and Client Secret mentioned above were displayed when you created the OAuth Client in the prior step. The [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) response specifies how long the bearer token is valid for. Call [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) again to generate a new bearer token. ### Make Authenticated Requests To authenticate subsequent API requests, you must provide a valid bearer token in an HTTP header: `Authorization: Bearer {bearer_token}` If you have [Zuora Multi-entity](https://www.zuora.com/developer/api-reference/#tag/Entities) enabled, you need to set an additional header to specify the ID of the entity that you want to access. You can use the `scope` field in the [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) response to determine whether you need to specify an entity ID. If the `scope` field contains more than one entity ID, you must specify the ID of the entity that you want to access. 
For example, if the `scope` field contains `entity.1a2b7a37-3e7d-4cb3-b0e2-883de9e766cc` and `entity.c92ed977-510c-4c48-9b51-8d5e848671e9`, specify one of the following headers: - `Zuora-Entity-Ids: 1a2b7a37-3e7d-4cb3-b0e2-883de9e766cc` - `Zuora-Entity-Ids: c92ed977-510c-4c48-9b51-8d5e848671e9` **Note**: For a limited period of time, Zuora will accept the `entityId` header as an alternative to the `Zuora-Entity-Ids` header. If you choose to set the `entityId` header, you must remove all \"-\" characters from the entity ID in the `scope` field. If the `scope` field contains a single entity ID, you do not need to specify an entity ID. ## Other Supported Authentication Schemes Zuora continues to support the following additional legacy means of authentication: * Use username and password. Include authentication with each request in the header: * `apiAccessKeyId` * `apiSecretAccessKey` Zuora recommends that you create an API user specifically for making API calls. See <a href=\"https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users/Create_an_API_User\" target=\"_blank\">Create an API User</a> for more information. * Use an authorization cookie. The cookie authorizes the user to make calls to the REST API for the duration specified in **Administration > Security Policies > Session timeout**. The cookie expiration time is reset with this duration after every call to the REST API. To obtain a cookie, call the [Connections](https://www.zuora.com/developer/api-reference/#tag/Connections) resource with the following API user information: * ID * Password * For CORS-enabled APIs only: Include a 'single-use' token in the request header, which re-authenticates the user with each request. See below for more details. ### Entity Id and Entity Name The `entityId` and `entityName` parameters are only used for [Zuora Multi-entity](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity \"Zuora Multi-entity\"). 
These are the legacy parameters that Zuora will only continue to support for a period of time. Zuora recommends you to use the `Zuora-Entity-Ids` parameter instead. The `entityId` and `entityName` parameters specify the Id and the [name of the entity](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity/B_Introduction_to_Entity_and_Entity_Hierarchy#Name_and_Display_Name \"Introduction to Entity and Entity Hierarchy\") that you want to access, respectively. Note that you must have permission to access the entity. You can specify either the `entityId` or `entityName` parameter in the authentication to access and view an entity. * If both `entityId` and `entityName` are specified in the authentication, an error occurs. * If neither `entityId` nor `entityName` is specified in the authentication, you will log in to the entity in which your user account is created. To get the entity Id and entity name, you can use the GET Entities REST call. For more information, see [API User Authentication](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity/A_Overview_of_Multi-entity#API_User_Authentication \"API User Authentication\"). ### Token Authentication for CORS-Enabled APIs The CORS mechanism enables REST API calls to Zuora to be made directly from your customer's browser, with all credit card and security information transmitted directly to Zuora. This minimizes your PCI compliance burden, allows you to implement advanced validation on your payment forms, and makes your payment forms look just like any other part of your website. For security reasons, instead of using cookies, an API request via CORS uses **tokens** for authentication. The token method of authentication is only designed for use with requests that must originate from your customer's browser; **it should not be considered a replacement to the existing cookie authentication** mechanism. 
See [Zuora CORS REST](https://knowledgecenter.zuora.com/DC_Developers/C_REST_API/Zuora_CORS_REST \"Zuora CORS REST\") for details on how CORS works and how you can begin to implement customer calls to the Zuora REST APIs. See [HMAC Signatures](https://www.zuora.com/developer/api-reference/#operation/POSTHMACSignature \"HMAC Signatures\") for details on the HMAC method that returns the authentication token. # Requests and Responses ## Request IDs As a general rule, when asked to supply a \"key\" for an account or subscription (accountKey, account-key, subscriptionKey, subscription-key), you can provide either the actual ID or the number of the entity. ## HTTP Request Body Most of the parameters and data accompanying your requests will be contained in the body of the HTTP request. The Zuora REST API accepts JSON in the HTTP request body. No other data format (e.g., XML) is supported. ### Data Type ([Actions](https://www.zuora.com/developer/api-reference/#tag/Actions) and CRUD operations only) We recommend that you do not specify the decimal values with quotation marks, commas, and spaces. Use characters of `+-0-9.eE`, for example, `5`, `1.9`, `-8.469`, and `7.7e2`. Also, Zuora does not convert currencies for decimal values. ## Testing a Request Use a third party client, such as [curl](https://curl.haxx.se \"curl\"), [Postman](https://www.getpostman.com \"Postman\"), or [Advanced REST Client](https://advancedrestclient.com \"Advanced REST Client\"), to test the Zuora REST API. You can test the Zuora REST API from the Zuora API Sandbox or Production tenants. If connecting to Production, bear in mind that you are working with your live production data, not sample data or test data. ## Testing with Credit Cards Sooner or later it will probably be necessary to test some transactions that involve credit cards. 
For suggestions on how to handle this, see [Going Live With Your Payment Gateway](https://knowledgecenter.zuora.com/CB_Billing/M_Payment_Gateways/C_Managing_Payment_Gateways/B_Going_Live_Payment_Gateways#Testing_with_Credit_Cards \"C_Zuora_User_Guides/A_Billing_and_Payments/M_Payment_Gateways/C_Managing_Payment_Gateways/B_Going_Live_Payment_Gateways#Testing_with_Credit_Cards\" ). ## Concurrent Request Limits Zuora enforces tenant-level concurrent request limits. See <a href=\"https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Policies/Concurrent_Request_Limits\" target=\"_blank\">Concurrent Request Limits</a> for more information. ## Timeout Limit If a request does not complete within 120 seconds, the request times out and Zuora returns a Gateway Timeout error. ## Error Handling Responses and error codes are detailed in [Responses and errors](https://knowledgecenter.zuora.com/DC_Developers/C_REST_API/Responses_and_Errors \"Responses and errors\"). # Pagination When retrieving information (using GET methods), the optional `pageSize` query parameter sets the maximum number of rows to return in a response. The maximum is `40`; larger values are treated as `40`. If this value is empty or invalid, `pageSize` typically defaults to `10`. The default value for the maximum number of rows retrieved can be overridden at the method level. If more rows are available, the response will include a `nextPage` element, which contains a URL for requesting the next page. If this value is not provided, no more rows are available. No \"previous page\" element is explicitly provided; to support backward paging, use the previous call. ## Array Size For data items that are not paginated, the REST API supports arrays of up to 300 rows. Thus, for instance, repeated pagination can retrieve thousands of customer accounts, but within any account an array of no more than 300 rate plans is returned. # API Versions The Zuora REST API are version controlled. 
Versioning ensures that Zuora REST API changes are backward compatible. Zuora uses a major and minor version nomenclature to manage changes. By specifying a version in a REST request, you can get expected responses regardless of future changes to the API. ## Major Version The major version number of the REST API appears in the REST URL. Currently, Zuora only supports the **v1** major version. For example, `POST https://rest.zuora.com/v1/subscriptions`. ## Minor Version Zuora uses minor versions for the REST API to control small changes. For example, a field in a REST method is deprecated and a new field is used to replace it. Some fields in the REST methods are supported as of minor versions. If a field is not noted with a minor version, this field is available for all minor versions. If a field is noted with a minor version, this field is in version control. You must specify the supported minor version in the request header to process without an error. If a field is in version control, it is either with a minimum minor version or a maximum minor version, or both of them. You can only use this field with the minor version between the minimum and the maximum minor versions. For example, the `invoiceCollect` field in the POST Subscription method is in version control and its maximum minor version is 189.0. You can only use this field with the minor version 189.0 or earlier. If you specify a version number in the request header that is not supported, Zuora will use the minimum minor version of the REST API. In our REST API documentation, if a field or feature requires a minor version number, we note that in the field description. You only need to specify the version number when you use the fields require a minor version. To specify the minor version, set the `zuora-version` parameter to the minor version number in the request header for the request call. For example, the `collect` field is in 196.0 minor version. 
If you want to use this field for the POST Subscription method, set the `zuora-version` parameter to `196.0` in the request header. The `zuora-version` parameter is case sensitive. For all the REST API fields, by default, if the minor version is not specified in the request header, Zuora will use the minimum minor version of the REST API to avoid breaking your integration. ### Minor Version History The supported minor versions are not serial. This section documents the changes made to each Zuora REST API minor version. The following table lists the supported versions and the fields that have a Zuora REST API minor version. | Fields | Minor Version | REST Methods | Description | |:--------|:--------|:--------|:--------| | invoiceCollect | 189.0 and earlier | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice and collects a payment for a subscription. 
| | collect | 196.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Collects an automatic payment for a subscription. | | invoice | 196.0 and 207.0| [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice for a subscription. 
| | invoiceTargetDate | 196.0 and earlier | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") |Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | invoiceTargetDate | 207.0 and earlier | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | targetDate | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") |Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. 
| | targetDate | 211.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | includeExisting DraftInvoiceItems | 196.0 and earlier| [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | Specifies whether to include draft invoice items in subscription previews. Specify it to be `true` (default) to include draft invoice items in the preview result. Specify it to be `false` to excludes draft invoice items in the preview result. | | includeExisting DraftDocItems | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | Specifies whether to include draft invoice items in subscription previews. 
Specify it to be `true` (default) to include draft invoice items in the preview result. Specify it to be `false` to excludes draft invoice items in the preview result. | | previewType | 196.0 and earlier| [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | The type of preview you will receive. The possible values are `InvoiceItem`(default), `ChargeMetrics`, and `InvoiceItemChargeMetrics`. | | previewType | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | The type of preview you will receive. The possible values are `LegalDoc`(default), `ChargeMetrics`, and `LegalDocChargeMetrics`. | | runBilling | 211.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice or credit memo for a subscription. 
**Note:** Credit memos are only available if you have the Invoice Settlement feature enabled. | | invoiceDate | 214.0 and earlier | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date that should appear on the invoice being generated, as `yyyy-mm-dd`. | | invoiceTargetDate | 214.0 and earlier | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date through which to calculate charges on this account if an invoice is generated, as `yyyy-mm-dd`. | | documentDate | 215.0 and later | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date that should appear on the invoice and credit memo being generated, as `yyyy-mm-dd`. | | targetDate | 215.0 and later | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date through which to calculate charges on this account if an invoice or a credit memo is generated, as `yyyy-mm-dd`. | | memoItemAmount | 223.0 and earlier | [Create credit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_CreditMemoFromPrpc \"Create credit memo from charge\"); [Create debit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_DebitMemoFromPrpc \"Create debit memo from charge\") | Amount of the memo item. | | amount | 224.0 and later | [Create credit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_CreditMemoFromPrpc \"Create credit memo from charge\"); [Create debit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_DebitMemoFromPrpc \"Create debit memo from charge\") | Amount of the memo item. 
| | subscriptionNumbers | 222.4 and earlier | [Create order](https://www.zuora.com/developer/api-reference/#operation/POST_Order \"Create order\") | Container for the subscription numbers of the subscriptions in an order. | | subscriptions | 223.0 and later | [Create order](https://www.zuora.com/developer/api-reference/#operation/POST_Order \"Create order\") | Container for the subscription numbers and statuses in an order. | | creditTaxItems | 238.0 and earlier | [Get credit memo items](https://www.zuora.com/developer/api-reference/#operation/GET_CreditMemoItems \"Get credit memo items\"); [Get credit memo item](https://www.zuora.com/developer/api-reference/#operation/GET_CreditMemoItem \"Get credit memo item\") | Container for the taxation items of the credit memo item. | | taxItems | 238.0 and earlier | [Get debit memo items](https://www.zuora.com/developer/api-reference/#operation/GET_DebitMemoItems \"Get debit memo items\"); [Get debit memo item](https://www.zuora.com/developer/api-reference/#operation/GET_DebitMemoItem \"Get debit memo item\") | Container for the taxation items of the debit memo item. | | taxationItems | 239.0 and later | [Get credit memo items](https://www.zuora.com/developer/api-reference/#operation/GET_CreditMemoItems \"Get credit memo items\"); [Get credit memo item](https://www.zuora.com/developer/api-reference/#operation/GET_CreditMemoItem \"Get credit memo item\"); [Get debit memo items](https://www.zuora.com/developer/api-reference/#operation/GET_DebitMemoItems \"Get debit memo items\"); [Get debit memo item](https://www.zuora.com/developer/api-reference/#operation/GET_DebitMemoItem \"Get debit memo item\") | Container for the taxation items of the memo item. 
| #### Version 207.0 and Later The response structure of the [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") and [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") methods are changed. The following invoice related response fields are moved to the invoice container: * amount * amountWithoutTax * taxAmount * invoiceItems * targetDate * chargeMetrics # Zuora Object Model The following diagram presents a high-level view of the key Zuora objects. Click the image to open it in a new tab to resize it. <a href=\"https://www.zuora.com/wp-content/uploads/2017/01/ZuoraERD.jpeg\" target=\"_blank\"><img src=\"https://www.zuora.com/wp-content/uploads/2017/01/ZuoraERD.jpeg\" alt=\"Zuora Object Model Diagram\"></a> See the following articles for information about other parts of the Zuora business object model: * <a href=\"https://knowledgecenter.zuora.com/CB_Billing/Invoice_Settlement/D_Invoice_Settlement_Object_Model\" target=\"_blank\">Invoice Settlement Object Model</a> * <a href=\"https://knowledgecenter.zuora.com/BC_Subscription_Management/Orders/BA_Orders_Object_Model\" target=\"_blank\">Orders Object Model</a> You can use the [Describe object](https://www.zuora.com/developer/api-reference/#operation/GET_Describe) operation to list the fields of each Zuora object that is available in your tenant. When you call the operation, you must specify the API name of the Zuora object. 
The following table provides the API name of each Zuora object: | Object | API Name | |-----------------------------------------------|--------------------------------------------| | Account | `Account` | | Accounting Code | `AccountingCode` | | Accounting Period | `AccountingPeriod` | | Amendment | `Amendment` | | Application Group | `ApplicationGroup` | | Billing Run | <p>`BillingRun`</p><p>**Note:** The API name of this object is `BillingRun` in the [Describe object](https://www.zuora.com/developer/api-reference/#operation/GET_Describe) operation, Export ZOQL queries, and Data Query. Otherwise, the API name of this object is `BillRun`.</p> | | Contact | `Contact` | | Contact Snapshot | `ContactSnapshot` | | Credit Balance Adjustment | `CreditBalanceAdjustment` | | Credit Memo | `CreditMemo` | | Credit Memo Application | `CreditMemoApplication` | | Credit Memo Application Item | `CreditMemoApplicationItem` | | Credit Memo Item | `CreditMemoItem` | | Credit Memo Part | `CreditMemoPart` | | Credit Memo Part Item | `CreditMemoPartItem` | | Credit Taxation Item | `CreditTaxationItem` | | Custom Exchange Rate | `FXCustomRate` | | Debit Memo | `DebitMemo` | | Debit Memo Item | `DebitMemoItem` | | Debit Taxation Item | `DebitTaxationItem` | | Discount Applied Metrics | `DiscountAppliedMetrics` | | Entity | `Tenant` | | Feature | `Feature` | | Gateway Reconciliation Event | `PaymentGatewayReconciliationEventLog` | | Gateway Reconciliation Job | `PaymentReconciliationJob` | | Gateway Reconciliation Log | `PaymentReconciliationLog` | | Invoice | `Invoice` | | Invoice Adjustment | `InvoiceAdjustment` | | Invoice Item | `InvoiceItem` | | Invoice Item Adjustment | `InvoiceItemAdjustment` | | Invoice Payment | `InvoicePayment` | | Journal Entry | `JournalEntry` | | Journal Entry Item | `JournalEntryItem` | | Journal Run | `JournalRun` | | Order | `Order` | | Order Action | `OrderAction` | | Order ELP | `OrderElp` | | Order Item | `OrderItem` | | Order MRR | `OrderMrr` | | 
Order Quantity | `OrderQuantity` | | Order TCB | `OrderTcb` | | Order TCV | `OrderTcv` | | Payment | `Payment` | | Payment Application | `PaymentApplication` | | Payment Application Item | `PaymentApplicationItem` | | Payment Method | `PaymentMethod` | | Payment Method Snapshot | `PaymentMethodSnapshot` | | Payment Method Transaction Log | `PaymentMethodTransactionLog` | | Payment Method Update | `UpdaterDetail` | | Payment Part | `PaymentPart` | | Payment Part Item | `PaymentPartItem` | | Payment Run | `PaymentRun` | | Payment Transaction Log | `PaymentTransactionLog` | | Processed Usage | `ProcessedUsage` | | Product | `Product` | | Product Feature | `ProductFeature` | | Product Rate Plan | `ProductRatePlan` | | Product Rate Plan Charge | `ProductRatePlanCharge` | | Product Rate Plan Charge Tier | `ProductRatePlanChargeTier` | | Rate Plan | `RatePlan` | | Rate Plan Charge | `RatePlanCharge` | | Rate Plan Charge Tier | `RatePlanChargeTier` | | Refund | `Refund` | | Refund Application | `RefundApplication` | | Refund Application Item | `RefundApplicationItem` | | Refund Invoice Payment | `RefundInvoicePayment` | | Refund Part | `RefundPart` | | Refund Part Item | `RefundPartItem` | | Refund Transaction Log | `RefundTransactionLog` | | Revenue Charge Summary | `RevenueChargeSummary` | | Revenue Charge Summary Item | `RevenueChargeSummaryItem` | | Revenue Event | `RevenueEvent` | | Revenue Event Credit Memo Item | `RevenueEventCreditMemoItem` | | Revenue Event Debit Memo Item | `RevenueEventDebitMemoItem` | | Revenue Event Invoice Item | `RevenueEventInvoiceItem` | | Revenue Event Invoice Item Adjustment | `RevenueEventInvoiceItemAdjustment` | | Revenue Event Item | `RevenueEventItem` | | Revenue Event Item Credit Memo Item | `RevenueEventItemCreditMemoItem` | | Revenue Event Item Debit Memo Item | `RevenueEventItemDebitMemoItem` | | Revenue Event Item Invoice Item | `RevenueEventItemInvoiceItem` | | Revenue Event Item Invoice Item Adjustment | 
`RevenueEventItemInvoiceItemAdjustment` | | Revenue Event Type | `RevenueEventType` | | Revenue Schedule | `RevenueSchedule` | | Revenue Schedule Credit Memo Item | `RevenueScheduleCreditMemoItem` | | Revenue Schedule Debit Memo Item | `RevenueScheduleDebitMemoItem` | | Revenue Schedule Invoice Item | `RevenueScheduleInvoiceItem` | | Revenue Schedule Invoice Item Adjustment | `RevenueScheduleInvoiceItemAdjustment` | | Revenue Schedule Item | `RevenueScheduleItem` | | Revenue Schedule Item Credit Memo Item | `RevenueScheduleItemCreditMemoItem` | | Revenue Schedule Item Debit Memo Item | `RevenueScheduleItemDebitMemoItem` | | Revenue Schedule Item Invoice Item | `RevenueScheduleItemInvoiceItem` | | Revenue Schedule Item Invoice Item Adjustment | `RevenueScheduleItemInvoiceItemAdjustment` | | Subscription | `Subscription` | | Subscription Product Feature | `SubscriptionProductFeature` | | Taxable Item Snapshot | `TaxableItemSnapshot` | | Taxation Item | `TaxationItem` | | Updater Batch | `UpdaterBatch` | | Usage | `Usage` | # noqa: E501
OpenAPI spec version: 2019-07-26
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from zuora_client.models.amendment_rate_plan_charge_data_rate_plan_charge import AmendmentRatePlanChargeDataRatePlanCharge # noqa: F401,E501
from zuora_client.models.amendment_rate_plan_charge_tier import AmendmentRatePlanChargeTier # noqa: F401,E501
class AmendmentRatePlanChargeData(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps each Python attribute name to its Swagger-declared type; to_dict()
    # iterates this mapping, so only attributes listed here are serialised.
    swagger_types = {
        'rate_plan_charge': 'AmendmentRatePlanChargeDataRatePlanCharge',
        'rate_plan_charge_tier': 'list[AmendmentRatePlanChargeTier]'
    }
    # Maps each Python attribute name to its JSON key in the Zuora API payload.
    attribute_map = {
        'rate_plan_charge': 'RatePlanCharge',
        'rate_plan_charge_tier': 'RatePlanChargeTier'
    }
    def __init__(self, rate_plan_charge=None, rate_plan_charge_tier=None):  # noqa: E501
        """AmendmentRatePlanChargeData - a model defined in Swagger"""  # noqa: E501
        self._rate_plan_charge = None
        self._rate_plan_charge_tier = None
        # No polymorphic discriminator for this model.
        self.discriminator = None
        # rate_plan_charge is required: the property setter below rejects None.
        self.rate_plan_charge = rate_plan_charge
        if rate_plan_charge_tier is not None:
            self.rate_plan_charge_tier = rate_plan_charge_tier
    @property
    def rate_plan_charge(self):
        """Gets the rate_plan_charge of this AmendmentRatePlanChargeData.  # noqa: E501
        :return: The rate_plan_charge of this AmendmentRatePlanChargeData.  # noqa: E501
        :rtype: AmendmentRatePlanChargeDataRatePlanCharge
        """
        return self._rate_plan_charge
    @rate_plan_charge.setter
    def rate_plan_charge(self, rate_plan_charge):
        """Sets the rate_plan_charge of this AmendmentRatePlanChargeData.
        :param rate_plan_charge: The rate_plan_charge of this AmendmentRatePlanChargeData.  # noqa: E501
        :type: AmendmentRatePlanChargeDataRatePlanCharge
        """
        # Required field: reject None so an incomplete model cannot be built.
        if rate_plan_charge is None:
            raise ValueError("Invalid value for `rate_plan_charge`, must not be `None`")  # noqa: E501
        self._rate_plan_charge = rate_plan_charge
    @property
    def rate_plan_charge_tier(self):
        """Gets the rate_plan_charge_tier of this AmendmentRatePlanChargeData.  # noqa: E501
        # noqa: E501
        :return: The rate_plan_charge_tier of this AmendmentRatePlanChargeData.  # noqa: E501
        :rtype: list[AmendmentRatePlanChargeTier]
        """
        return self._rate_plan_charge_tier
    @rate_plan_charge_tier.setter
    def rate_plan_charge_tier(self, rate_plan_charge_tier):
        """Sets the rate_plan_charge_tier of this AmendmentRatePlanChargeData.
        # noqa: E501
        :param rate_plan_charge_tier: The rate_plan_charge_tier of this AmendmentRatePlanChargeData.  # noqa: E501
        :type: list[AmendmentRatePlanChargeTier]
        """
        self._rate_plan_charge_tier = rate_plan_charge_tier
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Serialise every declared attribute, recursing into nested models
        # (anything exposing to_dict), lists of models, and dict values.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated escape hatch: if the model ever subclasses dict, include
        # its mapping entries too (not the case here).
        if issubclass(AmendmentRatePlanChargeData, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Compares the full attribute dicts (both private backing fields).
        if not isinstance(other, AmendmentRatePlanChargeData):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"[email protected]"
] | |
637e683e2262bb9da1eeb06e515e1be31b876e13 | 8dc84558f0058d90dfc4955e905dab1b22d12c08 | /tools/swarming_client/named_cache.py | 87f3458856e823655bbdb2739e388255da1721b3 | [
"LGPL-2.1-only",
"BSD-3-Clause",
"MIT",
"LGPL-2.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-unknown"
] | permissive | meniossin/src | 42a95cc6c4a9c71d43d62bc4311224ca1fd61e03 | 44f73f7e76119e5ab415d4593ac66485e65d700a | refs/heads/master | 2022-12-16T20:17:03.747113 | 2020-09-03T10:43:12 | 2020-09-03T10:43:12 | 263,710,168 | 1 | 0 | BSD-3-Clause | 2020-05-13T18:20:09 | 2020-05-13T18:20:08 | null | UTF-8 | Python | false | false | 11,791 | py | # Copyright 2016 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""This file implements Named Caches."""
import contextlib
import logging
import optparse
import os
import random
import re
import string
import sys
from utils import lru
from utils import file_path
from utils import fs
from utils import threading_utils
import local_caching
# Keep synced with task_request.py
# Valid named-cache names: 1..4096 chars drawn from lowercase letters, digits
# and underscore. (The ur'' literal marks this file as Python 2.)
CACHE_NAME_RE = re.compile(ur'^[a-z0-9_]{1,4096}$')
class Error(Exception):
  """Base exception for named-cache specific failures (install/uninstall/allocate)."""
class CacheManager(object):
  """Manages cache directories exposed to a task.
  A task can specify that caches should be present on a bot. A cache is
  tuple (name, path), where
    name is a short identifier that describes the contents of the cache, e.g.
      "git_v8" could be all git repositories required by v8 builds, or
      "build_chromium" could be build artefacts of the Chromium.
    path is a directory path relative to the task run dir. Cache installation
      puts the requested cache directory at the path.
  policies is a local_caching.CachePolicies instance.
  """
  def __init__(self, root_dir, policies):
    """Initializes NamedCaches.
    |root_dir| is a directory for persistent cache storage.
    """
    # Python 2 code: paths are expected to be unicode objects.
    assert isinstance(root_dir, unicode), root_dir
    assert file_path.isabs(root_dir), root_dir
    self.root_dir = root_dir
    self._policies = policies
    self._lock = threading_utils.LockWithAssert()
    # LRU {cache_name -> cache_location}
    # It is saved to |root_dir|/state.json.
    self._lru = None
  @contextlib.contextmanager
  def open(self, time_fn=None):
    """Opens NamedCaches for mutation operations, such as install.
    Only one caller can open the cache manager at a time. If the same thread
    calls this function after opening it earlier, the call will deadlock.
    time_fn is a function that returns timestamp (float) and used to take
    timestamps when new caches are requested.
    Returns a context manager that must be closed as soon as possible.
    """
    with self._lock:
      state_path = os.path.join(self.root_dir, u'state.json')
      assert self._lru is None, 'acquired lock, but self._lru is not None'
      if os.path.isfile(state_path):
        try:
          self._lru = lru.LRUDict.load(state_path)
        except ValueError:
          # Corrupt state file: drop ALL named caches and start over, since
          # the mapping from names to directories can no longer be trusted.
          logging.exception('failed to load named cache state file')
          logging.warning('deleting named caches')
          file_path.rmtree(self.root_dir)
      self._lru = self._lru or lru.LRUDict()
      if time_fn:
        self._lru.time_fn = time_fn
      try:
        yield
      finally:
        # Always persist the (possibly mutated) LRU state on exit.
        file_path.ensure_tree(self.root_dir)
        self._lru.save(state_path)
        self._lru = None
  def __len__(self):
    """Returns number of items in the cache.
    NamedCache must be open.
    """
    return len(self._lru)
  def get_oldest(self):
    """Returns name of the LRU cache or None.
    NamedCache must be open.
    """
    self._lock.assert_locked()
    try:
      return self._lru.get_oldest()[0]
    except KeyError:
      return None
  def get_timestamp(self, name):
    """Returns timestamp of last use of an item.
    NamedCache must be open.
    Raises KeyError if cache is not found.
    """
    self._lock.assert_locked()
    assert isinstance(name, basestring), name
    return self._lru.get_timestamp(name)
  @property
  def available(self):
    """Returns a set of names of available caches.
    NamedCache must be open.
    """
    self._lock.assert_locked()
    return self._lru.keys_set()
  def install(self, path, name):
    """Moves the directory for the specified named cache to |path|.
    NamedCache must be open. path must be absolute, unicode and must not exist.
    Raises Error if cannot install the cache.
    """
    self._lock.assert_locked()
    logging.info('Installing named cache %r to %r', name, path)
    try:
      _check_abs(path)
      if os.path.isdir(path):
        raise Error('installation directory %r already exists' % path)
      rel_cache = self._lru.get(name)
      if rel_cache:
        abs_cache = os.path.join(self.root_dir, rel_cache)
        if os.path.isdir(abs_cache):
          logging.info('Moving %r to %r', abs_cache, path)
          file_path.ensure_tree(os.path.dirname(path))
          fs.rename(abs_cache, path)
          # The cache is now "checked out" at |path|; drop the LRU entry so
          # it cannot be handed out twice. uninstall() re-adds it.
          self._remove(name)
          return
        logging.warning('directory for named cache %r does not exist', name)
        self._remove(name)
      # The named cache does not exist, create an empty directory.
      # When uninstalling, we will move it back to the cache and create an
      # an entry.
      file_path.ensure_tree(path)
    except (OSError, Error) as ex:
      raise Error(
          'cannot install cache named %r at %r: %s' % (
              name, path, ex))
  def uninstall(self, path, name):
    """Moves the cache directory back. Opposite to install().
    NamedCache must be open. path must be absolute and unicode.
    Raises Error if cannot uninstall the cache.
    """
    logging.info('Uninstalling named cache %r from %r', name, path)
    try:
      _check_abs(path)
      if not os.path.isdir(path):
        logging.warning(
            'Directory %r does not exist anymore. Cache lost.', path)
        return
      rel_cache = self._lru.get(name)
      if rel_cache:
        # Do not crash because cache already exists.
        logging.warning('overwriting an existing named cache %r', name)
        create_named_link = False
      else:
        rel_cache = self._allocate_dir()
        create_named_link = True
      # Move the dir and create an entry for the named cache.
      abs_cache = os.path.join(self.root_dir, rel_cache)
      logging.info('Moving %r to %r', path, abs_cache)
      file_path.ensure_tree(os.path.dirname(abs_cache))
      fs.rename(path, abs_cache)
      self._lru.add(name, rel_cache)
      if create_named_link:
        # Create symlink <root_dir>/<named>/<name> -> <root_dir>/<short name>
        # for user convenience.
        named_path = self._get_named_path(name)
        if os.path.exists(named_path):
          file_path.remove(named_path)
        else:
          file_path.ensure_tree(os.path.dirname(named_path))
        try:
          fs.symlink(abs_cache, named_path)
          logging.info('Created symlink %r to %r', named_path, abs_cache)
        except OSError:
          # Ignore on Windows. It happens when running as a normal user or when
          # UAC is enabled and the user is a filtered administrator account.
          if sys.platform != 'win32':
            raise
    except (OSError, Error) as ex:
      raise Error(
          'cannot uninstall cache named %r at %r: %s' % (
              name, path, ex))
  def trim(self):
    """Purges cache entries that do not comply with the cache policies.
    NamedCache must be open.
    Returns:
      Number of caches deleted.
    """
    self._lock.assert_locked()
    if not os.path.isdir(self.root_dir):
      return 0
    removed = []
    def _remove_lru_file():
      """Removes the oldest LRU entry. LRU must not be empty."""
      name, _data = self._lru.get_oldest()
      logging.info('Removing named cache %r', name)
      self._remove(name)
      removed.append(name)
    # Trim according to maximum number of items.
    while len(self._lru) > self._policies.max_items:
      _remove_lru_file()
    # Trim according to maximum age.
    if self._policies.max_age_secs:
      cutoff = self._lru.time_fn() - self._policies.max_age_secs
      while self._lru:
        _name, (_content, timestamp) = self._lru.get_oldest()
        if timestamp >= cutoff:
          break
        _remove_lru_file()
    # Trim according to minimum free space.
    if self._policies.min_free_space:
      while True:
        free_space = file_path.get_free_space(self.root_dir)
        if not self._lru or free_space >= self._policies.min_free_space:
          break
        _remove_lru_file()
    # TODO(maruel): Trim according to self._policies.max_cache_size. Do it last
    # as it requires counting the size of each entry.
    # TODO(maruel): Trim empty directories. An empty directory is not a cache,
    # something needs to be in it.
    return len(removed)
  # Characters allowed in the two-character on-disk directory names below.
  _DIR_ALPHABET = string.ascii_letters + string.digits
  def _allocate_dir(self):
    """Creates and returns relative path of a new cache directory."""
    # We randomly generate directory names that have two lower/upper case
    # letters or digits. Total number of possibilities is (26*2 + 10)^2 = 3844.
    abc_len = len(self._DIR_ALPHABET)
    tried = set()
    # Give up after 1000 distinct candidates rather than looping forever
    # when the namespace is (nearly) exhausted.
    while len(tried) < 1000:
      i = random.randint(0, abc_len * abc_len - 1)
      rel_path = (
          self._DIR_ALPHABET[i / abc_len] +
          self._DIR_ALPHABET[i % abc_len])
      # NOTE: "i / abc_len" relies on Python 2 integer division.
      if rel_path in tried:
        continue
      abs_path = os.path.join(self.root_dir, rel_path)
      if not fs.exists(abs_path):
        return rel_path
      tried.add(rel_path)
    raise Error('could not allocate a new cache dir, too many cache dirs')
  def _remove(self, name):
    """Removes the cache directory, its convenience symlink and LRU entry.
    NamedCache must be open.
    Returns None; silently does nothing if |name| has no LRU entry.
    """
    self._lock.assert_locked()
    rel_path = self._lru.get(name)
    if not rel_path:
      return
    named_dir = self._get_named_path(name)
    if fs.islink(named_dir):
      fs.unlink(named_dir)
    abs_path = os.path.join(self.root_dir, rel_path)
    if os.path.isdir(abs_path):
      file_path.rmtree(abs_path)
    self._lru.pop(name)
  def _get_named_path(self, name):
    """Returns the path of the convenience symlink <root_dir>/named/<name>."""
    return os.path.join(self.root_dir, 'named', name)
def add_named_cache_options(parser):
  """Registers the named-cache command line flags on |parser|."""
  cache_group = optparse.OptionGroup(parser, 'Named caches')
  # Help text kept verbatim; built once so the add_option call stays short.
  named_cache_help = (
      'A named cache to request. Accepts two arguments, name and path. '
      'name identifies the cache, must match regex [a-z0-9_]{1,4096}. '
      'path is a path relative to the run dir where the cache directory '
      'must be put to. '
      'This option can be specified more than once.')
  cache_group.add_option(
      '--named-cache', dest='named_caches', action='append', nargs=2,
      default=[], help=named_cache_help)
  cache_group.add_option(
      '--named-cache-root',
      help='Cache root directory. Default=%default')
  parser.add_option_group(cache_group)
def process_named_cache_options(parser, options):
  """Validates named cache options and returns a CacheManager."""
  if options.named_caches and not options.named_cache_root:
    parser.error('--named-cache is specified, but --named-cache-root is empty')
  for cache_name, cache_path in options.named_caches:
    if not CACHE_NAME_RE.match(cache_name):
      parser.error(
          'cache name %r does not match %r' % (
              cache_name, CACHE_NAME_RE.pattern))
    if not cache_path:
      parser.error('cache path cannot be empty')
  if not options.named_cache_root:
    return None
  # Make these configurable later if there is use case but for now it's fairly
  # safe values.
  # In practice, a fair chunk of bots are already recycled on a daily schedule
  # so this code doesn't have any effect to them, unless they are preloaded
  # with a really old cache.
  policies = local_caching.CachePolicies(
      max_cache_size=1024*1024*1024*1024,  # 1TiB.
      min_free_space=options.min_free_space,
      max_items=50,
      max_age_secs=21*24*60*60)  # 3 weeks.
  return CacheManager(
      unicode(os.path.abspath(options.named_cache_root)), policies)
def _check_abs(path):
  """Validates a cache installation path: must be a unicode absolute path.
  Raises Error (not TypeError/ValueError) so callers can report it uniformly.
  """
  # Python 2: only unicode paths are accepted, plain str is rejected.
  if not isinstance(path, unicode):
    raise Error('named cache installation path must be unicode')
  if not os.path.isabs(path):
    raise Error('named cache installation path must be absolute')
| [
"[email protected]"
] | |
d0fe60462342ff39de01b1f8df7a2a0e91c55604 | 4fc21c3f8dca563ce8fe0975b5d60f68d882768d | /Darlington/phase1/python Basic 1/day 6 solution/qtn1.py | 79447fcdb8f1064021152620bb180e2fb9552ea6 | [
"MIT"
] | permissive | Uche-Clare/python-challenge-solutions | 17e53dbedbff2f33e242cf8011696b3059cd96e9 | 49ede6204ee0a82d5507a19fbc7590a1ae10f058 | refs/heads/master | 2022-11-13T15:06:52.846937 | 2020-07-10T20:59:37 | 2020-07-10T20:59:37 | 266,404,840 | 1 | 0 | MIT | 2020-05-23T19:24:56 | 2020-05-23T19:24:55 | null | UTF-8 | Python | false | false | 108 | py | #program to check whether a file exists
import os.path
# Create (or truncate) abc.txt so the existence check that follows is True.
open('abc.txt', 'w')
print(os.path.isfile('abc.txt')) | [
"[email protected]"
] | |
4d352594e3d2b3e79f5ea48063fc2959abef8c5b | 3c31584c1b661195a567ffd2603d30cb2e270493 | /codeforces/864/D.py | 86f83b4c6f59f1a9df0e1846a628d8b628115a0c | [] | no_license | ku-nal/Codeforces | c7f621e35b5d4eea1ed11276ee8e91031252ca91 | df43c2fcbcfd1c9f96b6fe79c7abc9ddee054cb7 | refs/heads/main | 2023-04-10T19:00:40.559074 | 2021-04-27T15:15:51 | 2021-04-27T15:15:51 | 362,154,763 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,877 | py | #===========Template===============
from io import BytesIO, IOBase
import sys,os
# Competitive-programming input shorthands:
# inpl: read one line as a list of ints; inpm: same but as a map iterator;
# inpi: read a single int; inp: read a raw line.
inpl=lambda:list(map(int,input().split()))
inpm=lambda:map(int,input().split())
inpi=lambda:int(input())
inp=lambda:input()
# Short aliases for common builtins used throughout the solution.
rev,ra,l=reversed,range,len
P=print
# Chunk size for the buffered FastIO reader/writer below.
BUFSIZE = 8192
class FastIO(IOBase):
    """Raw-fd backed byte stream used to speed up stdin/stdout for contests."""
    # Number of complete lines currently buffered but not yet consumed.
    newlines = 0
    def __init__(self, file):
        self._fd = file.fileno()
        self.buffer = BytesIO()
        # Writable if the file was opened for writing ("x" or not "r").
        self.writable = "x" in file.mode or "r" not in file.mode
        self.write = self.buffer.write if self.writable else None
    def read(self):
        """Reads the whole fd into the buffer, then returns everything unread."""
        while True:
            b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
            if not b:
                break
            # Append at the end without disturbing the current read position.
            ptr = self.buffer.tell()
            self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
        self.newlines = 0
        return self.buffer.read()
    def readline(self):
        """Returns the next line, refilling the buffer until one is available."""
        while self.newlines == 0:
            b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
            # Count buffered line terminators; EOF (empty read) counts as one
            # so a final unterminated line is still returned.
            self.newlines = b.count(b"\n") + (not b)
            ptr = self.buffer.tell()
            self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
        self.newlines -= 1
        return self.buffer.readline()
    def flush(self):
        """Writes buffered output to the fd and resets the buffer."""
        if self.writable:
            os.write(self._fd, self.buffer.getvalue())
            self.buffer.truncate(0), self.buffer.seek(0)
class IOWrapper(IOBase):
    """Text-mode facade over FastIO: encodes writes / decodes reads as ASCII."""
    def __init__(self, file):
        self.buffer = FastIO(file)
        self.flush = self.buffer.flush
        self.writable = self.buffer.writable
        self.write = lambda s: self.buffer.write(s.encode("ascii"))
        self.read = lambda: self.buffer.read().decode("ascii")
        self.readline = lambda: self.buffer.readline().decode("ascii")
def factors(n):
    """Return all positive divisors of ``n`` as an (unordered) list.

    Trial-divides only up to sqrt(n); every divisor ``i`` found pairs with
    ``n // i``.  Collecting into a set de-duplicates the square root when
    ``n`` is a perfect square.

    Replaces the previous ``reduce(list.__add__, ...)`` one-liner, which
    concatenated a fresh list per divisor pair (quadratic in the number of
    divisors) and raised TypeError on ``n < 1`` (empty reduce); this version
    simply returns ``[]`` for such degenerate inputs.
    """
    divisors = set()
    for i in range(1, int(n ** 0.5) + 1):
        if n % i == 0:
            divisors.add(i)
            divisors.add(n // i)
    return list(divisors)
# Replace the process stdio with the fast ASCII wrappers defined above.
sys.stdin, sys.stdout = IOWrapper(sys.stdin), IOWrapper(sys.stdout)
# Shadow the builtin input() so it reads via the fast wrapper, with the
# trailing newline / carriage return stripped.
def input(): return sys.stdin.readline().rstrip("\r\n")
#=========I/p O/p ========================================#
from bisect import bisect_left as bl
from bisect import bisect_right as br
import sys,operator,math,operator
from collections import Counter
import random
from functools import reduce
#==============To chaliye shuru krte he ====================#
# Looks like Codeforces 864D "Make a Permutation!" (per the file path):
# change as few elements as possible so li becomes a permutation of 1..n,
# preferring the lexicographically smallest result -- TODO confirm.
n=inpi()
li=inpl()
# omap: remaining (not yet finalised) occurrence count of each value.
omap=Counter(li)
arr=[]
# Values 1..n missing from li, collected in increasing order.
for i in ra(1,n+1):
    if i not in omap:
        arr.append(i)
# c: index of the next missing value to place; ans: replacements made so far.
c,ans=0,0
# omap1: values whose one kept occurrence has already been chosen.
omap1={}
for i in ra(n):
    if c<len(arr) and omap[li[i]]>1 and li[i] not in omap1:
        if arr[c]>li[i]:
            # Keeping this occurrence is lexicographically better; later
            # duplicates of this value will be replaced instead.
            omap1[li[i]]=1
            omap[li[i]]-=1
        else:
            # The smaller missing value goes here; replace immediately.
            omap[li[i]]-=1
            li[i]=arr[c]
            ans+=1
            c+=1
    elif omap[li[i]]>=1 and li[i] in omap1:
        # Duplicate of a value we already decided to keep: must be replaced.
        omap[li[i]]-=1
        li[i]=arr[c]
        ans+=1
        c+=1
P(ans)
P(*li)
| [
"[email protected]"
] | |
24e479bc14d88a4d856866a9475952562dcc6177 | da7a165522daea7c346693c5f32850017c482967 | /abc51-100/abc051/c.py | ad218ed2b388081ae9705ec0c52f82e5979ea0be | [] | no_license | SShayashi/ABC | 19f8750919208c5ff8935638dbaab941c255f914 | 3cbfee0c5251c1bb0df6306166d8d4b33bf7bb2c | refs/heads/master | 2021-05-04T21:06:10.720367 | 2020-07-11T13:59:16 | 2020-07-11T13:59:29 | 119,886,572 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | def m():
    # ABC051 C: walk from S=(sx, sy) to T=(tx, ty) twice and return to S,
    # never reusing an edge. Input guarantees tx > sx and ty > sy.
    sx, sy, tx, ty = map(int, input().split())
    X = tx-sx
    Y = ty-sy
    ans = ""
    # First round trip: straight to the target, then straight back.
    ans += "U" * Y
    ans += "R" * X
    ans += "D" * Y
    ans += "L" * X
    # Shift one column to the left, then walk to the target again.
    ans += "L"
    ans += "U" * (Y+1)
    ans += "R" * (X+1)
    ans += "D"
    # Shift one column to the right, then walk back to the start.
    ans += "R"
    ans += "D" * (Y+1)
    ans += "L" * (X+1)
    ans += "U"
    return ans
print(m()) | [
"[email protected]"
] | |
cb46e9e19fae34da7ec6451e0dfeb1b3222bff77 | 4c34dca6c12dd36e9e8eb360a2cbbb3f39a50e20 | /scratchpad/scratch.py | 4698f26325561e0b97de44eeba25d723830a5498 | [
"BSD-3-Clause"
] | permissive | PlumpMath/m2py | a35e0265d9e3c46214c9560b46a9e59df63c9a9b | 4a8f754f04adb151b1967fe13b8f80b4ec169560 | refs/heads/master | 2021-01-18T20:16:37.973122 | 2015-01-30T11:29:15 | 2015-01-30T11:29:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 917 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
txt = """
>>> t = [1, 2, 3, 4, 5]
>>> map(lambda x: x**2, t)
[1, 4, 9, 16, 25]
>>> t
[1, 2, 3, 4, 5]
>>> zip(t, map(lambda x: x**2, t))
[(1, 1), (2, 4), (3, 9), (4, 16), (5, 25)]
>>>
"""
def paste_run(text=None):
    """Replay the ``>>>`` lines of a doctest-style transcript.

    Every line starting with ``>>>`` is executed: lines containing ``=``
    are exec'd (assignments), everything else is eval'd and its value
    printed, mimicking an interactive session.

    Two fixes over the original:
      * the unused ``from .utils import xclip`` import was removed -- it
        raised ImportError whenever the script was run directly (the
        xclip() call it supported was already commented out);
      * all lines now run in one explicit ``namespace`` dict, because in
        Python 3 ``exec(line)`` inside a function cannot create function
        locals, so names assigned by one line were invisible to the
        ``eval`` of the next (NameError).

    Args:
        text: transcript to replay. Defaults to the module-level ``txt``
            (the original behaviour), which is then also updated in place
            with the smart-quote normalisation.

    Returns:
        dict: the namespace the statements ran in, so callers and tests
        can inspect the variables the transcript created.
    """
    global txt
    import re
    if text is None:
        # Keep the historical side effect of normalising the global buffer.
        txt = txt.replace('’', "'")
        text = txt
    else:
        # Replace the "smart quote" some clipboards paste in place of '.
        text = text.replace('’', "'")
    # Keep only the prompt lines and strip the leading ">>>" marker
    # (maxsplit=1 so a ">>>" inside the statement itself is preserved).
    lines = [x.split(">>>", 1)[1].strip()
             for x in text.splitlines() if x.startswith(">>>")]
    namespace = {}
    for line in lines:
        print(">>> ", line)
        if not line:
            continue
        if re.match(".*=.*", line):
            # NOTE: this also matches comparisons like "a == b"; kept as in
            # the original, since such lines still exec harmlessly.
            exec(line, namespace)
        else:
            print(eval(line, namespace))
    return namespace
paste_run() | [
"[email protected]"
] | |
3a8d9c6cc2b7dc816df485c10119a6be5f1084ee | c37d00101c6f6db5a3e61e92c06f61b98268a509 | /eemeter/__init__.py | e1f01a1cf4649392821a32e45cb2637c066037ac | [
"MIT"
] | permissive | impactlab/eemeter | 731499b3ba7549460ab786b482e9e7d9b6b29cef | 57d85a1f6d3ba1069ee0301b1b400bc27ff7f308 | refs/heads/master | 2020-04-05T23:47:53.255430 | 2016-08-16T00:17:14 | 2016-08-16T00:17:14 | 30,039,517 | 29 | 14 | null | 2016-08-12T22:21:02 | 2015-01-29T20:24:41 | Jupyter Notebook | UTF-8 | Python | false | false | 116 | py | # Version
# Package version as a (major, minor, patch) tuple; bumped on release.
VERSION = (0, 4, 8)

def get_version():
    """Return the version tuple rendered as a dotted string, e.g. '0.4.8'."""
    return '%d.%d.%d' % VERSION
| [
"[email protected]"
] | |
501d97a1367b23e6209650cac4c62ceab7531ec4 | ee1eed00f04fe4050a9b7d9761a76af37842b8b1 | /dynamicportofolio/migrations/0001_initial.py | 9c57f02e620c776d880992704da0cded5c914ad2 | [] | no_license | dimansion/dango | 707b738d1df735a1019e44a53b095d9af4e2a44a | 4239531849cef3f6c00ff3ba1e38c768a8648e0f | refs/heads/master | 2020-12-24T08:30:31.614872 | 2016-09-07T13:45:33 | 2016-09-07T13:45:33 | 36,284,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 733 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
# Auto-generated Django migration: creates the initial Post table.
class Migration(migrations.Migration):
    # First migration of the app, so there is nothing to depend on.
    dependencies = [
    ]
    operations = [
        # Post: a named entry with free-text description and two timestamps
        # (created_date defaults to now; published_date is optional).
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=30)),
                ('description', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(null=True, blank=True)),
            ],
        ),
    ]
| [
"[email protected]"
] | |
2e6d529fae8f08a9ea454cbd51d7dad90e159848 | 2611f7e793c52d7bc60be2772cf66b3704c05876 | /14B-088/HI/analysis/rotation_curves/rotsub_channels_movie.py | dd2ef9206c76b7ce8fe348324c48c81dbc5dd5fa | [
"MIT"
] | permissive | e-koch/VLA_Lband | 15e89878f554a70f0bc2a19cb7c5cb1b825f3ced | 8fca7b2de0b88ce5c5011b34bf3936c69338d0b0 | refs/heads/master | 2022-11-29T01:43:22.069806 | 2020-12-21T19:48:22 | 2020-12-21T19:48:22 | 42,543,618 | 2 | 2 | MIT | 2022-11-25T15:38:46 | 2015-09-15T20:06:58 | Python | UTF-8 | Python | false | false | 2,386 | py | import numpy as np
import matplotlib.pyplot as p
from spectral_cube import SpectralCube
from spectral_cube.cube_utils import average_beams
from astropy.utils.console import ProgressBar
from astropy import units as u
from astropy.visualization import AsinhStretch
from astropy.visualization.mpl_normalize import ImageNormalize
import warnings
import matplotlib.animation as anim
from paths import fourteenB_HI_data_wGBT_path, allfigs_path, fourteenB_wGBT_HI_file_dict
from constants import hi_freq
'''
Channel plots of the rotation subtracted HI cube combined into a movie!
Borrowing code from @keflavich:
https://github.com/keflavich/paper_w51_evla/blob/master/plot_codes/h77a_layers.py
'''
# Load the rotation-subtracted HI cube produced earlier in the pipeline.
cube = SpectralCube.read(fourteenB_wGBT_HI_file_dict['RotSube_Cube'])
# Begin channel map code here
# integrate over velocities to make channel maps of a set width
vstart = 0  # channels
vend = cube.shape[0]
vstep = 10
# Channel boundaries for each slab; over-shoots vend so the last slab is kept.
all_slabs = np.arange(vstart, vend + vstep, vstep, dtype=int)
# Define the average beam
try:
    beam = cube.beam
except AttributeError:
    # Multi-beam cube: fall back to the average of the per-channel beams.
    beam = average_beams(cube.beams)
# One integrated-intensity (moment 0) map per slab, converted from Jy/beam
# units to K km/s (the /1000. presumably converts m/s to km/s -- TODO confirm).
layers = \
    [cube[start:end].moment0().value *
     beam.jtok(hi_freq) / 1000. * u.km / u.s
     for start, end in
     ProgressBar(zip(all_slabs[:-1], all_slabs[1:]))]
# Scale all to the maximum
mx = np.max([np.nanmax(x).value for x in layers])
spec_axis = cube.spectral_axis.to(u.km / u.s).value
# Mid-slab velocity of each layer; min() clamps the last edge to the cube.
center_vels = [(spec_axis[start] + spec_axis[min(end, cube.shape[0] - 1)]) / 2. for start, end in
               zip(all_slabs[:-1], all_slabs[1:])]
pb = ProgressBar(len(center_vels))
fig = p.figure()
ax = fig.add_subplot(111)
# Bare image frames: no axes ticks or labels in the movie.
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
p.tight_layout()
def updater(i):
    # Frame callback for FuncAnimation: draw layer i with a fixed asinh
    # stretch so the grey scale is comparable across frames.
    pb.update()
    layer = layers[i]
    im = ax.imshow(layer.value, origin='lower',
                   norm=ImageNormalize(vmin=-0.001,
                                       vmax=mx,
                                       stretch=AsinhStretch()),
                   cmap=p.cm.gray_r)
    # ax.annotate("${0:.0f} km/s$".format(center_vels[i]),
    #             (0.53, 0.9),
    #             xycoords='axes fraction', color='k',
    #             fontsize=15.5)
ani = anim.FuncAnimation(fig, updater, range(len(center_vels)))
# p.show()
# Render straight to MP4 via ffmpeg instead of displaying interactively.
writer = anim.writers['ffmpeg'](fps=4)
ani.save(allfigs_path("m33_rotsub_movie.mp4"), writer=writer, dpi=300)
| [
"[email protected]"
] | |
5623d1d86e28812e453b1b0d2b6bad08204a8e8a | df2cbe914f463ad050d7ed26194424afbe3a0a52 | /addons/web_editor/models/test_models.py | 282b703c03d208e9b44cd2a107f060d5c20fe103 | [
"Apache-2.0"
] | permissive | SHIVJITH/Odoo_Machine_Test | 019ed339e995be980606a2d87a63312ddc18e706 | 310497a9872db7844b521e6dab5f7a9f61d365a4 | refs/heads/main | 2023-07-16T16:23:14.300656 | 2021-08-29T11:48:36 | 2021-08-29T11:48:36 | 401,010,175 | 0 | 0 | Apache-2.0 | 2021-08-29T10:13:58 | 2021-08-29T10:13:58 | null | UTF-8 | Python | false | false | 1,266 | py | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, fields
class ConverterTest(models.Model):
    """Dummy Odoo model exposing one field of each type, used to exercise
    the web_editor field-to-HTML converters in tests."""
    _name = 'web_editor.converter.test'
    _description = 'Web Editor Converter Test'
    # disable translation export for those brilliant field labels and values
    _translate = False
    # One field per converter under test.
    char = fields.Char()
    integer = fields.Integer()
    float = fields.Float()
    # Same underlying type as float but with fixed (16, 2) precision.
    numeric = fields.Float(digits=(16, 2))
    many2one = fields.Many2one('web_editor.converter.test.sub')
    # attachment=False keeps the binary in the table rather than ir.attachment.
    binary = fields.Binary(attachment=False)
    date = fields.Date()
    datetime = fields.Datetime()
    selection_str = fields.Selection([
        ('A', "Qu'il n'est pas arrivé à Toronto"),
        ('B', "Qu'il était supposé arriver à Toronto"),
        ('C', "Qu'est-ce qu'il fout ce maudit pancake, tabernacle ?"),
        ('D', "La réponse D"),
    ], string=u"Lorsqu'un pancake prend l'avion à destination de Toronto et "
              u"qu'il fait une escale technique à St Claude, on dit:")
    html = fields.Html()
    text = fields.Text()
class ConverterTestSub(models.Model):
_name = 'web_editor.converter.test.sub'
_description = 'Web Editor Converter Subtest'
name = fields.Char()
| [
"[email protected]"
] | |
9bfefdedb6210274b7005f49c69bd92d3e256979 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/96/usersdata/215/54936/submittedfiles/estatistica.py | 2396e9941096556e8218859d3628ab019b398ed1 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | # -*- coding: utf-8 -*-
# Read two samples ("lista A" and "lista B") of n values each and print, for
# each sample, its mean and its sample standard deviation (Bessel-corrected,
# i.e. dividing by n - 1), each with two decimal places.
#
# Fixes over the original submission:
#   * `somaA=somaA=a[i]` (and the B twin) overwrote the running sum instead of
#     adding to it, so the mean was just the last element;
#   * `sdqA=sqdA+difquadA` referenced the misspelled, undefined name `sqdA`
#     (same for `sqdB`), raising NameError;
#   * variance/deviation were recomputed inside the accumulation loop, and the
#     B loops iterated with `len(a)`; both are now done once, after each loop.

n = int(input('digite n:'))


def _le_lista(prompt, quantidade):
    # Read `quantidade` floats from stdin, prompting once per value.
    valores = []
    for _ in range(quantidade):
        valores.append(float(input(prompt)))
    return valores


def _media(valores):
    # Arithmetic mean of a non-empty list.
    return sum(valores) / len(valores)


def _desvio_padrao(valores, media):
    # Sample standard deviation (divides by n - 1, as the original intended).
    # NOTE: requires len(valores) >= 2, otherwise this divides by zero.
    soma_quadrados = 0.0
    for v in valores:
        soma_quadrados += (v - media) ** 2
    return (soma_quadrados / (len(valores) - 1)) ** 0.5


a = _le_lista('valor listaA:', n)
b = _le_lista('valor listaB:', n)

mediaA = _media(a)
devioA = _desvio_padrao(a, mediaA)
mediaB = _media(b)
devioB = _desvio_padrao(b, mediaB)

print('%.2f' % mediaA)
print('%.2f' % devioA)
print('%.2f' % mediaB)
print('%.2f' % devioB)
"[email protected]"
] | |
48d258b6d821fc4ab55853b8287503e12dcf9ba2 | 585bac463cb1919ac697391ff130bbced73d6307 | /105_ConstructBinaryTreeFromPreorderAndInorderTraversal/solution_1.py | 729fb58003e0825a66c681dcd89d745020540bf4 | [] | no_license | llgeek/leetcode | ce236cf3d3e3084933a7a4a5e8c7766f7f407285 | 4d340a45fb2e9459d47cbe179ebfa7a82e5f1b8c | refs/heads/master | 2021-01-22T23:44:13.318127 | 2020-03-11T00:59:05 | 2020-03-11T00:59:05 | 85,667,214 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:
        """Rebuild a binary tree from its preorder and inorder traversals.

        The next preorder element is always the root of the current subtree;
        locating it in the inorder slice splits that slice into the left and
        right subtrees. `self.preidx` advances through `preorder` as roots
        are consumed.
        """
        self.preidx = 0

        def build(lo, hi):
            # Empty inorder range -> no subtree here.
            if lo > hi:
                return None
            node = TreeNode(preorder[self.preidx])
            self.preidx += 1
            # Linear scan for the root's position within inorder[lo..hi].
            split = lo
            while split <= hi and inorder[split] != node.val:
                split += 1
            node.left = build(lo, split - 1)
            node.right = build(split + 1, hi)
            return node

        return build(0, len(inorder) - 1)
"[email protected]"
] | |
5e4f0f125a0d414df5abb90f65a10363540cd67a | d204538b66b477fea7289c6ca9801919f6fbd09e | /demo/start_demo.py | 9fbf7bd3389d6acb37fb7a02802831d838bc6f38 | [
"Apache-2.0"
] | permissive | TrendingTechnology/openchat | efb8194f38bc809ffca165d65ae13c1f10771b84 | cee89e3acff33ef598bf3dfe6d2e13a418a9a0aa | refs/heads/main | 2023-03-26T03:33:52.876583 | 2021-03-04T05:17:28 | 2021-03-04T05:17:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | from openchat import OpenChat
from demo.web_demo_env import WebDemoEnv
OpenChat(model="blenderbot", size="large", env=WebDemoEnv()) | [
"[email protected]"
] | |
e70b9829664b9c6f71b685b5dd938706773b2eac | 91add811783a4f19c7474e92ee87b91d9035a9ae | /segmentation_pytorch/models/unet/layers.py | eca45cbd03bdfc02b7e567b8042babd3c2a61240 | [] | no_license | NIRVANALAN/PyTorch_UNOdeMSegNet | fb0f0f992444dd7b41102b3896e9f2866873fee4 | 49b577cef650a4bcb3d5c4879bef2d97982e5f4c | refs/heads/master | 2022-12-22T15:14:22.929861 | 2020-03-21T05:22:40 | 2020-03-21T05:22:40 | 201,301,445 | 3 | 3 | null | 2022-12-08T03:14:09 | 2019-08-08T16:58:08 | Jupyter Notebook | UTF-8 | Python | false | false | 2,206 | py | import torch
import torch.nn as nn
from .utils import init_weights
class unetConv2(nn.Module):
    """Stack of `n` Conv2d(+BatchNorm)+ReLU blocks, as used inside a U-Net.

    The first block maps `in_size` -> `out_size` channels; every later block
    keeps `out_size` channels. Blocks are registered as attributes named
    ``conv1`` .. ``conv<n>`` and applied in order by :meth:`forward`.
    """

    def __init__(self, in_size, out_size, is_batchnorm, n=2, ks=3, stride=1, padding=1):
        super(unetConv2, self).__init__()
        self.n = n
        self.ks = ks
        self.stride = stride
        self.padding = padding

        channels = in_size
        for idx in range(1, n + 1):
            if is_batchnorm:
                block = nn.Sequential(
                    nn.Conv2d(channels, out_size, ks, stride, padding),
                    nn.BatchNorm2d(out_size),
                    nn.ReLU(inplace=True),
                )
            else:
                block = nn.Sequential(
                    nn.Conv2d(channels, out_size, ks, stride, padding),
                    nn.ReLU(inplace=True),
                )
            setattr(self, 'conv%d' % idx, block)
            channels = out_size

        # Kaiming-initialise every registered block (project helper).
        for child in self.children():
            init_weights(child, init_type='kaiming')

    def forward(self, inputs):
        """Apply the conv blocks sequentially to `inputs`."""
        x = inputs
        for idx in range(1, self.n + 1):
            x = getattr(self, 'conv%d' % idx)(x)
        return x
class unetUp(nn.Module):
    """U-Net decoder block: upsample the deep feature map, concatenate the
    skip-connection feature maps along the channel axis, then convolve.

    `is_deconv` selects learned transposed-convolution upsampling; otherwise
    bilinear upsampling followed by a 1x1 conv is used. `n_concat` is the
    total number of tensors concatenated (high-level + skip features).
    """

    def __init__(self, in_size, out_size, is_deconv, n_concat=2):
        super(unetUp, self).__init__()
        # Conv input channels: out_size from self.up plus (n_concat-2) extra
        # skip tensors of out_size channels each (plus the first skip, which
        # is accounted for by the upsampled tensor's out_size).
        self.conv = unetConv2(in_size+(n_concat-2)*out_size, out_size, False)
        if is_deconv:
            self.up = nn.ConvTranspose2d(
                in_size, out_size, kernel_size=2, stride=2, padding=0)
        else:
            self.up = nn.Sequential(
                nn.UpsamplingBilinear2d(scale_factor=2),
                nn.Conv2d(in_size, out_size, 1))

        # initialise the blocks (skip unetConv2 children: they already
        # initialise themselves in their own constructor)
        for m in self.children():
            if m.__class__.__name__.find('unetConv2') != -1:
                continue
            init_weights(m, init_type='kaiming')

    def forward(self, high_feature, *low_feature):
        """Upsample `high_feature`, concat all `low_feature` maps, convolve."""
        outputs0 = self.up(high_feature)
        for feature in low_feature:
            # Channel-wise concatenation; assumes spatial sizes already match
            # after upsampling — TODO confirm for odd input sizes.
            outputs0 = torch.cat([outputs0, feature], 1)
        return self.conv(outputs0)
| [
"[email protected]"
] | |
e6cd00e49f7d1ca2bed65faf4373545c7d8492ce | 8698757521458c2061494258886e5d3cdfa6ff11 | /argo/core/network/Bernoulli.py | c79dc46905b9ebaf1716bb2d64646f650601ff94 | [
"MIT"
] | permissive | ricvo/argo | 546c91e84d618c4bc1bb79a6bc7cba01dca56d57 | a10c33346803239db8a64c104db7f22ec4e05bef | refs/heads/master | 2023-02-25T01:45:26.412280 | 2020-07-05T22:55:35 | 2020-07-05T22:55:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,097 | py | import tensorflow as tf
import sonnet as snt
import numpy as np
from operator import xor
import types
import pdb
from abc import ABC, abstractmethod
from tensorflow_probability import distributions as tfd
from .AbstractModule import AbstractModule
class Bernoulli(AbstractModule):
    """Sonnet module mapping an input batch to a `tfd.Bernoulli` distribution.

    A single linear layer produces one logit per output element; the logits
    are reshaped to `output_shape`. The returned distribution object is
    monkey-patched with accessors (`reconstruction_node`,
    `distribution_parameters`, `get_probs`) expected elsewhere in this
    framework.
    """

    def __init__(self, output_size=-1, output_shape=-1, initializers={}, regularizers={}, clip_value=0, dtype=None,
                 name='Bernoulli'):
        # NOTE(review): mutable default arguments ({}) are shared across
        # instances; harmless only as long as they are never mutated.
        super().__init__(name = name)

        # Exactly one of output_size / output_shape must be given.
        assert xor(output_size==-1, output_shape==-1), "Either output_size or output_shape mut be specified, not both"

        if output_size!=-1:
            self._output_shape = [output_size]
        else:
            self._output_shape = output_shape

        self._initializers = initializers
        self._regularizers = regularizers
        # clip_value > 0 clips probabilities into [clip_value, 1-clip_value].
        self._clip_value = clip_value
        self._dtype=dtype

    def _build(self, inputs):
        # create the layers for mean and covariance
        output_shape = [-1] + self._output_shape
        # One linear projection to np.prod(output_shape) logits, reshaped.
        logits = tf.reshape(snt.Linear(np.prod(self._output_shape), initializers=self._initializers, regularizers=self._regularizers)(inputs),output_shape)

        # Sample dtype defaults to the input dtype unless overridden.
        dtype = inputs.dtype
        if self._dtype is not None:
            dtype = self._dtype

        if self._clip_value > 0:
            # Clip probabilities away from exactly 0/1 for numerical
            # stability (keeps log-probabilities finite).
            probs = tf.nn.sigmoid(logits)
            probs = tf.clip_by_value(probs, self._clip_value, 1 - self._clip_value)
            bernoulli = tfd.Bernoulli(probs=probs, dtype=dtype)
        else:
            bernoulli = tfd.Bernoulli(logits=logits, dtype=dtype)

        # Attach framework-expected helper methods onto the distribution
        # instance itself.
        def reconstruction_node(self):
            return self.mean()
        bernoulli.reconstruction_node = types.MethodType(reconstruction_node, bernoulli)

        def distribution_parameters(self):
            return [self.mean()]
        bernoulli.distribution_parameters = types.MethodType(distribution_parameters, bernoulli)

        def get_probs(self):
            return self.probs
        bernoulli.get_probs = types.MethodType(get_probs, bernoulli)

        return bernoulli
| [
"[email protected]"
] | |
2b59f94b6d0f7087c911b821203264dd57eac044 | 9dc0a06fd4ce161b7bb4b21a36f70b84c03d0feb | /pytorch_code/model_ops/lenet.py | 8fb86a5fdc47a99d7f6e881975f0976fe7ce3a29 | [] | no_license | chao1224/pytorch_distributed_nn | eaaa250344632256055fac471fc3345b69c15e42 | 7c692638d00afe95a5ba5ba9a163aa692fafe93a | refs/heads/master | 2021-07-21T03:49:56.083684 | 2017-10-27T21:41:40 | 2017-10-27T21:41:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,155 | py | import torch
from torch import nn
import torch.nn.functional as F
import pandas as pd
import numpy as np
from torch.autograd import Variable
from mpi4py import MPI
# we use LeNet here for our simple case
class LeNet(nn.Module):
    """Classic LeNet-style CNN for 1x28x28 inputs (e.g. MNIST), 10 classes."""

    def __init__(self):
        super(LeNet, self).__init__()
        # Two conv blocks followed by two fully-connected layers.
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        self.fc1 = nn.Linear(4*4*50, 500)
        self.fc2 = nn.Linear(500, 10)
        # Kept for API compatibility; loss is computed by the caller.
        self.ceriation = nn.CrossEntropyLoss()

    def forward(self, x):
        """Return raw class logits of shape (batch, 10)."""
        out = F.relu(F.max_pool2d(self.conv1(x), 2, 2))
        out = F.relu(F.max_pool2d(self.conv2(out), 2, 2))
        out = out.view(-1, 4*4*50)
        out = self.fc1(out)
        return self.fc2(out)

    def name(self):
        """Identifier used by logging/checkpointing code."""
        return 'lenet'
class LeNetSplit(nn.Module):
    '''
    this is a module that we split the module and do backward process layer by layer
    please don't call this module for normal uses, this is a hack and run slower than
    the automatic chain rule version
    '''
    # Layer-by-layer LeNet for a parameter-server style trainer: forward()
    # detaches the input of every step into a fresh leaf Variable so that
    # backward() can be driven manually, one layer at a time, shipping each
    # parameter gradient to the master (MPI rank 0) as soon as it is ready.

    def __init__(self):
        super(LeNetSplit, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        self.fc1 = nn.Linear(4*4*50, 500)
        self.fc2 = nn.Linear(500, 10)

        self.maxpool2d = nn.MaxPool2d(2, stride=2)
        self.relu = nn.ReLU()

        # Only the parameterized layers; pooling/ReLU have no grads to send.
        self.full_modules = [self.conv1, self.conv2, self.fc1, self.fc2]
        # One MPI message "channel" per parameter tensor (weight + bias each).
        self._init_channel_index = len(self.full_modules)*2

        self.criterion = nn.CrossEntropyLoss()

    def forward(self, x):
        # self.input[i] is the detached (requires_grad) input of step i and
        # self.output[i] its output; pairs are consumed in reverse order by
        # the backward* methods below.
        self.output = []
        self.input = []
        x = Variable(x.data, requires_grad=True)
        self.input.append(x)
        x = self.conv1(x)
        self.output.append(x)

        x = Variable(x.data, requires_grad=True)
        self.input.append(x)
        x = self.maxpool2d(x)
        self.output.append(x)

        x = Variable(x.data, requires_grad=True)
        self.input.append(x)
        x = self.relu(x)
        self.output.append(x)

        x = Variable(x.data, requires_grad=True)
        self.input.append(x)
        x = self.conv2(x)
        self.output.append(x)

        x = Variable(x.data, requires_grad=True)
        self.input.append(x)
        x = self.maxpool2d(x)
        self.output.append(x)

        x = Variable(x.data, requires_grad=True)
        self.input.append(x)
        x = self.relu(x)
        self.output.append(x)

        # Flatten before the fully-connected layers.
        x = x.view(-1, 4*4*50)
        x = Variable(x.data, requires_grad=True)
        self.input.append(x)
        x = self.fc1(x)
        self.output.append(x)

        x = Variable(x.data, requires_grad=True)
        self.input.append(x)
        x = self.fc2(x)
        self.output.append(x)
        return x

    @property
    def fetch_init_channel_index(self):
        # Number of gradient tensors the master should expect (weights+biases).
        return self._init_channel_index

    def backward(self, g, communicator, req_send_check):
        """Manually backprop through the recorded steps, newest first.

        g: gradient of the loss w.r.t. the network output.
        communicator: MPI communicator; gradients go to rank 0 with tag
        88+channel_index (channel_index counts down from the last tensor).
        req_send_check: list of outstanding MPI requests; appended to and
        returned. Bias gradients are sent before weight gradients.
        """
        mod_avail_index = len(self.full_modules)-1
        #channel_index = len(self.full_modules)*2-2
        channel_index = self._init_channel_index - 2
        mod_counters_ = [0]*len(self.full_modules)
        for i, output in reversed(list(enumerate(self.output))):
            if i == (len(self.output) - 1):
                # for last node, use g
                output.backward(g)
                # get gradient here after some sanity checks:
                # NOTE(review): pd.isnull(...) is used as a None-check on a
                # torch tensor here — works for None, but mixes libraries.
                tmp_grad = self.full_modules[mod_avail_index].weight.grad
                if not pd.isnull(tmp_grad):
                    grads = tmp_grad.data.numpy().astype(np.float64)
                    req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
                    req_send_check.append(req_isend)
                    # update counters
                    mod_avail_index-=1
                    channel_index-=1
                else:
                    continue
            else:
                # Chain rule by hand: feed the grad of the next step's
                # (detached) input into this step's output.
                output.backward(self.input[i+1].grad.data)
                tmp_grad_weight = self.full_modules[mod_avail_index].weight.grad
                tmp_grad_bias = self.full_modules[mod_avail_index].bias.grad
                if not pd.isnull(tmp_grad_weight) and not pd.isnull(tmp_grad_bias):
                    # we always send bias first
                    if mod_counters_[mod_avail_index] == 0:
                        grads = tmp_grad_bias.data.numpy().astype(np.float64)
                        req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
                        req_send_check.append(req_isend)
                        channel_index-=1
                        mod_counters_[mod_avail_index]+=1
                    elif mod_counters_[mod_avail_index] == 1:
                        grads = tmp_grad_weight.data.numpy().astype(np.float64)
                        req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
                        req_send_check.append(req_isend)
                        channel_index-=1
                        mod_counters_[mod_avail_index]+=1
                        # update counters
                        mod_avail_index-=1
                else:
                    continue
        # The first module's weight is only sent after the loop (its bias was
        # sent inside); tmp_grad_weight still refers to module 0 at this point.
        if mod_counters_[0] == 1:
            grads = tmp_grad_weight.data.numpy().astype(np.float64)
            req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
            req_send_check.append(req_isend)
        return req_send_check

    def backward_signal_kill(self, g, communicator, req_send_check, cur_step):
        '''
        This killer is triggered by signals bcasting from master, channel of
        signal is kept checking by each worker to determine if they're the
        straggler
        '''
        mod_avail_index = len(self.full_modules)-1
        channel_index = self._init_channel_index - 2
        mod_counters_ = [0]*len(self.full_modules)

        # should kill flag
        should_kill = False

        for i, output in reversed(list(enumerate(self.output))):
            ############################ killing process on workers #####################################
            # Poll (bounded spin) for a kill message from the master on tag
            # 77; if present, this worker is the straggler and aborts the
            # backward pass early.
            for _ in range(10000):
                status = MPI.Status()
                communicator.Iprobe(0, 77, status)
                if status.Get_source() == 0:
                    print("Worker {}, Cur Step: {} I'm the straggler, killing myself!".format(communicator.Get_rank(), cur_step))
                    tmp = communicator.recv(source=0, tag=77)
                    should_kill = True
                    break
            if should_kill:
                break
            ############################################################################################

            if i == (len(self.output) - 1):
                # for last node, use g
                output.backward(g)
                # get gradient here after some sanity checks:
                tmp_grad = self.full_modules[mod_avail_index].weight.grad
                if not pd.isnull(tmp_grad):
                    grads = tmp_grad.data.numpy().astype(np.float64)
                    req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
                    req_send_check.append(req_isend)
                    # update counters
                    mod_avail_index-=1
                    channel_index-=1
                else:
                    continue
            else:
                output.backward(self.input[i+1].grad.data)
                tmp_grad_weight = self.full_modules[mod_avail_index].weight.grad
                tmp_grad_bias = self.full_modules[mod_avail_index].bias.grad

                if not pd.isnull(tmp_grad_weight) and not pd.isnull(tmp_grad_bias):
                    # we always send bias first
                    if mod_counters_[mod_avail_index] == 0:
                        grads = tmp_grad_bias.data.numpy().astype(np.float64)
                        req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
                        req_send_check.append(req_isend)
                        channel_index-=1
                        mod_counters_[mod_avail_index]+=1
                    elif mod_counters_[mod_avail_index] == 1:
                        grads = tmp_grad_weight.data.numpy().astype(np.float64)
                        req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
                        req_send_check.append(req_isend)
                        channel_index-=1
                        mod_counters_[mod_avail_index]+=1
                        # update counters
                        mod_avail_index-=1
                else:
                    continue
        if mod_counters_[0] == 1:
            grads = tmp_grad_weight.data.numpy().astype(np.float64)
            req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
            req_send_check.append(req_isend)
        return req_send_check

    def backward_timeout_kill(self, g, communicator, req_send_check):
        """do we even need this?"""
        # Unimplemented timeout-based straggler killer; kept as a placeholder.
        pass
"[email protected]"
] | |
02c3a4438f148ad6f4507b2fe5038d1f2d498bd3 | 144b18db9f190daf499df56f555cfc064bfa42f3 | /pysoa/test/plan/grammar/directives/time.py | 60158f54599153c7b636798e6b2e839efb164050 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | cache51/pysoa | 3eff93d9db7cc125ae016f29d294c5263fdfa692 | fd37d64dfefff01ff0f7f48e225e0d672b36b5db | refs/heads/master | 2020-03-15T14:25:56.935337 | 2018-05-04T14:56:04 | 2018-05-04T14:56:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,955 | py | """
Directives for freezing time during test execution
"""
from __future__ import absolute_import, unicode_literals
import datetime
from pyparsing import Literal
from pysoa.test.plan.errors import FixtureSyntaxError
from pysoa.test.plan.grammar.directive import (
Directive,
ActionDirective,
register_directive,
VarValueGrammar
)
try:
from freezegun import freeze_time
except ImportError:
freeze_time = None
class FreezeTimeMixin(object):
@staticmethod
def parse_and_store_freeze_to(target, value, file_name, line_number):
if not freeze_time:
raise FixtureSyntaxError(
'Could not import freezegun to support freeze time syntax. Perhaps you need to install it?',
file_name,
line_number,
)
if value == 'now':
freeze_to = None
else:
try:
freeze_to = datetime.datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
except ValueError:
raise FixtureSyntaxError('Could not parse datetime value for time freeze', file_name, line_number)
target['_freezegun_freeze_time'] = freeze_to
@staticmethod
def start_freeze(target):
if '_freezegun_freeze_time' in target:
target['_freezegun_context'] = freeze_time(target['_freezegun_freeze_time'])
target['_freezegun_context'].start()
@staticmethod
def stop_freeze(target):
if '_freezegun_context' in target:
target['_freezegun_context'].stop()
del target['_freezegun_context']
class FreezeTimeTestPlanDirective(Directive, FreezeTimeMixin):
"""
Freeze Time using freezegun for the duration of an entire test plan.
This will span all actions within the plan, no matter where the statement is located.
"""
@classmethod
def name(cls):
return 'freeze_time_test'
@classmethod
def get_full_grammar(cls):
return (
Literal('freeze time') +
':' +
VarValueGrammar
)
def ingest_from_parsed_test_fixture(self, action_case, test_case, parse_results, file_name, line_number):
self.parse_and_store_freeze_to(test_case, parse_results.value, file_name, line_number)
self.start_freeze(test_case)
def post_parse_test_case(self, test_case):
self.stop_freeze(test_case)
def set_up_test_case(self, test_case, test_fixture, **kwargs):
self.start_freeze(test_case)
def tear_down_test_case(self, test_case, test_fixture, **kwargs):
self.stop_freeze(test_case)
def assert_test_case_action_results(self, *args, **kwargs):
pass
class FreezeTimeActionDirective(ActionDirective, FreezeTimeMixin):
"""
Freeze Time using freezegun for the duration of a single action.
"""
@classmethod
def name(cls):
return 'freeze_time_action'
@classmethod
def get_full_grammar(cls):
return (
super(FreezeTimeActionDirective, cls).get_full_grammar() +
Literal('freeze time') +
':' +
VarValueGrammar
)
def ingest_from_parsed_test_fixture(self, action_case, test_case, parse_results, file_name, line_number):
self.parse_and_store_freeze_to(action_case, parse_results.value, file_name, line_number)
self.start_freeze(action_case)
def post_parse_test_case_action(self, action_case, test_case):
self.stop_freeze(action_case)
def set_up_test_case_action(self, action_name, action_case, test_case, test_fixture, **kwargs):
self.start_freeze(action_case)
def tear_down_test_case_action(self, action_name, action_case, test_case, test_fixture, **kwargs):
self.stop_freeze(action_case)
def assert_test_case_action_results(self, *args, **kwargs):
pass
register_directive(FreezeTimeTestPlanDirective)
register_directive(FreezeTimeActionDirective)
| [
"[email protected]"
] | |
8e468456067fa4b93a3f6a54a9cf2fc969db6b19 | 1e1f7d3687b71e69efa958d5bbda2573178f2acd | /payroll/doctype/attendance/attendance.py | 664a3cb0a3cc17293706a9f4e4f0e4d3d86d2577 | [] | no_license | ravidey/erpnext | 680a31e2a6b957fd3f3ddc5fd6b383d8ea50f515 | bb4b9bfa1551226a1d58fcef0cfe8150c423f49d | refs/heads/master | 2021-01-17T22:07:36.049581 | 2011-06-10T07:32:01 | 2011-06-10T07:32:01 | 1,869,316 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,537 | py | # Please edit this list and import only required elements
import webnotes
from webnotes.utils import add_days, add_months, add_years, cint, cstr, date_diff, default_fields, flt, fmt_money, formatdate, generate_hash, getTraceback, get_defaults, get_first_day, get_last_day, getdate, has_common, month_name, now, nowdate, replace_newlines, sendmail, set_default, str_esc_quote, user_format, validate_email_add
from webnotes.model import db_exists
from webnotes.model.doc import Document, addchild, removechild, getchildren, make_autoname, SuperDocType
from webnotes.model.doclist import getlist, copy_doclist
from webnotes.model.code import get_obj, get_server_obj, run_server_obj, updatedb, check_syntax
from webnotes import session, form, is_testing, msgprint, errprint
set = webnotes.conn.set
sql = webnotes.conn.sql
get_value = webnotes.conn.get_value
in_transaction = webnotes.conn.in_transaction
convert_to_lists = webnotes.conn.convert_to_lists
# -----------------------------------------------------------------------------------------
class DocType:
    """Server-side controller for the Attendance doctype (legacy webnotes).

    Validates a single day's attendance record for an employee before it is
    saved/submitted: duplicates, leave balances, fiscal-year range, etc.
    NOTE(review): several queries below build SQL via % string formatting,
    which is injection-prone; the parameterized form used in get_emp_name is
    the safer pattern.
    """

    def __init__(self, doc, doclist=[]):
        self.doc = doc
        self.doclist = doclist
        # Notification objects
        self.badge_obj = get_obj('Badge Settings','Badge Settings','','',1)

    #autoname function
    def autoname(self):
        # Name records as <naming_series>##### (auto-incrementing suffix).
        self.doc.name = make_autoname(self.doc.naming_series+'.#####')

    #get employee name based on employee id selected
    def get_emp_name(self):
        emp_nm = sql("select employee_name from `tabEmployee` where name=%s", self.doc.employee)
        #this is done because sometimes user entered wrong employee name while uploading employee attendance
        set(self.doc, 'employee_name', emp_nm and emp_nm[0][0] or '')

        ret = { 'employee_name' : emp_nm and emp_nm[0][0] or ''}
        return str(ret)

    #validation for duplicate record
    def validate_duplicate_record(self):
        # Reject a second submitted attendance for the same employee/date.
        res = sql("select name from `tabAttendance` where employee = '%s' and att_date = '%s' and not name = '%s' and docstatus = 1"%(self.doc.employee,self.doc.att_date, self.doc.name))
        if res:
            msgprint("Employee's attendance already marked.")
            raise Exception

    #validation - leave_type is mandatory for status absent/ half day else not required to entered.
    def validate_status(self):
        if self.doc.status == 'Present' and self.doc.leave_type:
            msgprint("You can not enter leave type for attendance status 'Present'")
            raise Exception
        elif (self.doc.status == 'Absent' or self.doc.status == 'Half Day') and not self.doc.leave_type:
            msgprint("Please enter leave type for attendance status 'Absent'")
            raise Exception

    #check for already record present in leave transaction for same date
    def check_leave_record(self):
        # 'Present' must not overlap an approved leave deduction on that day.
        if self.doc.status == 'Present':
            chk = sql("select name from `tabLeave Transaction` where employee=%s and (from_date <= %s and to_date >= %s) and status = 'Submitted' and leave_transaction_type = 'Deduction' and docstatus!=2", (self.doc.employee, self.doc.att_date, self.doc.att_date))
            if chk:
                msgprint("Leave Application created for employee "+self.doc.employee+" whom you are trying to mark as 'Present' ")
                raise Exception

    #For absent/ half day record - check for leave balances of the employees
    def validate_leave_type(self):
        if not self.doc.status =='Present' and self.doc.leave_type not in ('Leave Without Pay','Compensatory Off'):
            #check for leave allocated to employee from leave transaction
            ret = sql("select name from `tabLeave Transaction` where employee = '%s' and leave_type = '%s' and leave_transaction_type = 'Allocation' and fiscal_year = '%s'"%(self.doc.employee,self.doc.leave_type,self.doc.fiscal_year))
            #if leave allocation is present then calculate leave balance i.e. sum(allocation) - sum(deduction)
            if ret:
                q1 = 'SUM(CASE WHEN leave_transaction_type = "Allocation" THEN total_leave ELSE 0 END)-SUM(CASE WHEN leave_transaction_type = "Deduction" THEN total_leave ELSE 0 END)'
                q2 = "select %s from `tabLeave Transaction` where employee = '%s' and leave_type = '%s' and fiscal_year = '%s' and docstatus = 1"
                res = sql(q2%(q1,self.doc.employee,self.doc.leave_type,self.doc.fiscal_year))
                if res:
                    # Need at least 1 day of balance for Absent, 0.5 for Half Day.
                    if self.doc.status == 'Absent' and flt(res[0][0]) < 1:
                        msgprint("%s balances are insufficient to cover a day absence, please select other leave type."%self.doc.leave_type)
                        raise Exception
                    if self.doc.status == 'Half Day' and flt(res[0][0]) < 0.5:
                        msgprint("%s balances are insufficient to cover a half day absence, please select other leave type."%self.doc.leave_type)
                        raise Exception
            else:
                msgprint("Leave Allocation for employee %s not done.\n You can allocate leaves from HR -> Leave Transaction OR HR -> Leave Control Panel."%self.doc.employee)
                raise Exception

    def validate_fiscal_year(self):
        # Attendance date must fall inside the selected fiscal year
        # (approximated here as start date + 365 days).
        fy=sql("select year_start_date from `tabFiscal Year` where name='%s'"% self.doc.fiscal_year)
        ysd=fy and fy[0][0] or ""
        yed=add_days(str(ysd),365)
        if str(self.doc.att_date) < str(ysd) or str(self.doc.att_date) > str(yed):
            msgprint("'%s' Not Within The Fiscal Year selected"%(self.doc.att_date))
            raise Exception

    def validate_att_date(self):
        # Disallow marking attendance for a future date.
        import datetime
        if getdate(self.doc.att_date)>getdate(datetime.datetime.now().date().strftime('%Y-%m-%d')):
            msgprint("Attendance can not be marked for future dates")
            raise Exception

    # Validate employee
    #-------------------
    def validate_employee(self):
        # Employee must exist and be Active.
        emp = sql("select name, status from `tabEmployee` where name = '%s'" % self.doc.employee)
        if not emp:
            msgprint("Employee: %s does not exists in the system" % self.doc.employee, raise_exception=1)
        elif emp[0][1] != 'Active':
            msgprint("Employee: %s is not Active" % self.doc.employee, raise_exception=1)

    # validate...
    def validate(self):
        # Framework hook: run the enabled validations before save.
        self.validate_fiscal_year()
        self.validate_att_date()
        #self.validate_leave_type()
        self.validate_duplicate_record()
        #self.validate_status()
        self.check_leave_record()

    def on_update(self):
        #self.validate()
        #this is done because sometimes user entered wrong employee name while uploading employee attendance
        x=self.get_emp_name()

    def on_submit(self):
        #this is done because while uploading attendance chnage docstatus to 1 i.e. submit
        set(self.doc,'docstatus',1)
        pass
| [
"[email protected]"
] | |
fd24b3900bc159123582a764faa95efbf5f54eef | 99aa9b2be5199bf1b2f670bc9bb1a5bc7cec1c89 | /OA/MS/Numbers With Equal Digit Sum.py | 510e263aff6f09a6c2e3936c708e1801d3888015 | [] | no_license | SimonFans/LeetCode | 5196e85dec886b18cb2350419a4a2ae3c751966c | 0a34a19bb0979d58b511822782098f62cd86b25e | refs/heads/master | 2023-02-08T00:49:30.916655 | 2023-01-31T06:32:32 | 2023-01-31T06:32:32 | 145,938,196 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | def find_digit_sum(num):
val = 0
while num:
val += num % 10
num //= 10
return val
def num_digit_equal_sum(arr):
digit_sum_map = {}
max_val = -1
for num in arr:
digit_sum = find_digit_sum(num)
if digit_sum in digit_sum_map:
other_val = digit_sum_map[digit_sum]
max_val = max(max_val, other_val + num)
digit_sum_map[digit_sum] = max(other_val, num)
else:
digit_sum_map[digit_sum] = num
return max_val
| [
"[email protected]"
] | |
0fb3c1a5ddf254ca4d04fb76e8f9943dfbef7bf9 | 738e2f18c6ca259fe3a6b0d4d70efd32d83a8758 | /generate_bind_conf | b4eb80756ab0933abf30e137d32e4a0ab38762c8 | [] | no_license | nicferrier/secondarys | 9f1e5a1abb616b1a8346be785de33f5667f44762 | 1d5998750686ec27ac2cfbe7542c60e3a6c33ad6 | refs/heads/master | 2016-09-05T19:13:51.949494 | 2012-06-17T21:42:15 | 2012-06-17T21:42:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | #!/usr/bin/env python
import csv
import sys
for entry in csv.reader(sys.stdin.readlines()):
print "zone \"%s\" {" % entry[0]
print "\ttype slave;"
print "\tfile \"/var/cache/bind/db.%s\";" % entry[0]
print "\tmasters { %s; };" % entry[1]
print "};"
| [
"[email protected]"
] | ||
791da6d75d63d268682d3c474ae43e3c13d93092 | ac216a2cc36f91625e440247986ead2cd8cce350 | /appengine/monorail/features/hotlistpeople.py | 45d63a21870456083afd38fb2703dfc4f193d243 | [
"BSD-3-Clause"
] | permissive | xinghun61/infra | b77cdc566d9a63c5d97f9e30e8d589982b1678ab | b5d4783f99461438ca9e6a477535617fadab6ba3 | refs/heads/master | 2023-01-12T21:36:49.360274 | 2019-10-01T18:09:22 | 2019-10-01T18:09:22 | 212,168,656 | 2 | 1 | BSD-3-Clause | 2023-01-07T10:18:03 | 2019-10-01T18:22:44 | Python | UTF-8 | Python | false | false | 10,148 | py | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Classes to implement the hotlistpeople page and related forms."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import logging
import time
from third_party import ezt
from features import hotlist_helpers
from features import hotlist_views
from framework import framework_helpers
from framework import framework_views
from framework import paginate
from framework import permissions
from framework import servlet
from framework import urls
from project import project_helpers
MEMBERS_PER_PAGE = 50
class HotlistPeopleList(servlet.Servlet):
_PAGE_TEMPLATE = 'project/people-list-page.ezt'
# Note: using the project's peoplelist page template. minor edits were
# to make it compatible with HotlistPeopleList
_MAIN_TAB_MODE = servlet.Servlet.HOTLIST_TAB_PEOPLE
  def AssertBasePermission(self, mr):
    """Raise PermissionException unless the user may view this hotlist."""
    super(HotlistPeopleList, self).AssertBasePermission(mr)
    if not permissions.CanViewHotlist(
        mr.auth.effective_ids, mr.perms, mr.hotlist):
      # NOTE(review): message says "now allowed" — likely a typo for
      # "not allowed".
      raise permissions.PermissionException(
          'User is now allowed to view the hotlist people list')
  def GatherPageData(self, mr):
    """Build up a dictionary of data values to use when rendering the page."""
    if mr.auth.user_id:
      # Record the visit so this hotlist appears in the user's
      # recently-visited hotlists.
      self.services.user.AddVisitedHotlist(
          mr.cnxn, mr.auth.user_id, mr.hotlist_id)

    all_members = (mr.hotlist.owner_ids +
                   mr.hotlist.editor_ids + mr.hotlist.follower_ids)

    hotlist_url = hotlist_helpers.GetURLOfHotlist(
        mr.cnxn, mr.hotlist, self.services.user)

    with mr.profiler.Phase('gathering members on this page'):
      users_by_id = framework_views.MakeAllUserViews(
          mr.cnxn, self.services.user, all_members)
      framework_views.RevealAllEmailsToMembers(mr.auth, mr.project, users_by_id)

    untrusted_user_group_proxies = []
    # TODO(jojwang): implement FindUntrustedGroups()

    with mr.profiler.Phase('making member views'):
      owner_views = self._MakeMemberViews(mr, mr.hotlist.owner_ids, users_by_id)
      editor_views = self._MakeMemberViews(mr, mr.hotlist.editor_ids,
                                           users_by_id)
      follower_views = self._MakeMemberViews(mr, mr.hotlist.follower_ids,
                                             users_by_id)
      all_member_views = owner_views + editor_views + follower_views

    url_params = [(name, mr.GetParam(name)) for name in
                  framework_helpers.RECOGNIZED_PARAMS]
    # We are passing in None for the project_name because we are not operating
    # under any project.
    pagination = paginate.ArtifactPagination(
        all_member_views, mr.GetPositiveIntParam('num', MEMBERS_PER_PAGE),
        mr.GetPositiveIntParam('start'), None,
        '%s%s' % (hotlist_url, urls.HOTLIST_PEOPLE), url_params=url_params)

    # Admins may edit membership; a non-admin editor may only remove
    # themselves.
    offer_membership_editing = permissions.CanAdministerHotlist(
        mr.auth.effective_ids, mr.perms, mr.hotlist)

    offer_remove_self = (
        not offer_membership_editing and
        mr.auth.user_id and
        mr.auth.user_id in mr.hotlist.editor_ids)

    # Members just added via the form are highlighted (ids arrive in the
    # 'new' query parameter).
    newly_added_views = [mv for mv in all_member_views
                         if str(mv.user.user_id) in mr.GetParam('new', [])]

    return {
        'is_hotlist': ezt.boolean(True),
        'untrusted_user_groups': untrusted_user_group_proxies,
        'pagination': pagination,
        'initial_add_members': '',
        'subtab_mode': None,
        'initially_expand_form': ezt.boolean(False),
        'newly_added_views': newly_added_views,
        'offer_membership_editing': ezt.boolean(offer_membership_editing),
        'offer_remove_self': ezt.boolean(offer_remove_self),
        'total_num_owners': len(mr.hotlist.owner_ids),
        'check_abandonment': ezt.boolean(True),
        'initial_new_owner_username': '',
        'placeholder': 'new-owner-username',
        'open_dialog': ezt.boolean(False),
        'viewing_user_page': ezt.boolean(True),
        }
  def ProcessFormData(self, mr, post_data):
    """Process the posted form."""
    # Admins may add/remove members and transfer ownership; a non-admin
    # editor may only remove themselves from the hotlist.
    permit_edit = permissions.CanAdministerHotlist(
        mr.auth.effective_ids, mr.perms, mr.hotlist)
    can_remove_self = (
        not permit_edit and
        mr.auth.user_id and
        mr.auth.user_id in mr.hotlist.editor_ids)
    if not can_remove_self and not permit_edit:
      raise permissions.PermissionException(
          'User is not permitted to edit hotlist membership')
    hotlist_url = hotlist_helpers.GetURLOfHotlist(
        mr.cnxn, mr.hotlist, self.services.user)
    if permit_edit:
      # Dispatch on which submit button was pressed.
      if 'addbtn' in post_data:
        return self.ProcessAddMembers(mr, post_data, hotlist_url)
      elif 'removebtn' in post_data:
        return self.ProcessRemoveMembers(mr, post_data, hotlist_url)
      elif 'changeowners' in post_data:
        return self.ProcessChangeOwnership(mr, post_data)
    if can_remove_self:
      if 'removeself' in post_data:
        return self.ProcessRemoveSelf(mr, hotlist_url)
def _MakeMemberViews(self, mr, member_ids, users_by_id):
"""Return a sorted list of MemberViews for display by EZT."""
member_views = [hotlist_views.MemberView(
mr.auth.user_id, member_id, users_by_id[member_id],
mr.hotlist) for member_id in member_ids]
member_views.sort(key=lambda mv: mv.user.email)
return member_views
def ProcessChangeOwnership(self, mr, post_data):
new_owner_id_set = project_helpers.ParseUsernames(
mr.cnxn, self.services.user, post_data.get('changeowners'))
remain_as_editor = post_data.get('becomeeditor') == 'on'
if len(new_owner_id_set) != 1:
mr.errors.transfer_ownership = (
'Please add one valid user email.')
else:
new_owner_id = new_owner_id_set.pop()
if self.services.features.LookupHotlistIDs(
mr.cnxn, [mr.hotlist.name], [new_owner_id]):
mr.errors.transfer_ownership = (
'This user already owns a hotlist with the same name')
if mr.errors.AnyErrors():
self.PleaseCorrect(
mr, initial_new_owner_username=post_data.get('changeowners'),
open_dialog=ezt.boolean(True))
else:
old_and_new_owner_ids = [new_owner_id] + mr.hotlist.owner_ids
(_, editor_ids, follower_ids) = hotlist_helpers.MembersWithoutGivenIDs(
mr.hotlist, old_and_new_owner_ids)
if remain_as_editor and mr.hotlist.owner_ids:
editor_ids.append(mr.hotlist.owner_ids[0])
self.services.features.UpdateHotlistRoles(
mr.cnxn, mr.hotlist_id, [new_owner_id], editor_ids, follower_ids)
hotlist = self.services.features.GetHotlist(mr.cnxn, mr.hotlist_id)
hotlist_url = hotlist_helpers.GetURLOfHotlist(
mr.cnxn, hotlist, self.services.user)
return framework_helpers.FormatAbsoluteURL(
mr,'%s%s' % (hotlist_url, urls.HOTLIST_PEOPLE),
saved=1, ts=int(time.time()),
include_project=False)
def ProcessAddMembers(self, mr, post_data, hotlist_url):
"""Process the user's request to add members.
Args:
mr: common information parsed from the HTTP request.
post_data: dictionary of form data
hotlist_url: hotlist_url to return to after data has been processed.
Returns:
String URL to redirect the user to after processing
"""
# NOTE: using project_helpers function
new_member_ids = project_helpers.ParseUsernames(
mr.cnxn, self.services.user, post_data.get('addmembers'))
if not new_member_ids or not post_data.get('addmembers'):
mr.errors.incorrect_email_input = (
'Please give full emails seperated by commas.')
role = post_data['role']
(owner_ids, editor_ids, follower_ids) = hotlist_helpers.MembersWithGivenIDs(
mr.hotlist, new_member_ids, role)
# TODO(jojwang): implement MAX_HOTLIST_PEOPLE
if not owner_ids:
mr.errors.addmembers = (
'Cannot have a hotlist without an owner; please leave at least one.')
if mr.errors.AnyErrors():
add_members_str = post_data.get('addmembers', '')
self.PleaseCorrect(
mr, initial_add_members=add_members_str, initially_expand_form=True)
else:
self.services.features.UpdateHotlistRoles(
mr.cnxn, mr.hotlist_id, owner_ids, editor_ids, follower_ids)
return framework_helpers.FormatAbsoluteURL(
mr, '%s%s' % (
hotlist_url, urls.HOTLIST_PEOPLE),
saved=1, ts=int(time.time()),
new=','.join([str(u) for u in new_member_ids]),
include_project=False)
def ProcessRemoveMembers(self, mr, post_data, hotlist_url):
"""Process the user's request to remove members."""
remove_strs = post_data.getall('remove')
logging.info('remove_strs = %r', remove_strs)
remove_ids = set(
self.services.user.LookupUserIDs(mr.cnxn, remove_strs).values())
(owner_ids, editor_ids,
follower_ids) = hotlist_helpers.MembersWithoutGivenIDs(
mr.hotlist, remove_ids)
self.services.features.UpdateHotlistRoles(
mr.cnxn, mr.hotlist_id, owner_ids, editor_ids, follower_ids)
return framework_helpers.FormatAbsoluteURL(
mr, '%s%s' % (
hotlist_url, urls.HOTLIST_PEOPLE),
saved=1, ts=int(time.time()), include_project=False)
def ProcessRemoveSelf(self, mr, hotlist_url):
"""Process the request to remove the logged-in user."""
remove_ids = [mr.auth.user_id]
# This function does no permission checking; that's done by the caller.
(owner_ids, editor_ids,
follower_ids) = hotlist_helpers.MembersWithoutGivenIDs(
mr.hotlist, remove_ids)
self.services.features.UpdateHotlistRoles(
mr.cnxn, mr.hotlist_id, owner_ids, editor_ids, follower_ids)
return framework_helpers.FormatAbsoluteURL(
mr, '%s%s' % (
hotlist_url, urls.HOTLIST_PEOPLE),
saved=1, ts=int(time.time()), include_project=False)
| [
"[email protected]"
] | |
0cbd5474b71672cb168a892ee1b300395a042c70 | 36957a9ce540846d08f151b6a2c2d582cff1df47 | /VR/Python/Python36/Lib/turtledemo/lindenmayer.py | 5f29811cc858c0d44403b5343333afd966e76012 | [] | no_license | aqp1234/gitVR | 60fc952307ef413e396d31e0d136faffe087ed2b | e70bd82c451943c2966b8ad1bee620a0ee1080d2 | refs/heads/master | 2022-12-29T15:30:12.540947 | 2020-10-07T15:26:32 | 2020-10-07T15:26:32 | 290,163,043 | 0 | 1 | null | 2020-08-25T09:15:40 | 2020-08-25T08:47:36 | C# | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:af4e1ba8102f30f049caf1c4657df7ee1a0b79dd016ca78698a1cfe4067a7df7
size 2553
| [
"[email protected]"
] | |
dc654d354452caf7235e7e12ee24f7a2b350f932 | c98d0c486188a120e844830513abae5d48118561 | /venv/bin/easy_install | 249fd03ac322c27c2a47f44710f88d9c75c7043f | [] | no_license | lminnock/mldjango | 0b9e4979925007b53f842cac814d6d5d57afeccd | 692d4eacc20fff0edfa399b531df4802d7d4fbe7 | refs/heads/master | 2021-01-10T08:15:02.198766 | 2015-10-31T17:33:13 | 2015-10-31T17:33:13 | 45,311,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | #!/home/Lminnock/mlworkshop/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
f391e38a5611fd6cdd88cebaaff3a9c04b3d4a5a | e0980f704a573894350e285f66f4cf390837238e | /.history/menus/models_20201030110920.py | d789727b7aff7632494fbbeb3ed03cb146885f45 | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | from django.db import models
from modelcluster.models import ClusterableModel
# Create your models here.
class Menu(ClusterableModel):
title = models.CharField(max_length=100)
slug = AutoSlugField(
populate_fro
)
| [
"[email protected]"
] | |
c23d8ce5ad9ad476b4bb2bf58e618efab78a3471 | ed454f31cf5a3d2605f275cc83ec82f34f06bb33 | /zerver/views/pointer.py | 7f015f01e2b6d0e5aac11ed4a96adc385d4a39ff | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] | permissive | 18-2-SKKU-OSS/2018-2-OSS-L5 | b62a3ce53eff63ed09395dc1f8296fef089d90e2 | 190bc3afbf973d5917e82ad9785d01b2ea1773f2 | refs/heads/master | 2020-04-08T11:44:14.468373 | 2018-12-11T04:35:30 | 2018-12-11T04:35:30 | 159,317,980 | 3 | 4 | Apache-2.0 | 2018-12-09T14:14:21 | 2018-11-27T10:30:18 | Python | UTF-8 | Python | false | false | 1,186 | py |
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from zerver.decorator import to_non_negative_int
from zerver.lib.actions import do_update_pointer
from zerver.lib.request import has_request_variables, JsonableError, REQ
from zerver.lib.response import json_success
from zerver.models import UserProfile, UserMessage, get_usermessage_by_message_id
def get_pointer_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
return json_success({'pointer': user_profile.pointer})
@has_request_variables
def update_pointer_backend(request: HttpRequest, user_profile: UserProfile,
pointer: int=REQ(converter=to_non_negative_int)) -> HttpResponse:
if pointer <= user_profile.pointer:
return json_success()
if get_usermessage_by_message_id(user_profile, pointer) is None:
raise JsonableError(_("Invalid message ID"))
request._log_data["extra"] = "[%s]" % (pointer,)
update_flags = (request.client.name.lower() in ['android', "zulipandroid"])
do_update_pointer(user_profile, request.client, pointer, update_flags=update_flags)
return json_success()
| [
"[email protected]"
] | |
1a60b8bfcd925f0de5a4773b97fa9ae6113c5d09 | 62bbfb6c50bba16304202aea96d1de4990f95e04 | /dependencies/pulumi_aws/cloudwatch/outputs.py | 8b2b4f7a5d50775dcca3e1099d5dabb922874636 | [] | no_license | adriell/lambda-autoservico-storagegateway | b40b8717c8de076e61bbd422461c7d624a0d2273 | f6e3dea61b004b73943a5438c658d3f019f106f7 | refs/heads/main | 2023-03-16T14:41:16.821675 | 2021-03-11T03:30:33 | 2021-03-11T03:30:33 | 345,865,704 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 27,282 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
__all__ = [
'EventPermissionCondition',
'EventTargetBatchTarget',
'EventTargetDeadLetterConfig',
'EventTargetEcsTarget',
'EventTargetEcsTargetNetworkConfiguration',
'EventTargetInputTransformer',
'EventTargetKinesisTarget',
'EventTargetRetryPolicy',
'EventTargetRunCommandTarget',
'EventTargetSqsTarget',
'LogMetricFilterMetricTransformation',
'MetricAlarmMetricQuery',
'MetricAlarmMetricQueryMetric',
]
@pulumi.output_type
class EventPermissionCondition(dict):
def __init__(__self__, *,
key: str,
type: str,
value: str):
"""
:param str key: Key for the condition. Valid values: `aws:PrincipalOrgID`.
:param str type: Type of condition. Value values: `StringEquals`.
:param str value: Value for the key.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "type", type)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> str:
"""
Key for the condition. Valid values: `aws:PrincipalOrgID`.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def type(self) -> str:
"""
Type of condition. Value values: `StringEquals`.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def value(self) -> str:
"""
Value for the key.
"""
return pulumi.get(self, "value")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class EventTargetBatchTarget(dict):
def __init__(__self__, *,
job_definition: str,
job_name: str,
array_size: Optional[int] = None,
job_attempts: Optional[int] = None):
"""
:param str job_definition: The ARN or name of the job definition to use if the event target is an AWS Batch job. This job definition must already exist.
:param str job_name: The name to use for this execution of the job, if the target is an AWS Batch job.
:param int array_size: The size of the array, if this is an array batch job. Valid values are integers between 2 and 10,000.
:param int job_attempts: The number of times to attempt to retry, if the job fails. Valid values are 1 to 10.
"""
pulumi.set(__self__, "job_definition", job_definition)
pulumi.set(__self__, "job_name", job_name)
if array_size is not None:
pulumi.set(__self__, "array_size", array_size)
if job_attempts is not None:
pulumi.set(__self__, "job_attempts", job_attempts)
@property
@pulumi.getter(name="jobDefinition")
def job_definition(self) -> str:
"""
The ARN or name of the job definition to use if the event target is an AWS Batch job. This job definition must already exist.
"""
return pulumi.get(self, "job_definition")
@property
@pulumi.getter(name="jobName")
def job_name(self) -> str:
"""
The name to use for this execution of the job, if the target is an AWS Batch job.
"""
return pulumi.get(self, "job_name")
@property
@pulumi.getter(name="arraySize")
def array_size(self) -> Optional[int]:
"""
The size of the array, if this is an array batch job. Valid values are integers between 2 and 10,000.
"""
return pulumi.get(self, "array_size")
@property
@pulumi.getter(name="jobAttempts")
def job_attempts(self) -> Optional[int]:
"""
The number of times to attempt to retry, if the job fails. Valid values are 1 to 10.
"""
return pulumi.get(self, "job_attempts")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class EventTargetDeadLetterConfig(dict):
def __init__(__self__, *,
arn: Optional[str] = None):
"""
:param str arn: - ARN of the SQS queue specified as the target for the dead-letter queue.
"""
if arn is not None:
pulumi.set(__self__, "arn", arn)
@property
@pulumi.getter
def arn(self) -> Optional[str]:
"""
- ARN of the SQS queue specified as the target for the dead-letter queue.
"""
return pulumi.get(self, "arn")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class EventTargetEcsTarget(dict):
def __init__(__self__, *,
task_definition_arn: str,
group: Optional[str] = None,
launch_type: Optional[str] = None,
network_configuration: Optional['outputs.EventTargetEcsTargetNetworkConfiguration'] = None,
platform_version: Optional[str] = None,
task_count: Optional[int] = None):
"""
:param str task_definition_arn: The ARN of the task definition to use if the event target is an Amazon ECS cluster.
:param str group: Specifies an ECS task group for the task. The maximum length is 255 characters.
:param str launch_type: Specifies the launch type on which your task is running. The launch type that you specify here must match one of the launch type (compatibilities) of the target task. Valid values are `EC2` or `FARGATE`.
:param 'EventTargetEcsTargetNetworkConfigurationArgs' network_configuration: Use this if the ECS task uses the awsvpc network mode. This specifies the VPC subnets and security groups associated with the task, and whether a public IP address is to be used. Required if launch_type is FARGATE because the awsvpc mode is required for Fargate tasks.
:param str platform_version: Specifies the platform version for the task. Specify only the numeric portion of the platform version, such as 1.1.0. This is used only if LaunchType is FARGATE. For more information about valid platform versions, see [AWS Fargate Platform Versions](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html).
:param int task_count: The number of tasks to create based on the TaskDefinition. The default is 1.
"""
pulumi.set(__self__, "task_definition_arn", task_definition_arn)
if group is not None:
pulumi.set(__self__, "group", group)
if launch_type is not None:
pulumi.set(__self__, "launch_type", launch_type)
if network_configuration is not None:
pulumi.set(__self__, "network_configuration", network_configuration)
if platform_version is not None:
pulumi.set(__self__, "platform_version", platform_version)
if task_count is not None:
pulumi.set(__self__, "task_count", task_count)
@property
@pulumi.getter(name="taskDefinitionArn")
def task_definition_arn(self) -> str:
"""
The ARN of the task definition to use if the event target is an Amazon ECS cluster.
"""
return pulumi.get(self, "task_definition_arn")
@property
@pulumi.getter
def group(self) -> Optional[str]:
"""
Specifies an ECS task group for the task. The maximum length is 255 characters.
"""
return pulumi.get(self, "group")
@property
@pulumi.getter(name="launchType")
def launch_type(self) -> Optional[str]:
"""
Specifies the launch type on which your task is running. The launch type that you specify here must match one of the launch type (compatibilities) of the target task. Valid values are `EC2` or `FARGATE`.
"""
return pulumi.get(self, "launch_type")
@property
@pulumi.getter(name="networkConfiguration")
def network_configuration(self) -> Optional['outputs.EventTargetEcsTargetNetworkConfiguration']:
"""
Use this if the ECS task uses the awsvpc network mode. This specifies the VPC subnets and security groups associated with the task, and whether a public IP address is to be used. Required if launch_type is FARGATE because the awsvpc mode is required for Fargate tasks.
"""
return pulumi.get(self, "network_configuration")
@property
@pulumi.getter(name="platformVersion")
def platform_version(self) -> Optional[str]:
"""
Specifies the platform version for the task. Specify only the numeric portion of the platform version, such as 1.1.0. This is used only if LaunchType is FARGATE. For more information about valid platform versions, see [AWS Fargate Platform Versions](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html).
"""
return pulumi.get(self, "platform_version")
@property
@pulumi.getter(name="taskCount")
def task_count(self) -> Optional[int]:
"""
The number of tasks to create based on the TaskDefinition. The default is 1.
"""
return pulumi.get(self, "task_count")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class EventTargetEcsTargetNetworkConfiguration(dict):
def __init__(__self__, *,
subnets: Sequence[str],
assign_public_ip: Optional[bool] = None,
security_groups: Optional[Sequence[str]] = None):
"""
:param Sequence[str] subnets: The subnets associated with the task or service.
:param bool assign_public_ip: Assign a public IP address to the ENI (Fargate launch type only). Valid values are `true` or `false`. Default `false`.
:param Sequence[str] security_groups: The security groups associated with the task or service. If you do not specify a security group, the default security group for the VPC is used.
"""
pulumi.set(__self__, "subnets", subnets)
if assign_public_ip is not None:
pulumi.set(__self__, "assign_public_ip", assign_public_ip)
if security_groups is not None:
pulumi.set(__self__, "security_groups", security_groups)
@property
@pulumi.getter
def subnets(self) -> Sequence[str]:
"""
The subnets associated with the task or service.
"""
return pulumi.get(self, "subnets")
@property
@pulumi.getter(name="assignPublicIp")
def assign_public_ip(self) -> Optional[bool]:
"""
Assign a public IP address to the ENI (Fargate launch type only). Valid values are `true` or `false`. Default `false`.
"""
return pulumi.get(self, "assign_public_ip")
@property
@pulumi.getter(name="securityGroups")
def security_groups(self) -> Optional[Sequence[str]]:
"""
The security groups associated with the task or service. If you do not specify a security group, the default security group for the VPC is used.
"""
return pulumi.get(self, "security_groups")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class EventTargetInputTransformer(dict):
def __init__(__self__, *,
input_template: str,
input_paths: Optional[Mapping[str, str]] = None):
"""
:param Mapping[str, str] input_paths: Key value pairs specified in the form of JSONPath (for example, time = $.time)
* You can have as many as 10 key-value pairs.
* You must use JSON dot notation, not bracket notation.
* The keys can't start with "AWS".
"""
pulumi.set(__self__, "input_template", input_template)
if input_paths is not None:
pulumi.set(__self__, "input_paths", input_paths)
@property
@pulumi.getter(name="inputTemplate")
def input_template(self) -> str:
return pulumi.get(self, "input_template")
@property
@pulumi.getter(name="inputPaths")
def input_paths(self) -> Optional[Mapping[str, str]]:
"""
Key value pairs specified in the form of JSONPath (for example, time = $.time)
* You can have as many as 10 key-value pairs.
* You must use JSON dot notation, not bracket notation.
* The keys can't start with "AWS".
"""
return pulumi.get(self, "input_paths")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class EventTargetKinesisTarget(dict):
def __init__(__self__, *,
partition_key_path: Optional[str] = None):
"""
:param str partition_key_path: The JSON path to be extracted from the event and used as the partition key.
"""
if partition_key_path is not None:
pulumi.set(__self__, "partition_key_path", partition_key_path)
@property
@pulumi.getter(name="partitionKeyPath")
def partition_key_path(self) -> Optional[str]:
"""
The JSON path to be extracted from the event and used as the partition key.
"""
return pulumi.get(self, "partition_key_path")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class EventTargetRetryPolicy(dict):
def __init__(__self__, *,
maximum_event_age_in_seconds: Optional[int] = None,
maximum_retry_attempts: Optional[int] = None):
"""
:param int maximum_event_age_in_seconds: The age in seconds to continue to make retry attempts.
:param int maximum_retry_attempts: maximum number of retry attempts to make before the request fails
"""
if maximum_event_age_in_seconds is not None:
pulumi.set(__self__, "maximum_event_age_in_seconds", maximum_event_age_in_seconds)
if maximum_retry_attempts is not None:
pulumi.set(__self__, "maximum_retry_attempts", maximum_retry_attempts)
@property
@pulumi.getter(name="maximumEventAgeInSeconds")
def maximum_event_age_in_seconds(self) -> Optional[int]:
"""
The age in seconds to continue to make retry attempts.
"""
return pulumi.get(self, "maximum_event_age_in_seconds")
@property
@pulumi.getter(name="maximumRetryAttempts")
def maximum_retry_attempts(self) -> Optional[int]:
"""
maximum number of retry attempts to make before the request fails
"""
return pulumi.get(self, "maximum_retry_attempts")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class EventTargetRunCommandTarget(dict):
def __init__(__self__, *,
key: str,
values: Sequence[str]):
"""
:param str key: Can be either `tag:tag-key` or `InstanceIds`.
:param Sequence[str] values: If Key is `tag:tag-key`, Values is a list of tag values. If Key is `InstanceIds`, Values is a list of Amazon EC2 instance IDs.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> str:
"""
Can be either `tag:tag-key` or `InstanceIds`.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def values(self) -> Sequence[str]:
"""
If Key is `tag:tag-key`, Values is a list of tag values. If Key is `InstanceIds`, Values is a list of Amazon EC2 instance IDs.
"""
return pulumi.get(self, "values")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class EventTargetSqsTarget(dict):
def __init__(__self__, *,
message_group_id: Optional[str] = None):
"""
:param str message_group_id: The FIFO message group ID to use as the target.
"""
if message_group_id is not None:
pulumi.set(__self__, "message_group_id", message_group_id)
@property
@pulumi.getter(name="messageGroupId")
def message_group_id(self) -> Optional[str]:
"""
The FIFO message group ID to use as the target.
"""
return pulumi.get(self, "message_group_id")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class LogMetricFilterMetricTransformation(dict):
def __init__(__self__, *,
name: str,
namespace: str,
value: str,
default_value: Optional[str] = None):
"""
:param str name: The name of the CloudWatch metric to which the monitored log information should be published (e.g. `ErrorCount`)
:param str namespace: The destination namespace of the CloudWatch metric.
:param str value: What to publish to the metric. For example, if you're counting the occurrences of a particular term like "Error", the value will be "1" for each occurrence. If you're counting the bytes transferred the published value will be the value in the log event.
:param str default_value: The value to emit when a filter pattern does not match a log event.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "namespace", namespace)
pulumi.set(__self__, "value", value)
if default_value is not None:
pulumi.set(__self__, "default_value", default_value)
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the CloudWatch metric to which the monitored log information should be published (e.g. `ErrorCount`)
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def namespace(self) -> str:
"""
The destination namespace of the CloudWatch metric.
"""
return pulumi.get(self, "namespace")
@property
@pulumi.getter
def value(self) -> str:
"""
What to publish to the metric. For example, if you're counting the occurrences of a particular term like "Error", the value will be "1" for each occurrence. If you're counting the bytes transferred the published value will be the value in the log event.
"""
return pulumi.get(self, "value")
@property
@pulumi.getter(name="defaultValue")
def default_value(self) -> Optional[str]:
"""
The value to emit when a filter pattern does not match a log event.
"""
return pulumi.get(self, "default_value")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class MetricAlarmMetricQuery(dict):
def __init__(__self__, *,
id: str,
expression: Optional[str] = None,
label: Optional[str] = None,
metric: Optional['outputs.MetricAlarmMetricQueryMetric'] = None,
return_data: Optional[bool] = None):
"""
:param str id: A short name used to tie this object to the results in the response. If you are performing math expressions on this set of data, this name represents that data and can serve as a variable in the mathematical expression. The valid characters are letters, numbers, and underscore. The first character must be a lowercase letter.
:param str expression: The math expression to be performed on the returned data, if this object is performing a math expression. This expression can use the id of the other metrics to refer to those metrics, and can also use the id of other expressions to use the result of those expressions. For more information about metric math expressions, see Metric Math Syntax and Functions in the [Amazon CloudWatch User Guide](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html#metric-math-syntax).
:param str label: A human-readable label for this metric or expression. This is especially useful if this is an expression, so that you know what the value represents.
:param 'MetricAlarmMetricQueryMetricArgs' metric: The metric to be returned, along with statistics, period, and units. Use this parameter only if this object is retrieving a metric and not performing a math expression on returned data.
:param bool return_data: Specify exactly one `metric_query` to be `true` to use that `metric_query` result as the alarm.
"""
pulumi.set(__self__, "id", id)
if expression is not None:
pulumi.set(__self__, "expression", expression)
if label is not None:
pulumi.set(__self__, "label", label)
if metric is not None:
pulumi.set(__self__, "metric", metric)
if return_data is not None:
pulumi.set(__self__, "return_data", return_data)
@property
@pulumi.getter
def id(self) -> str:
"""
A short name used to tie this object to the results in the response. If you are performing math expressions on this set of data, this name represents that data and can serve as a variable in the mathematical expression. The valid characters are letters, numbers, and underscore. The first character must be a lowercase letter.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def expression(self) -> Optional[str]:
"""
The math expression to be performed on the returned data, if this object is performing a math expression. This expression can use the id of the other metrics to refer to those metrics, and can also use the id of other expressions to use the result of those expressions. For more information about metric math expressions, see Metric Math Syntax and Functions in the [Amazon CloudWatch User Guide](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html#metric-math-syntax).
"""
return pulumi.get(self, "expression")
@property
@pulumi.getter
def label(self) -> Optional[str]:
"""
A human-readable label for this metric or expression. This is especially useful if this is an expression, so that you know what the value represents.
"""
return pulumi.get(self, "label")
@property
@pulumi.getter
def metric(self) -> Optional['outputs.MetricAlarmMetricQueryMetric']:
"""
The metric to be returned, along with statistics, period, and units. Use this parameter only if this object is retrieving a metric and not performing a math expression on returned data.
"""
return pulumi.get(self, "metric")
@property
@pulumi.getter(name="returnData")
def return_data(self) -> Optional[bool]:
"""
Specify exactly one `metric_query` to be `true` to use that `metric_query` result as the alarm.
"""
return pulumi.get(self, "return_data")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class MetricAlarmMetricQueryMetric(dict):
def __init__(__self__, *,
metric_name: str,
period: int,
stat: str,
dimensions: Optional[Mapping[str, str]] = None,
namespace: Optional[str] = None,
unit: Optional[str] = None):
"""
:param str metric_name: The name for this metric.
See docs for [supported metrics](https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html).
:param int period: The period in seconds over which the specified `stat` is applied.
:param str stat: The statistic to apply to this metric.
Either of the following is supported: `SampleCount`, `Average`, `Sum`, `Minimum`, `Maximum`
:param Mapping[str, str] dimensions: The dimensions for this metric. For the list of available dimensions see the AWS documentation [here](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html).
:param str namespace: The namespace for this metric. See docs for the [list of namespaces](https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/aws-namespaces.html).
See docs for [supported metrics](https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html).
:param str unit: The unit for this metric.
"""
pulumi.set(__self__, "metric_name", metric_name)
pulumi.set(__self__, "period", period)
pulumi.set(__self__, "stat", stat)
if dimensions is not None:
pulumi.set(__self__, "dimensions", dimensions)
if namespace is not None:
pulumi.set(__self__, "namespace", namespace)
if unit is not None:
pulumi.set(__self__, "unit", unit)
@property
@pulumi.getter(name="metricName")
def metric_name(self) -> str:
"""
The name for this metric.
See docs for [supported metrics](https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html).
"""
return pulumi.get(self, "metric_name")
@property
@pulumi.getter
def period(self) -> int:
"""
The period in seconds over which the specified `stat` is applied.
"""
return pulumi.get(self, "period")
@property
@pulumi.getter
def stat(self) -> str:
"""
The statistic to apply to this metric.
Either of the following is supported: `SampleCount`, `Average`, `Sum`, `Minimum`, `Maximum`
"""
return pulumi.get(self, "stat")
@property
@pulumi.getter
def dimensions(self) -> Optional[Mapping[str, str]]:
"""
The dimensions for this metric. For the list of available dimensions see the AWS documentation [here](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html).
"""
return pulumi.get(self, "dimensions")
@property
@pulumi.getter
def namespace(self) -> Optional[str]:
"""
The namespace for this metric. See docs for the [list of namespaces](https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/aws-namespaces.html).
See docs for [supported metrics](https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html).
"""
return pulumi.get(self, "namespace")
@property
@pulumi.getter
def unit(self) -> Optional[str]:
"""
The unit for this metric.
"""
return pulumi.get(self, "unit")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
| [
"[email protected]"
] | |
2bed1b1c77d3da2e76f66e85623ed28cb5a2d2f2 | 5c0a19e08a9bac3e55998f14b7984c355f8ac7e4 | /gradedproject2/manage.py | cc9a25c592d4e5d676c69714292649d3232b8b58 | [] | no_license | cs-fullstack-2019-spring/django-models4-cw-cgarciapieto | f533a17c61ed48e099fe2a3adb048c4d37b55070 | 9385954101a2a05ffae6f6fb62ae2d70df453c7b | refs/heads/master | 2020-04-24T12:07:42.688722 | 2019-02-24T22:16:43 | 2019-02-24T22:16:43 | 171,946,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point for Django's command-line utility.

    Points DJANGO_SETTINGS_MODULE at this project's settings module, then
    hands ``sys.argv`` to Django's command dispatcher.
    """
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gradedproject2.settings')
    try:
        from django.core.management import execute_from_command_line as run_command_line
    except ImportError as exc:
        # Chain the original error so the real import failure stays visible.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    run_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
5136288c6e4966cb780dcf795be5c2465999260c | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_350/ch40_2019_09_08_04_20_40_849177.py | cf087c2d1de136a88bc2faab43bebab4f0e36c5f | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py |
def fatorial(n):
    """Return n! (the factorial) for a non-negative integer n; 0! == 1.

    BUG FIX: the original body read ``i``, ``fat`` and ``y`` before ever
    assigning them (``fat=1`` and ``i=1`` came after/never), so every call
    raised NameError, and the running product was never accumulated.
    Rewritten as a straightforward iterative factorial with the same
    signature.
    """
    resultado = 1
    # Multiplying from 2 upward; an empty range (n < 2) leaves 1, so 0! == 1! == 1.
    for fator in range(2, n + 1):
        resultado *= fator
    return resultado
| [
"[email protected]"
] | |
8b5f46f03fd3acf298116d84ec5c3e44a9f3af84 | a8750439f200e4efc11715df797489f30e9828c6 | /CodeForces/login.py | 785f5e468166714bb35241f17932e9b1ce0d062a | [] | no_license | rajlath/rkl_codes | f657174305dc85c3fa07a6fff1c7c31cfe6e2f89 | d4bcee3df2f501349feed7a26ef9828573aff873 | refs/heads/master | 2023-02-21T10:16:35.800612 | 2021-01-27T11:43:34 | 2021-01-27T11:43:34 | 110,989,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | a, b = [x for x in input().split()]
# Codeforces 909A "Generate Login": build the lexicographically smallest login
# from a non-empty prefix of `a` followed by a non-empty prefix of `b`
# (`a` and `b` are read from stdin on the line above).
#
# BUG FIX: the original loop kept taking a[i] whenever `b[j] < a[i]` was
# false, i.e. it also consumed letters of `a` *equal* to b[0]. Taking such a
# letter only lengthens the result and makes it larger (a="aa", b="a" gave
# "aaa" instead of the minimal "aa"). Only strictly smaller letters of `a`
# should be taken, and exactly one letter of `b` (its first) is ever needed,
# since any longer prefix of `b` extends — and therefore exceeds — the result.
ans = a[0]
i = 1
while i < len(a) and a[i] < b[0]:
    ans += a[i]
    i += 1
ans += b[0]
print(ans)
| [
"[email protected]"
] | |
d4f0c626e2bd451c7704118209afe8adf6d93c47 | 93b88de2ae87c4d7bed4d545fe38c502e84e1ba6 | /table/models.py | dee20de09b8933b6cbaa0e3a4cfd8823273031b1 | [] | no_license | jod35/empdata-table | b77fb8394f74cb71d50aeb1c2d5183d39f9fd5dd | 4bda87eb8f54b4e53c3adc534002f50a7e46c5f8 | refs/heads/master | 2020-12-20T05:23:17.126355 | 2020-01-25T05:49:20 | 2020-01-25T05:49:20 | 235,975,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | from . import db
class Employee(db.Model):
    """SQLAlchemy model: one employee row rendered in the data table."""
    # Surrogate primary key.
    id=db.Column(db.Integer(),primary_key=True)
    name=db.Column(db.String(40),nullable=False)
    age=db.Column(db.Integer(),nullable=False)
    gender=db.Column(db.String(10),nullable=False)
    salary=db.Column(db.Integer(),nullable=False)
    residence=db.Column(db.String(25),nullable=False)
    def __repr__(self):
        # Debug representation, e.g. "Employee Alice".
        return "Employee {}".format(self.name)
"[email protected]"
] | |
0d6d50fe03634a9956397e0cd037cd9f4ae7634e | 607e1b1ec5a41fd5f6cf83e7e20a1372717d2486 | /leetcode/62.py | a6d0a7914195cf7602733f2e272dab0afe4cdedd | [] | no_license | histuckyi/algorithm | 067e627e1672e858b3143440200262e0e5db495c | fb04bbd8cdb3ead707bb07abbc1688b99f7505a7 | refs/heads/master | 2023-07-08T05:22:49.049599 | 2023-06-24T07:00:25 | 2023-06-24T07:00:25 | 147,614,786 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,608 | py | """
LeetCode 62. Unique Paths
blog : https://daimhada.tistory.com/131
problem : https://leetcode.com/problems/unique-paths/submissions/
"""
class Solution:
    def uniquePaths(self, m: int, n: int) -> int:
        """Return the number of distinct right/down paths from the top-left
        to the bottom-right corner of an n-row by m-column grid.

        PERFORMANCE FIX: the previous implementation pushed every partial
        path onto an explicit stack and counted arrivals cell by cell —
        correct, but exponential in the grid size. This version keeps a
        single DP row where row[c] is the number of ways to reach column c
        of the current row, giving O(n*m) time and O(m) space with
        identical results for all m, n >= 1.
        """
        # Every cell in the first row is reachable exactly one way.
        row = [1] * m
        for _ in range(1, n):
            for c in range(1, m):
                # Paths into (r, c) = paths from above (old row[c])
                # plus paths from the left (row[c - 1]).
                row[c] += row[c - 1]
        return row[-1]
class Solution:
    def uniquePaths(self, m: int, n: int) -> int:
        """Bottom-up DP over the full n-by-m grid: the number of unique
        right/down paths into each cell is the sum of the counts for the
        cell above and the cell to the left; every cell on the top row or
        left column has exactly one path."""
        rows, cols = n, m
        # Initialise everything to 1; that is already correct for the
        # first row and first column, which the loops below never touch.
        table = [[1] * cols for _ in range(rows)]
        for r in range(1, rows):
            for c in range(1, cols):
                table[r][c] = table[r - 1][c] + table[r][c - 1]
        return table[rows - 1][cols - 1]
# Ad-hoc smoke check left by the author: a 7-wide, 3-tall grid has 28 paths.
# Note the return value is discarded — nothing is printed or asserted.
s = Solution()
s.uniquePaths(7,3)
"[email protected]"
] | |
88ac7eaa07a6e60ea86b3a2c3c89d5bdf3800eed | 7a0f0c2107019c82b693e809c1a9b912bee9d9b1 | /app/chap3_2_2/models/mkqueries.py | a6ed847f49085fe78b1ee60cf6cf84fe8ca6cc7b | [] | no_license | petershan1119/Django-Official-Practice | 352f17a4c0b03abe81af7471c4823f096868a4b5 | a24f626c28bda6024e1b5380f1f8a3c436ba5a0d | refs/heads/master | 2021-01-24T01:28:46.044910 | 2018-02-26T00:32:55 | 2018-02-26T00:32:55 | 122,808,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,955 | py | from django.db import models
__all__ = (
'Blog',
'Author',
'Entry',
)
class Blog(models.Model):
    """A blog with a required name and an optional tagline."""
    name = models.CharField(max_length=100)
    tagline = models.TextField(blank=True)
    def __str__(self):
        # str(blog) is simply the blog's name.
        return self.name
class Author(models.Model):
    """
    ## For ManyToMany fields, update via add() (p.105)
    joe = Author.objects.create(name='Joe')
    entry.authors.all()
    entry.authors.add(joe)
    """
    name = models.CharField(max_length=200)
    email = models.EmailField(blank=True)
    def __str__(self):
        # str(author) is simply the author's name.
        return self.name
class Entry(models.Model):
    """
    ## For a ForeignKey, update by plain assignment (p.105)
    b = Blog(name='Beatles Blog', tagline='All the latest Beatles news')
    b.save()
    entry = Entry.objects.create(blog=b, headline='Test entry')
    entry.blog
    entry.blog.pk
    b2 = Blog.objects.create(name='Cheddar Talk')
    entry.blog = b2
    ## Retrieving specific objects with filters (p.106)
    Entry.objects.create(blog=b, headline='2006 test entry', pub_date=date(2006, 1, 1))
    Entry.objects.filter(pub_date__year=2006)
    ## Chaining filters, example (p.107)
    b = Blog.objects.create(name='lhy Blog')
    Entry.objects.create(blog=b, headline='What\'s up', pub_date=date(2020, 1, 1))
    Entry.objects.create(blog=b, headline='What 123', pub_date=date(2000, 1, 1))
    Entry.objects.create(blog=b, headline='Whattttttt', pub_date=date(2005, 2, 1))
    ## Everything inside a single filter() call vs. Successive filter() (p.111)
    b1 = Blog.objects.create(name='Lennon and 2008')
    b2 = Blog.objects.create(name='Lennon 2008 separate')
    Entry.objects.create(blog=b1, headline='Lennon', pub_date=date(2008, 1, 1))
    Entry.objects.create(blog=b2, headline='Fastcampus', pub_date=date(2008, 1, 1))
    Entry.objects.create(blog=b2, headline='Lennon', pub_date=date(2018, 2, 19))
    Blog.objects.filter(entry__headline__contains='Lennon', entry__pub_date__year=2008)
    Blog.objects.filter(entry__headline__contains='Lennon').filter(entry__pub_date__year=2008)
    ## Comparing values between different fields (p.112)
    b = Blog.objects.create(name='F blog')
    e1 = Entry.objects.create(blog=b, headline='F entry', n_comments=10, n_pingbacks=5)
    e1.n_comments = 10
    e1.n_pingbacks = 5
    e1.save()
    e2 = Entry.objects.create(blog=b, headline='F entry2', n_comments=5, n_pingbacks=10)
    Entry.objects.filter(n_comments__gt=F('n_pingbacks'))
    """
    # Deleting the Blog cascades to its entries.
    blog = models.ForeignKey(Blog, on_delete=models.CASCADE)
    headline = models.CharField(max_length=255)
    pub_date = models.DateField(blank=True, null=True)
    # Automatically updated on every save().
    mod_date = models.DateField(auto_now=True)
    authors = models.ManyToManyField(Author, blank=True)
    n_comments = models.IntegerField(default=0)
    n_pingbacks = models.IntegerField(default=0)
    rating = models.IntegerField(default=0)
    def __str__(self):
        # str(entry) is the entry's headline.
        return self.headline
"[email protected]"
] | |
c03eaa16a3e0a5b7f3a46d2d94e6d83848e0d6e8 | 4f972877da14226125440b3da9bdb058764d8a54 | /pandasStudy/temp_opt.py | f108619a26d725634c493b10c9b32adf500d1dee | [] | no_license | ZhiYinZhang/study | 16c29990cb371e7e278c437aa0abc7c348614063 | 8c085310b4f65e36f2d84d0acda4ca257b7389af | refs/heads/master | 2021-07-09T16:05:02.925343 | 2020-06-30T07:53:05 | 2020-06-30T07:53:05 | 153,767,096 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | #-*- coding: utf-8 -*-
# @Time : 2019/3/9 14:37
# @Author : Z
# @Email : S
# @File : temp_opt.py
import pandas as pd
import json
# df.to_json(,orient="records",force_ascii=False)
# path="e:/test/json/shaoshanshi.json"
#
# df=pd.read_json(path,orient="records",lines=True)
#
# print(df)
# df.to_json("e:/test/json/shaoshanshi.csv",orient="records",force_ascii=False)
# df=pd.read_csv("E:/test/dianshang/data/cust_tel_20200110.csv",dtype=str)
#
# df.to_json("e://test/dianshang/data/cust_tel_20200110.json",orient="records")
# path="e://test//json//"
# df=pd.read_json(path+"part.json",orient="records",lines=True,encoding="utf-8",dtype=False)
#
#
# # pd.read_csv()
#
# print(df.dtypes)
#
# print(df)
# df.to_json(path+"part1.json",orient="records",force_ascii=False)
# BUG FIX: a stray `pd.read_excel()` call with no arguments used to sit here.
# `read_excel` requires its `io` (path/buffer) argument, so the call always
# raised TypeError and the CSV below was never read; the call is removed.
df = pd.read_csv("e://test//csv//test.csv", dtype=str)  # dtype=str: keep every column as text
print(df)
print(df.dtypes)
| [
"[email protected]"
] | |
c8b547b5c2825f3a201e760acb128b8fc94edaca | 14cc70fa60dfaa441aab34b083cff1bf59574264 | /opencivicdata/legislative/models/session.py | 397d1f240810a4a6ecba6cda44895ce9e76871cc | [] | permissive | tubaman/python-opencivicdata | 85434672bea6b40a417104d9381097df58b8a7b2 | 010cd72bdd806e76f342195a1f1e20acbed5a431 | refs/heads/master | 2020-07-26T13:32:22.452022 | 2019-08-20T05:56:12 | 2019-08-20T05:56:12 | 208,660,220 | 0 | 0 | BSD-3-Clause | 2019-09-15T21:33:06 | 2019-09-15T21:33:06 | null | UTF-8 | Python | false | false | 1,192 | py | from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from opencivicdata.core.models.base import RelatedBase
from opencivicdata.core.models import Jurisdiction
from ...common import SESSION_CLASSIFICATION_CHOICES
@python_2_unicode_compatible
class LegislativeSession(RelatedBase):
    """A meeting period of a legislature, belonging to one Jurisdiction."""
    jurisdiction = models.ForeignKey(Jurisdiction,
                                     related_name='legislative_sessions',
                                     # should be hard to delete Jurisdiction
                                     on_delete=models.PROTECT
                                     )
    identifier = models.CharField(max_length=100)
    name = models.CharField(max_length=300)
    # Optional; allowed values come from SESSION_CLASSIFICATION_CHOICES.
    classification = models.CharField(max_length=100, choices=SESSION_CLASSIFICATION_CHOICES,
                                      blank=True)
    # Dates are stored as strings so partial precision is allowed.
    start_date = models.CharField(max_length=10) # YYYY[-MM[-DD]]
    end_date = models.CharField(max_length=10) # YYYY[-MM[-DD]]
    def __str__(self):
        # e.g. "<jurisdiction> <session name>"
        return '{} {}'.format(self.jurisdiction, self.name)
    class Meta:
        db_table = 'opencivicdata_legislativesession'
| [
"[email protected]"
] | |
18355aa0f6375e11796880df007410c7b767cc84 | 111212d14fe7344a8635f0f8b392a657b5db27d8 | /trabajo/migrations/0001_initial.py | c32bbbabe988780ebcaa511d59900c57eef5bca3 | [] | no_license | sebasgoldberg/agencia | c83acd2cbdd2097e65c9dfb85fafbd31ed2fa8e8 | dc837d8d40183cb22231a13e408bf56b8ce168e0 | refs/heads/master | 2021-01-25T03:48:18.591880 | 2014-05-19T10:21:30 | 2014-05-19T10:21:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,493 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Productora'
db.create_table(u'trabajo_productora', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True, null=True, blank=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=60)),
('mail', self.gf('django.db.models.fields.EmailField')(max_length=75)),
('imagen', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True)),
))
db.send_create_signal(u'trabajo', ['Productora'])
# Adding model 'DireccionProductora'
db.create_table(u'trabajo_direccionproductora', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('descripcion', self.gf('django.db.models.fields.CharField')(max_length=60, null=True, blank=True)),
('pais', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cities_light.Country'], null=True, on_delete=models.PROTECT)),
('estado', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cities_light.Region'], null=True, on_delete=models.PROTECT)),
('ciudad', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cities_light.City'], null=True, on_delete=models.PROTECT)),
('barrio', self.gf('django.db.models.fields.CharField')(max_length=60, null=True, blank=True)),
('direccion', self.gf('django.db.models.fields.CharField')(max_length=120)),
('codigo_postal', self.gf('django.db.models.fields.CharField')(max_length=40, null=True, blank=True)),
('productora', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['trabajo.Productora'])),
))
db.send_create_signal(u'trabajo', ['DireccionProductora'])
# Adding model 'TelefonoProductora'
db.create_table(u'trabajo_telefonoproductora', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('compania', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['telefono.Compania'], null=True, on_delete=models.PROTECT, blank=True)),
('telefono', self.gf('django.db.models.fields.CharField')(max_length=60)),
('productora', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['trabajo.Productora'])),
))
db.send_create_signal(u'trabajo', ['TelefonoProductora'])
# Adding model 'ItemPortfolio'
db.create_table(u'trabajo_itemportfolio', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('titulo', self.gf('django.db.models.fields.CharField')(max_length=100)),
('video', self.gf('django.db.models.fields.URLField')(max_length=200, unique=True, null=True, blank=True)),
('codigo_video', self.gf('django.db.models.fields.CharField')(max_length=30, unique=True, null=True, blank=True)),
('imagen', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True)),
('fecha', self.gf('django.db.models.fields.DateField')(default=datetime.datetime(2014, 1, 16, 0, 0))),
))
db.send_create_signal(u'trabajo', ['ItemPortfolio'])
# Adding model 'Trabajo'
db.create_table(u'trabajo_trabajo', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('titulo', self.gf('django.db.models.fields.CharField')(max_length=100)),
('productora', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['trabajo.Productora'], on_delete=models.PROTECT)),
('descripcion', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('imagen', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)),
('estado', self.gf('django.db.models.fields.CharField')(max_length=2)),
('fecha_ingreso', self.gf('django.db.models.fields.DateField')(default=datetime.datetime(2014, 1, 16, 0, 0))),
('publicado', self.gf('django.db.models.fields.BooleanField')()),
))
db.send_create_signal(u'trabajo', ['Trabajo'])
# Adding model 'EventoTrabajo'
db.create_table(u'trabajo_eventotrabajo', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('descripcion', self.gf('django.db.models.fields.CharField')(max_length=60, null=True, blank=True)),
('pais', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cities_light.Country'], null=True, on_delete=models.PROTECT)),
('estado', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cities_light.Region'], null=True, on_delete=models.PROTECT)),
('ciudad', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cities_light.City'], null=True, on_delete=models.PROTECT)),
('barrio', self.gf('django.db.models.fields.CharField')(max_length=60, null=True, blank=True)),
('direccion', self.gf('django.db.models.fields.CharField')(max_length=120)),
('codigo_postal', self.gf('django.db.models.fields.CharField')(max_length=40, null=True, blank=True)),
('fecha', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2014, 1, 16, 0, 0), null=True, blank=True)),
('tipo', self.gf('django.db.models.fields.CharField')(max_length=1)),
('trabajo', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['trabajo.Trabajo'], on_delete=models.PROTECT)),
))
db.send_create_signal(u'trabajo', ['EventoTrabajo'])
# Adding model 'Rol'
db.create_table(u'trabajo_rol', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('trabajo', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['trabajo.Trabajo'], on_delete=models.PROTECT)),
('descripcion', self.gf('django.db.models.fields.CharField')(max_length=60)),
('cache', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=15, decimal_places=4, blank=True)),
('caracteristicas', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal(u'trabajo', ['Rol'])
# Adding unique constraint on 'Rol', fields ['trabajo', 'descripcion']
db.create_unique(u'trabajo_rol', ['trabajo_id', 'descripcion'])
# Adding model 'EventoRol'
db.create_table(u'trabajo_eventorol', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('descripcion', self.gf('django.db.models.fields.CharField')(max_length=60, null=True, blank=True)),
('pais', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cities_light.Country'], null=True, on_delete=models.PROTECT)),
('estado', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cities_light.Region'], null=True, on_delete=models.PROTECT)),
('ciudad', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cities_light.City'], null=True, on_delete=models.PROTECT)),
('barrio', self.gf('django.db.models.fields.CharField')(max_length=60, null=True, blank=True)),
('direccion', self.gf('django.db.models.fields.CharField')(max_length=120)),
('codigo_postal', self.gf('django.db.models.fields.CharField')(max_length=40, null=True, blank=True)),
('fecha', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2014, 1, 16, 0, 0), null=True, blank=True)),
('tipo', self.gf('django.db.models.fields.CharField')(max_length=1)),
('rol', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['trabajo.Rol'], on_delete=models.PROTECT)),
))
db.send_create_signal(u'trabajo', ['EventoRol'])
# Adding model 'Postulacion'
db.create_table(u'trabajo_postulacion', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('agenciado', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['agencia.Agenciado'], on_delete=models.PROTECT)),
('rol', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['trabajo.Rol'], on_delete=models.PROTECT)),
('estado', self.gf('django.db.models.fields.CharField')(max_length=2)),
))
db.send_create_signal(u'trabajo', ['Postulacion'])
# Adding unique constraint on 'Postulacion', fields ['agenciado', 'rol']
db.create_unique(u'trabajo_postulacion', ['agenciado_id', 'rol_id'])
def backwards(self, orm):
# Removing unique constraint on 'Postulacion', fields ['agenciado', 'rol']
db.delete_unique(u'trabajo_postulacion', ['agenciado_id', 'rol_id'])
# Removing unique constraint on 'Rol', fields ['trabajo', 'descripcion']
db.delete_unique(u'trabajo_rol', ['trabajo_id', 'descripcion'])
# Deleting model 'Productora'
db.delete_table(u'trabajo_productora')
# Deleting model 'DireccionProductora'
db.delete_table(u'trabajo_direccionproductora')
# Deleting model 'TelefonoProductora'
db.delete_table(u'trabajo_telefonoproductora')
# Deleting model 'ItemPortfolio'
db.delete_table(u'trabajo_itemportfolio')
# Deleting model 'Trabajo'
db.delete_table(u'trabajo_trabajo')
# Deleting model 'EventoTrabajo'
db.delete_table(u'trabajo_eventotrabajo')
# Deleting model 'Rol'
db.delete_table(u'trabajo_rol')
# Deleting model 'EventoRol'
db.delete_table(u'trabajo_eventorol')
# Deleting model 'Postulacion'
db.delete_table(u'trabajo_postulacion')
models = {
u'agencia.agenciado': {
'Meta': {'ordering': "['nombre', 'apellido']", 'object_name': 'Agenciado'},
'activo': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'altura': ('django.db.models.fields.FloatField', [], {}),
'apellido': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'calzado': ('django.db.models.fields.IntegerField', [], {}),
'como_nos_conocio': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'cuenta_bancaria': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'danzas': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['perfil.Danza']", 'symmetrical': 'False', 'blank': 'True'}),
'deportes': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['perfil.Deporte']", 'symmetrical': 'False', 'blank': 'True'}),
'documento_cpf': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True', 'blank': 'True'}),
'documento_rg': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'estado_dientes': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['perfil.EstadoDientes']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'fecha_ingreso': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2014, 1, 16, 0, 0)'}),
'fecha_nacimiento': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'idiomas': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['perfil.Idioma']", 'symmetrical': 'False', 'blank': 'True'}),
'indicador_maneja': ('django.db.models.fields.BooleanField', [], {}),
'indicador_tiene_registro': ('django.db.models.fields.BooleanField', [], {}),
'instrumentos': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['perfil.Instrumento']", 'symmetrical': 'False', 'blank': 'True'}),
'mail': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'observaciones': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'ojos': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['perfil.Ojos']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'pelo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['perfil.Pelo']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'peso': ('django.db.models.fields.FloatField', [], {}),
'piel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['perfil.Piel']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'recurso_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'responsable': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'}),
'sexo': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'talle': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['perfil.Talle']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'talle_camisa': ('django.db.models.fields.IntegerField', [], {}),
'talle_pantalon': ('django.db.models.fields.IntegerField', [], {}),
'trabaja_como_extra': ('django.db.models.fields.BooleanField', [], {}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'cities_light.city': {
'Meta': {'ordering': "['name']", 'unique_together': "(('region', 'name'),)", 'object_name': 'City'},
'alternate_names': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities_light.Country']"}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'feature_code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '10', 'null': 'True', 'blank': 'True'}),
'geoname_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'name_ascii': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
'population': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities_light.Region']", 'null': 'True', 'blank': 'True'}),
'search_names': ('cities_light.models.ToSearchTextField', [], {'default': "''", 'max_length': '4000', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': "'name_ascii'"})
},
u'cities_light.country': {
'Meta': {'ordering': "['name']", 'object_name': 'Country'},
'alternate_names': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'code2': ('django.db.models.fields.CharField', [], {'max_length': '2', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'code3': ('django.db.models.fields.CharField', [], {'max_length': '3', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'continent': ('django.db.models.fields.CharField', [], {'max_length': '2', 'db_index': 'True'}),
'geoname_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'name_ascii': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': "'name_ascii'"}),
'tld': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '5', 'blank': 'True'})
},
u'cities_light.region': {
'Meta': {'ordering': "['name']", 'unique_together': "(('country', 'name'),)", 'object_name': 'Region'},
'alternate_names': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities_light.Country']"}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'geoname_code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'geoname_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'name_ascii': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': "'name_ascii'"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'perfil.danza': {
'Meta': {'ordering': "['descripcion']", 'object_name': 'Danza'},
'descripcion': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'perfil.deporte': {
'Meta': {'ordering': "['descripcion']", 'object_name': 'Deporte'},
'descripcion': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'perfil.estadodientes': {
'Meta': {'ordering': "['descripcion']", 'object_name': 'EstadoDientes'},
'descripcion': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'perfil.idioma': {
'Meta': {'ordering': "['descripcion']", 'object_name': 'Idioma'},
'descripcion': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'perfil.instrumento': {
'Meta': {'ordering': "['descripcion']", 'object_name': 'Instrumento'},
'descripcion': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'perfil.ojos': {
'Meta': {'ordering': "['descripcion']", 'object_name': 'Ojos'},
'descripcion': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'perfil.pelo': {
'Meta': {'ordering': "['descripcion']", 'object_name': 'Pelo'},
'descripcion': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'perfil.piel': {
'Meta': {'ordering': "['descripcion']", 'object_name': 'Piel'},
'descripcion': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'perfil.talle': {
'Meta': {'ordering': "['descripcion']", 'object_name': 'Talle'},
'descripcion': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'telefono.compania': {
'Meta': {'ordering': "['descripcion']", 'object_name': 'Compania'},
'descripcion': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'trabajo.direccionproductora': {
'Meta': {'object_name': 'DireccionProductora'},
'barrio': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True', 'blank': 'True'}),
'ciudad': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities_light.City']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'codigo_postal': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'descripcion': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True', 'blank': 'True'}),
'direccion': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'estado': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities_light.Region']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities_light.Country']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'productora': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trabajo.Productora']"})
},
u'trabajo.eventorol': {
'Meta': {'object_name': 'EventoRol'},
'barrio': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True', 'blank': 'True'}),
'ciudad': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities_light.City']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'codigo_postal': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'descripcion': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True', 'blank': 'True'}),
'direccion': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'estado': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities_light.Region']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'fecha': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 1, 16, 0, 0)', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities_light.Country']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'rol': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trabajo.Rol']", 'on_delete': 'models.PROTECT'}),
'tipo': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
u'trabajo.eventotrabajo': {
'Meta': {'object_name': 'EventoTrabajo'},
'barrio': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True', 'blank': 'True'}),
'ciudad': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities_light.City']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'codigo_postal': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'descripcion': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True', 'blank': 'True'}),
'direccion': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'estado': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities_light.Region']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'fecha': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 1, 16, 0, 0)', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities_light.Country']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'tipo': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'trabajo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trabajo.Trabajo']", 'on_delete': 'models.PROTECT'})
},
u'trabajo.itemportfolio': {
'Meta': {'ordering': "['-fecha']", 'object_name': 'ItemPortfolio'},
'codigo_video': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'fecha': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2014, 1, 16, 0, 0)'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imagen': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'titulo': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'video': ('django.db.models.fields.URLField', [], {'max_length': '200', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'trabajo.postulacion': {
'Meta': {'ordering': "['-rol__trabajo__fecha_ingreso', 'rol__descripcion', 'agenciado__nombre', 'agenciado__apellido']", 'unique_together': "(('agenciado', 'rol'),)", 'object_name': 'Postulacion'},
'agenciado': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['agencia.Agenciado']", 'on_delete': 'models.PROTECT'}),
'estado': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rol': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trabajo.Rol']", 'on_delete': 'models.PROTECT'})
},
u'trabajo.productora': {
'Meta': {'ordering': "['nombre']", 'object_name': 'Productora'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imagen': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'mail': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'trabajo.rol': {
'Meta': {'ordering': "['-trabajo__fecha_ingreso', 'descripcion']", 'unique_together': "(('trabajo', 'descripcion'),)", 'object_name': 'Rol'},
'cache': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '4', 'blank': 'True'}),
'caracteristicas': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'descripcion': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'trabajo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trabajo.Trabajo']", 'on_delete': 'models.PROTECT'})
},
u'trabajo.telefonoproductora': {
'Meta': {'object_name': 'TelefonoProductora'},
'compania': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['telefono.Compania']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'productora': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trabajo.Productora']"}),
'telefono': ('django.db.models.fields.CharField', [], {'max_length': '60'})
},
u'trabajo.trabajo': {
'Meta': {'ordering': "['-fecha_ingreso']", 'object_name': 'Trabajo'},
'descripcion': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'estado': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'fecha_ingreso': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2014, 1, 16, 0, 0)'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imagen': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'productora': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trabajo.Productora']", 'on_delete': 'models.PROTECT'}),
'publicado': ('django.db.models.fields.BooleanField', [], {}),
'titulo': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['trabajo'] | [
"[email protected]"
] | |
4f086d0abd4fee89dc9252a3a4212d6653a80f19 | 2dc17d12ff6ea9794177c81aa4f385e4e09a4aa5 | /archive/1467. Probability of a Two Boxes Having The Same Number of Distinct Balls.py | 5becc6fac00c3d0f19e7da6a06a9d4ace6447378 | [] | no_license | doraemon1293/Leetcode | 924b19f840085a80a9e8c0092d340b69aba7a764 | 48ba21799f63225c104f649c3871444a29ab978a | refs/heads/master | 2022-10-01T16:20:07.588092 | 2022-09-08T02:44:56 | 2022-09-08T02:44:56 | 122,086,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,261 | py | from typing import List
from functools import lru_cache
class Solution:
    def getProbability(self, balls: List[int]) -> float:
        """Return the probability that both boxes get the same number of
        distinct colors.

        ``sum(balls)`` balls (``balls[i]`` of color ``i``) are shuffled
        uniformly at random; the first half goes into box 1 and the rest
        into box 2.  The probability that a particular multiset of counts
        ``k`` lands in box 1 is ``prod(C(balls[i], k[i])) / C(total, half)``,
        so we sum that weight over every split where both boxes end up with
        an equal number of distinct colors.

        Args:
            balls: number of balls of each color.

        Returns:
            Probability as a float in [0, 1].
        """
        from math import comb  # stdlib binomial; avoids hand-rolled version

        total = sum(balls)
        half = total // 2
        n_colors = len(balls)

        def count_splits(color, space, ways, distinct1, distinct2):
            # Enumerate how many balls of each color go into box 1.
            #   space: remaining capacity of box 1
            #   ways: product of C(balls[i], k_i) chosen so far
            #   distinct1/distinct2: distinct colors committed to each box
            if space < 0:
                return 0  # box 1 overfilled; prune
            if color == n_colors:
                if space == 0 and distinct1 == distinct2:
                    return ways
                return 0
            acc = 0
            for k in range(balls[color] + 1):
                acc += count_splits(
                    color + 1,
                    space - k,
                    ways * comb(balls[color], k),
                    distinct1 + (k > 0),
                    distinct2 + (k < balls[color]),
                )
            return acc

        # Attributes kept for backward compatibility with the previous
        # accumulator-based implementation (num / den == probability).
        self.num = count_splits(0, half, 1, 0, 0)
        self.den = comb(total, half)
        return self.num / self.den
if __name__ == '__main__':
    # Smoke test on a large input (8 colors, 6 balls each).  The previous
    # version executed at import time and carried two dead reassignments
    # of `balls` that were immediately overwritten.
    balls = [6, 6, 6, 6, 6, 6, 6, 6]
    print(Solution().getProbability(balls))
| [
"19241008o"
] | 19241008o |
b90c7a68490243757448c83d51d4eae5a3c86fad | 8f6cc0e8bd15067f1d9161a4b178383e62377bc7 | /ppo_baseline_DMB/WORKINGON/easy_ppo_v2/storage.py | 0bd79023734c597fa209870d6297b8372a5c8253 | [] | no_license | humorbeing/python_github | 9c4dfc61a3cefbb266fefff335f6b28d05797e5e | e4b4b49bee7e7e3843c6874717779ce8d619bd02 | refs/heads/master | 2023-01-22T21:51:20.193131 | 2020-01-26T21:47:23 | 2020-01-26T21:47:23 | 163,707,778 | 0 | 0 | null | 2022-12-27T15:37:48 | 2019-01-01T01:58:18 | Python | UTF-8 | Python | false | false | 7,531 | py | import torch
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
import numpy as np
def ss(s=''):
    """Print *s* framed by dashed separator rules, then stop the process.

    Debug helper: after dumping the (optional) message it raises
    ``SystemExit`` (exit code 0) so execution halts at the call site.
    """
    rule = ' ---' * 15
    for line in ('', rule, rule, '', s, '', rule, rule, ''):
        print(line)
    import sys
    sys.exit()
def _flatten_helper(T, N, _tensor):
return _tensor.view(T * N, *_tensor.size()[2:])
class RolloutStorage(object):
    """Fixed-size buffer of rollout transitions for PPO-style training.

    Stores ``num_steps`` transitions for ``num_processes`` parallel
    environments.  Buffers that need a bootstrap slot after the final
    transition (obs, value_preds, returns, masks, bad_masks) are allocated
    with ``num_steps + 1`` time entries.

    NOTE(review): ``__init__`` allocates numpy arrays, yet ``to``,
    ``after_update``, ``compute_returns`` and both generators use
    torch.Tensor APIs (``.to()``, ``.copy_()``, ``.size()``, ``.view()``,
    ``torch.stack``) that numpy ndarrays do not provide.  The class looks
    only partially ported between torch and numpy storage; confirm which
    backend is intended before relying on those methods.
    """
    def __init__(self, num_steps, num_processes, obs_shape):
        """Allocate zero/one-filled buffers for one rollout window."""
        # +1 time slot for the bootstrap observation after the last step.
        self.obs = np.zeros(shape=(num_steps + 1, num_processes, *obs_shape))
        self.rewards = np.zeros(shape=(num_steps, num_processes, 1))
        self.value_preds = np.zeros(shape=(num_steps + 1, num_processes, 1))
        self.returns = np.zeros(shape=(num_steps + 1, num_processes, 1))
        self.action_log_probs = np.zeros(shape=(num_steps, num_processes, 1))
        # Hard-coded scalar action per step (single action component).
        action_shape = 1
        self.actions = np.zeros(shape=(num_steps, num_processes, action_shape))
        # masks: presumably 1.0 while an episode continues, 0.0 at terminal
        # steps; bad_masks presumably flags time-limit truncation -- TODO
        # confirm against the caller that fills them.
        self.masks = np.ones(shape=(num_steps + 1, num_processes, 1))
        self.bad_masks = np.ones(shape=(num_steps + 1, num_processes, 1))
        self.num_steps = num_steps
        # Next write index along the (wrapping) time dimension.
        self.step = 0
    def to(self, device):
        """Move every buffer to *device*.

        NOTE(review): ``.to(device)`` is a torch.Tensor method and the
        buffers allocated above are numpy arrays; additionally
        ``self.recurrent_hidden_states`` is never initialized in
        ``__init__`` -- as written this method would raise AttributeError.
        """
        self.obs = self.obs.to(device)
        self.recurrent_hidden_states = self.recurrent_hidden_states.to(device)
        self.rewards = self.rewards.to(device)
        self.value_preds = self.value_preds.to(device)
        self.returns = self.returns.to(device)
        self.action_log_probs = self.action_log_probs.to(device)
        self.actions = self.actions.to(device)
        self.masks = self.masks.to(device)
        self.bad_masks = self.bad_masks.to(device)
    def insert(self, obs, actions, action_log_probs,
               value_preds, rewards, masks, bad_masks):
        """Write one transition for all processes at the current step.

        Time-shifted buffers (obs/masks/bad_masks) are written at
        ``step + 1`` because index ``step`` holds the pre-action values.
        ``step`` wraps modulo ``num_steps``.
        """
        np.copyto(self.obs[self.step + 1], obs)
        np.copyto(self.actions[self.step], actions)
        np.copyto(self.action_log_probs[self.step], action_log_probs)
        np.copyto(self.value_preds[self.step], value_preds)
        np.copyto(self.rewards[self.step], rewards)
        np.copyto(self.masks[self.step + 1], masks)
        np.copyto(self.bad_masks[self.step + 1], bad_masks)
        self.step = (self.step + 1) % self.num_steps
    def after_update(self):
        """Carry the final bootstrap slot over as step 0 of the next rollout.

        NOTE(review): ``.copy_`` is a torch in-place op; on the numpy
        buffers allocated in ``__init__`` this would raise AttributeError.
        """
        self.obs[0].copy_(self.obs[-1])
        self.masks[0].copy_(self.masks[-1])
        self.bad_masks[0].copy_(self.bad_masks[-1])
    def compute_returns(self,
                        next_value,
                        gamma):
        """Fill ``returns`` with discounted returns bootstrapped from *next_value*.

        ``masks`` zeroes the recursion across episode boundaries.
        NOTE(review): ``self.rewards.size(0)`` treats ``size`` as callable;
        on a numpy array ``size`` is an int attribute, so this would raise
        TypeError (torch semantics again).
        """
        self.returns[-1] = next_value
        for step in reversed(range(self.rewards.size(0))):
            self.returns[step] = self.returns[step + 1] * \
                gamma * self.masks[step + 1] + self.rewards[step]
    def feed_forward_generator(self,
                               advantages,
                               num_mini_batch=None,
                               mini_batch_size=None):
        """Yield flattened (T*N) minibatches for a feed-forward policy update.

        Either *num_mini_batch* or *mini_batch_size* must be provided; when
        only *num_mini_batch* is given the batch is split evenly.
        NOTE(review): relies on torch ``.size()``/``.view()`` on the
        numpy-allocated buffers (see class docstring).
        """
        num_steps, num_processes = self.rewards.size()[0:2]
        batch_size = num_processes * num_steps
        if mini_batch_size is None:
            assert batch_size >= num_mini_batch, (
                "PPO requires the number of processes ({}) "
                "* number of steps ({}) = {} "
                "to be greater than or equal to the number of PPO mini batches ({})."
                "".format(num_processes, num_steps, num_processes * num_steps,
                          num_mini_batch))
            mini_batch_size = batch_size // num_mini_batch
        sampler = BatchSampler(
            SubsetRandomSampler(range(batch_size)),
            mini_batch_size,
            drop_last=True)
        for indices in sampler:
            obs_batch = self.obs[:-1].view(-1, *self.obs.size()[2:])[indices]
            actions_batch = self.actions.view(-1,
                                              self.actions.size(-1))[indices]
            value_preds_batch = self.value_preds[:-1].view(-1, 1)[indices]
            return_batch = self.returns[:-1].view(-1, 1)[indices]
            masks_batch = self.masks[:-1].view(-1, 1)[indices]
            old_action_log_probs_batch = self.action_log_probs.view(-1,
                                                                    1)[indices]
            if advantages is None:
                adv_targ = None
            else:
                adv_targ = advantages.view(-1, 1)[indices]
            yield obs_batch, actions_batch,\
                value_preds_batch, return_batch,\
                masks_batch, old_action_log_probs_batch,\
                adv_targ
    def recurrent_generator(self, advantages, num_mini_batch):
        """Yield per-environment sequence minibatches for a recurrent policy.

        Environments are shuffled and grouped; each yielded batch keeps
        temporal order so hidden states can be re-unrolled.
        NOTE(review): uses ``self.recurrent_hidden_states``, which is never
        initialized in ``__init__``, plus torch-only APIs on the numpy
        buffers (see class docstring).
        """
        num_processes = self.rewards.size(1)
        assert num_processes >= num_mini_batch, (
            "PPO requires the number of processes ({}) "
            "to be greater than or equal to the number of "
            "PPO mini batches ({}).".format(num_processes, num_mini_batch))
        num_envs_per_batch = num_processes // num_mini_batch
        perm = torch.randperm(num_processes)
        for start_ind in range(0, num_processes, num_envs_per_batch):
            obs_batch = []
            recurrent_hidden_states_batch = []
            actions_batch = []
            value_preds_batch = []
            return_batch = []
            masks_batch = []
            old_action_log_probs_batch = []
            adv_targ = []
            for offset in range(num_envs_per_batch):
                ind = perm[start_ind + offset]
                obs_batch.append(self.obs[:-1, ind])
                recurrent_hidden_states_batch.append(
                    self.recurrent_hidden_states[0:1, ind])
                actions_batch.append(self.actions[:, ind])
                value_preds_batch.append(self.value_preds[:-1, ind])
                return_batch.append(self.returns[:-1, ind])
                masks_batch.append(self.masks[:-1, ind])
                old_action_log_probs_batch.append(
                    self.action_log_probs[:, ind])
                adv_targ.append(advantages[:, ind])
            T, N = self.num_steps, num_envs_per_batch
            # These are all tensors of size (T, N, -1)
            obs_batch = torch.stack(obs_batch, 1)
            actions_batch = torch.stack(actions_batch, 1)
            value_preds_batch = torch.stack(value_preds_batch, 1)
            return_batch = torch.stack(return_batch, 1)
            masks_batch = torch.stack(masks_batch, 1)
            old_action_log_probs_batch = torch.stack(
                old_action_log_probs_batch, 1)
            adv_targ = torch.stack(adv_targ, 1)
            # States is just a (N, -1) tensor
            recurrent_hidden_states_batch = torch.stack(
                recurrent_hidden_states_batch, 1).view(N, -1)
            # Flatten the (T, N, ...) tensors to (T * N, ...)
            obs_batch = _flatten_helper(T, N, obs_batch)
            actions_batch = _flatten_helper(T, N, actions_batch)
            value_preds_batch = _flatten_helper(T, N, value_preds_batch)
            return_batch = _flatten_helper(T, N, return_batch)
            masks_batch = _flatten_helper(T, N, masks_batch)
            old_action_log_probs_batch = _flatten_helper(T, N, \
                old_action_log_probs_batch)
            adv_targ = _flatten_helper(T, N, adv_targ)
            yield obs_batch, recurrent_hidden_states_batch, actions_batch, \
                value_preds_batch, return_batch, masks_batch, old_action_log_probs_batch, adv_targ
"[email protected]"
] | |
66b1f7ab8b33518cd88195b541716565248d3e8e | 2734b77a68f6d7e22e8b823418ad1c59fe1a34af | /opengever/document/behaviors/__init__.py | 203ebd83e1f3d6ecb246888b2fffc589e66ad832 | [] | no_license | 4teamwork/opengever.core | 5963660f5f131bc12fd0a5898f1d7c8f24a5e2b1 | a01bec6c00d203c21a1b0449f8d489d0033c02b7 | refs/heads/master | 2023-08-30T23:11:27.914905 | 2023-08-25T14:27:15 | 2023-08-25T14:27:15 | 9,788,097 | 19 | 8 | null | 2023-09-14T13:28:56 | 2013-05-01T08:28:16 | Python | UTF-8 | Python | false | false | 174 | py | from zope.interface import Interface
class IBaseDocument(Interface):
    """Marker interface for objects with a document-like type
    (og.document, ftw.mail.mail) etc.

    The interface body is intentionally empty: providing it only declares
    that an object should be treated as a document; it adds no attributes
    or methods of its own.
    """
| [
"[email protected]"
] | |
3a6ecf79f1d71f56398219969add0d7eaa07bd92 | 908bba8bdc246d665d6b22e3a8b91720c34054e7 | /whatsapp-sentiment.py | e7af36895172fa9f736ffba1bc4ba56d53798139 | [
"Apache-2.0"
"""Plot per-message sentiment polarity for two WhatsApp chat participants.

Parses a WhatsApp chat export ("<timestamp> - <name>: <message>" per line)
and draws one scatter trace of TextBlob sentiment polarity per participant.
"""
from textblob import TextBlob
from plotly.offline import plot
import plotly.graph_objs as go
import random

user1 = "Bob"
user2 = 'Alice'

# Participant name -> list of per-message sentiment polarities.
d = {user1: [], user2: []}

with open('chat_sample.txt') as f:  # read-only; 'r+' write access was unused
    for line in f:
        # Split on the FIRST '-' / ':' only: the previous split()/''.join()
        # pair silently deleted every '-' and ':' inside the message body,
        # corrupting the text fed to the sentiment analyzer.
        _timestamp, _, text = line.partition('-')
        name, _, chat = text.partition(':')
        name = name.strip()
        if name == user1 or name == user2:
            # Only analyze messages from the two tracked users (the old
            # code ran TextBlob on every line before filtering).
            d[name].append(TextBlob(chat).sentiment.polarity)


def _marker_colors(count):
    """Return *count* distinct random color indices, one per point.

    The previous version hard-coded sample sizes (9000/8000), which breaks
    whenever the chat holds a different number of messages.
    """
    return random.sample(range(count), count)


y1 = d[user1][:9000]
trace1 = go.Scatter(
    y=y1,
    name=user1,
    mode='markers',
    marker=dict(
        size='8',
        colorscale='Picnic',
        color=_marker_colors(len(y1)),
    ),
)

y2 = d[user2]
trace2 = go.Scatter(
    y=y2,
    name=user2,
    mode='markers',
    marker=dict(
        size='7',
        color=_marker_colors(len(y2)),
        colorscale='Electric',
    ),
)

data = [trace1, trace2]
plot(data)
"[email protected]"
] | |
b278f7784694cab7b0f6e4c0ae2aa4bf7f6d02af | 0e083f405af00029c9ec31849f0f7f81c56844b5 | /configs/mmseg/segmentation_sdk_dynamic.py | bfb033efed815d9f803ec76bca1feeee792fd4fd | [
"Apache-2.0"
] | permissive | open-mmlab/mmdeploy | 39b9e7b611caab2c76a6142fcb99f0bf1d92ad24 | 5479c8774f5b88d7ed9d399d4e305cb42cc2e73a | refs/heads/main | 2023-09-01T21:29:25.315371 | 2023-08-31T09:59:29 | 2023-08-31T09:59:29 | 441,467,833 | 2,164 | 605 | Apache-2.0 | 2023-09-14T10:39:04 | 2021-12-24T13:04:44 | Python | UTF-8 | Python | false | false | 307 | py | _base_ = ['./segmentation_dynamic.py', '../_base_/backends/sdk.py']
codebase_config = dict(model_type='sdk')
backend_config = dict(pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(
type='PackSegInputs', meta_keys=['img_path', 'ori_shape', 'img_shape'])
])
| [
"[email protected]"
] | |
b6002bc250faf4ddfd8640d2a7ed44bf9176c3ec | 36785c0893ab1e2c81c6a03305f42459776a84e0 | /ambra_sdk/request_args.py | e29318245d880cd3dec5ab930e8d16a232ac1280 | [
"Apache-2.0"
] | permissive | dicomgrid/sdk-python | 06589f87f33850bd15e6e99fb683bada6492775f | 2618e682d38339439340d86080e8bc6ee6cf21b5 | refs/heads/master | 2022-08-28T14:50:35.864012 | 2022-08-22T12:36:50 | 2022-08-22T12:36:50 | 253,867,502 | 11 | 6 | Apache-2.0 | 2022-04-13T10:06:38 | 2020-04-07T17:36:56 | HTML | UTF-8 | Python | false | false | 5,637 | py | """Request args."""
from datetime import date
from json import JSONEncoder
from json import dumps as json_dumps
from typing import Any, Dict, Iterable, Mapping, Optional
import aiohttp
from aiohttp.helpers import sentinel
class Encoder(JSONEncoder):
    """JSON encoder that also serializes ``date``/``datetime`` values.

    Dates render as ``'%Y-%m-%d %H:%M:%S'`` strings; every other type
    falls back to the standard ``JSONEncoder`` handling.
    """

    def default(self, el: Any):
        """Return a JSON-serializable form of *el*.

        :param el: object the base encoder could not serialize
        :return: encoded el
        """
        if not isinstance(el, date):
            return super().default(el)
        return el.strftime('%Y-%m-%d %H:%M:%S')
def cast_argument(arg: Any) -> Any:
    """Convert *arg* into a wire-friendly representation.

    Dates become ``'%Y-%m-%d %H:%M:%S'`` strings, lists and dicts are JSON
    encoded via :class:`Encoder`; any other value passes through untouched.

    :param arg: value to convert
    :return: converted value
    """
    if isinstance(arg, date):
        return arg.strftime('%Y-%m-%d %H:%M:%S')
    if isinstance(arg, (list, dict)):
        return json_dumps(arg, cls=Encoder)
    return arg
def cast_arguments(args: Dict[str, Any]) -> Dict[str, str]:
    """Cast every value in *args* through :func:`cast_argument`.

    :param args: mapping of argument name to raw value
    :return: mapping of argument name to casted value
    """
    return {
        arg_name: cast_argument(arg_value)
        for arg_name, arg_value in args.items()
    }
class RequestArgs:  # NOQA:WPS230
    """Bag of arguments for a ``requests``-style HTTP request.

    Field names mirror the keyword arguments of ``requests.request``.
    """

    def __init__(  # NOQA:D107,WPS211
        self,
        method: str,
        url: str,
        full_url: str,
        params: Optional[Any] = None,  # NOQA:WPS110
        data: Optional[Any] = None,  # NOQA:WPS110
        json: Optional[Any] = None,
        headers: Optional[Any] = None,
        cookies: Optional[Any] = None,
        files: Optional[Any] = None,
        auth: Optional[Any] = None,
        timeout: Optional[Any] = None,
        allow_redirects: Optional[Any] = None,
        proxies: Optional[Any] = None,
        verify: Optional[Any] = None,
        stream: Optional[Any] = None,
        cert: Optional[Any] = None,
    ):  # NOQA: DAR101
        """Init."""
        # Assign in a fixed order so __dict__ (and therefore to_dict)
        # keeps a stable field ordering.
        named_args = (
            ('method', method),
            ('url', url),
            ('full_url', full_url),
            ('params', params),
            ('data', data),
            ('json', json),
            ('headers', headers),
            ('cookies', cookies),
            ('files', files),
            ('auth', auth),
            ('timeout', timeout),
            ('allow_redirects', allow_redirects),
            ('proxies', proxies),
            ('verify', verify),
            ('stream', stream),
            ('cert', cert),
        )
        for field_name, field_value in named_args:
            setattr(self, field_name, field_value)

    def to_dict(self):
        """Return a shallow dict copy of all request arguments.

        :return: dict repr
        """
        return dict(self.__dict__)

    def dict_optional_args(
        self,
        autocast_arguments_to_string: bool,
    ):
        """Return only the optional request parameters.

        Drops the positional fields (method, url, full_url) and, when
        requested, casts ``data`` values to strings.

        :param autocast_arguments_to_string: autocast arguments to string
        :return: dict of request optional parameters
        """
        optional_args = self.to_dict()
        for positional_field in ('method', 'url', 'full_url'):
            optional_args.pop(positional_field)
        if optional_args.get('data') is not None and autocast_arguments_to_string:
            optional_args['data'] = cast_arguments(  # NOQA:WPS110
                optional_args['data'],
            )
        return optional_args
class AioHTTPRequestArgs:  # NOQA:WPS230
    """Bag of arguments for an ``aiohttp``-style HTTP request.

    Field names mirror the keyword arguments of
    ``aiohttp.ClientSession.request``.
    """

    def __init__(  # NOQA:D107,WPS211
        self,
        method: str,
        url: str,
        full_url: str,
        params: Optional[Mapping[str, str]] = None,  # NOQA:WPS110
        data: Any = None,  # NOQA:WPS110
        json: Any = None,
        cookies=None,
        headers=None,
        skip_auto_headers: Optional[Iterable[str]] = None,
        auth: Optional[aiohttp.BasicAuth] = None,
        allow_redirects: bool = True,
        max_redirects: int = 10,
        compress: Optional[str] = None,
        chunked: Optional[bool] = None,
        expect100: bool = False,
        raise_for_status=None,
        read_until_eof: bool = True,
        proxy: Optional[str] = None,
        proxy_auth: Optional[aiohttp.BasicAuth] = None,
        timeout=sentinel,
        ssl=None,
        proxy_headers=None,
        trace_request_ctx=None,
    ):
        """Init."""
        # Assign in a fixed order so __dict__ (and therefore to_dict)
        # keeps a stable field ordering.
        named_args = (
            ('method', method),
            ('url', url),
            ('full_url', full_url),
            ('params', params),
            ('data', data),
            ('json', json),
            ('cookies', cookies),
            ('headers', headers),
            ('skip_auto_headers', skip_auto_headers),
            ('auth', auth),
            ('allow_redirects', allow_redirects),
            ('max_redirects', max_redirects),
            ('compress', compress),
            ('chunked', chunked),
            ('expect100', expect100),
            ('raise_for_status', raise_for_status),
            ('read_until_eof', read_until_eof),
            ('proxy', proxy),
            ('proxy_auth', proxy_auth),
            ('timeout', timeout),
            ('ssl', ssl),
            ('proxy_headers', proxy_headers),
            ('trace_request_ctx', trace_request_ctx),
        )
        for field_name, field_value in named_args:
            setattr(self, field_name, field_value)

    def to_dict(self):
        """Return a shallow dict copy of all request arguments.

        :return: dict repr
        """
        return dict(self.__dict__)

    def dict_optional_args(
        self,
        autocast_arguments_to_string: bool,
    ):
        """Return only the optional request parameters.

        Drops the positional fields (method, url, full_url) and, when
        requested, casts ``data`` values to strings.

        :param autocast_arguments_to_string: autocast arguments to string
        :return: dict of request optional parameters
        """
        optional_args = self.to_dict()
        for positional_field in ('method', 'url', 'full_url'):
            optional_args.pop(positional_field)
        if optional_args.get('data') is not None and autocast_arguments_to_string:
            optional_args['data'] = cast_arguments(  # NOQA:WPS110
                optional_args['data'],
            )
        return optional_args
| [
"[email protected]"
] | |
24aa177bffbefe30593f636267f6ed4a2bbc8b73 | 3a6d382503e11753dd81b291145847a2eabb8ec6 | /experimental/compilers/reachability/llvm_util_test.py | 1b152a96e77ea6c4ead37e28fb642958074c3e74 | [] | no_license | QuXing9/phd | 7e6f107c20e0b3b1de2b25eb99e0b640a4a0bfcf | 58ba53b6d78515ed555e40527f6923e28941cc19 | refs/heads/master | 2022-02-27T03:29:05.126378 | 2019-10-22T02:46:57 | 2019-10-22T02:46:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,354 | py | """Unit tests for //experimental/compilers/reachability:llvm_util."""
import sys
import pyparsing
import pytest
from absl import app
from absl import flags
from compilers.llvm import opt
from experimental.compilers.reachability import llvm_util
FLAGS = flags.FLAGS
# Bytecode generated by clang using the following command:
# $ clang -emit-llvm -S -xc - < foo.c -o - > foo.ll
# Original C source code:
#
# #include <stdio.h>
# #include <math.h>
#
# int DoSomething(int a, int b) {
# if (a % 5) {
# return a * 10;
# }
# return pow((float)a, 2.5);
# }
#
# int main(int argc, char **argv) {
# for (int i = 0; i < argc; ++i) {
# argc += DoSomething(argc, i);
# }
#
# printf("Computed value %d", argc);
# return 0;
# }
SIMPLE_C_BYTECODE = """
; ModuleID = '-'
source_filename = "-"
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.12.0"
@.str = private unnamed_addr constant [18 x i8] c"Computed value %d\00", align 1
; Function Attrs: norecurse nounwind readnone ssp uwtable
define i32 @DoSomething(i32, i32) #0 {
%3 = srem i32 %0, 5
%4 = icmp eq i32 %3, 0
br i1 %4, label %7, label %5
; <label>:5 ; preds = %2
%6 = mul nsw i32 %0, 10
br label %12
; <label>:7 ; preds = %2
%8 = sitofp i32 %0 to float
%9 = fpext float %8 to double
%10 = tail call double @llvm.pow.f64(double %9, double 2.500000e+00)
%11 = fptosi double %10 to i32
br label %12
; <label>:12 ; preds = %7, %5
%13 = phi i32 [ %6, %5 ], [ %11, %7 ]
ret i32 %13
}
; Function Attrs: nounwind readnone
declare double @llvm.pow.f64(double, double) #1
; Function Attrs: nounwind ssp uwtable
define i32 @main(i32, i8** nocapture readnone) #2 {
%3 = icmp sgt i32 %0, 0
br i1 %3, label %4, label %7
; <label>:4 ; preds = %2
br label %10
; <label>:5 ; preds = %22
%6 = phi i32 [ %24, %22 ]
br label %7
; <label>:7 ; preds = %5, %2
%8 = phi i32 [ %0, %2 ], [ %6, %5 ]
%9 = tail call i32 (i8*, ...) @printf(i8* nonnull getelementptr inbounds ([18 x i8], [18 x i8]* @.str, i64 0, i64 0), i32 %8)
ret i32 0
; <label>:10 ; preds = %4, %22
%11 = phi i32 [ %25, %22 ], [ 0, %4 ]
%12 = phi i32 [ %24, %22 ], [ %0, %4 ]
%13 = srem i32 %12, 5
%14 = icmp eq i32 %13, 0
br i1 %14, label %17, label %15
; <label>:15 ; preds = %10
%16 = mul nsw i32 %12, 10
br label %22
; <label>:17 ; preds = %10
%18 = sitofp i32 %12 to float
%19 = fpext float %18 to double
%20 = tail call double @llvm.pow.f64(double %19, double 2.500000e+00) #4
%21 = fptosi double %20 to i32
br label %22
; <label>:22 ; preds = %15, %17
%23 = phi i32 [ %16, %15 ], [ %21, %17 ]
%24 = add nsw i32 %23, %12
%25 = add nuw nsw i32 %11, 1
%26 = icmp slt i32 %25, %24
br i1 %26, label %10, label %5
}
; Function Attrs: nounwind
declare i32 @printf(i8* nocapture readonly, ...) #3
attributes #0 = { norecurse nounwind readnone ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+sse4.1,+ssse3" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+sse4.1,+ssse3" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #3 = { nounwind "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+sse4.1,+ssse3" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #4 = { nounwind }
!llvm.module.flags = !{!0}
!llvm.ident = !{!1}
!0 = !{i32 1, !"PIC Level", i32 2}
!1 = !{!"Apple LLVM version 8.0.0 (clang-800.0.42.1)"}
"""
# LLVM-generated dot file for the DoSomething() function of a simple C program.
# Original C source code:
#
# #include <stdio.h>
# #include <math.h>
#
# int DoSomething(int a, int b) {
# if (a % 5) {
# return a * 10;
# }
# return pow((float)a, 2.5);
# }
#
# int main(int argc, char **argv) {
# for (int i = 0; i < argc; ++i) {
# argc += DoSomething(argc, i);
# }
#
# printf("Computed value %d", argc);
# return 0;
# }
#
# I converted tabs to spaces in the following string.
SIMPLE_C_DOT = """
digraph "CFG for 'DoSomething' function" {
label="CFG for 'DoSomething' function";
Node0x7f86c670c590 [shape=record,label="{%2:\l %3 = alloca i32, align 4\l %4 = alloca i32, align 4\l %5 = alloca i32, align 4\l store i32 %0, i32* %4, align 4\l store i32 %1, i32* %5, align 4\l %6 = load i32, i32* %4, align 4\l %7 = srem i32 %6, 5\l %8 = icmp ne i32 %7, 0\l br i1 %8, label %9, label %12\l|{<s0>T|<s1>F}}"];
Node0x7f86c670c590:s0 -> Node0x7f86c65001a0;
Node0x7f86c670c590:s1 -> Node0x7f86c65001f0;
Node0x7f86c65001a0 [shape=record,label="{%9:\l\l %10 = load i32, i32* %4, align 4\l %11 = mul nsw i32 %10, 10\l store i32 %11, i32* %3, align 4\l br label %18\l}"];
Node0x7f86c65001a0 -> Node0x7f86c65084b0;
Node0x7f86c65001f0 [shape=record,label="{%12:\l\l %13 = load i32, i32* %4, align 4\l %14 = sitofp i32 %13 to float\l %15 = fpext float %14 to double\l %16 = call double @llvm.pow.f64(double %15, double 2.500000e+00)\l %17 = fptosi double %16 to i32\l store i32 %17, i32* %3, align 4\l br label %18\l}"];
Node0x7f86c65001f0 -> Node0x7f86c65084b0;
Node0x7f86c65084b0 [shape=record,label="{%18:\l\l %19 = load i32, i32* %3, align 4\l ret i32 %19\l}"];
}
"""
def test_DotCfgsFromBytecode_simple_c_program():
  """The simple C program yields one dot CFG per defined function."""
  cfgs = list(llvm_util.DotCfgsFromBytecode(SIMPLE_C_BYTECODE))
  assert len(cfgs) == 2
  joined = '\n'.join(cfgs)
  assert "CFG for 'DoSomething' function" in joined
  assert "CFG for 'main' function" in joined


def test_DotCfgsFromBytecode_invalid_bytecode():
  """Invalid bytecode surfaces as an OptException from the opt invocation."""
  with pytest.raises(opt.OptException) as e_ctx:
    next(llvm_util.DotCfgsFromBytecode("invalid bytecode!"))
  message = str(e_ctx.value)
  assert message.startswith("opt failed with return code ")


def test_GetBasicBlockNameFromLabel_unrecognized_label():
  """A label that does not match the expected format raises ValueError."""
  with pytest.raises(ValueError):
    llvm_util.GetBasicBlockNameFromLabel('invalid label')
def test_GetBasicBlockNameFromLabel_label():
label = ('"{%2:\l %3 = alloca i32, align 4\l %4 = alloca i32, align 4\l '
'%5 = alloca i8**, align 8\l %6 = alloca i32, align 4\l '
'store i32 0, i32* %3, align 4\l store i32 %0, i32* %4, '
'align 4\l store i8** %1, i8*** %5, align 8\l store i32 0, '
'i32* %6, align 4\l br label %7\l}"')
assert llvm_util.GetBasicBlockNameFromLabel(label) == "%2"
def test_ControlFlowGraphFromDotSource_invalid_source():
"""Test that exception is raised if dot can't be parsed."""
with pytest.raises(pyparsing.ParseException):
llvm_util.ControlFlowGraphFromDotSource("invalid dot source!")
def test_ControlFlowGraphFromDotSource_graph_name():
  """The CFG is named after the function it was extracted from."""
  graph = llvm_util.ControlFlowGraphFromDotSource(SIMPLE_C_DOT)
  assert graph.graph['name'] == 'DoSomething'
def test_ControlFlowGraphFromDotSource_num_nodes():
  """One node per basic block: DoSomething has four."""
  graph = llvm_util.ControlFlowGraphFromDotSource(SIMPLE_C_DOT)
  assert graph.number_of_nodes() == 4
def test_ControlFlowGraphFromDotSource_num_edges():
  """One edge per branch target: DoSomething has four."""
  graph = llvm_util.ControlFlowGraphFromDotSource(SIMPLE_C_DOT)
  assert graph.number_of_edges() == 4
def test_ControlFlowGraphFromDotSource_is_valid():
  """This particular CFG is expected to validate."""
  graph = llvm_util.ControlFlowGraphFromDotSource(SIMPLE_C_DOT)
  # CFGs are not valid in general: graphs built from unoptimized bytecode
  # may contain fusible basic blocks. This one should pass validation.
  assert graph.ValidateControlFlowGraph()
def test_ControlFlowGraphFromDotSource_node_names():
  """Node names match the bytecode's register labels."""
  graph = llvm_util.ControlFlowGraphFromDotSource(SIMPLE_C_DOT)
  names = [graph.nodes[node]['name'] for node in graph.nodes]
  # Sort numerically on the digits after the leading '%'.
  names.sort(key=lambda name: int(name[1:]))
  assert names == ['%2', '%9', '%12', '%18']
def test_ControlFlowGraphFromDotSource_edges():
  """Every expected branch appears as an edge between named nodes."""
  graph = llvm_util.ControlFlowGraphFromDotSource(SIMPLE_C_DOT)
  index_of = {graph.nodes[n]["name"]: n for n in graph.nodes}
  edges = set(graph.edges)
  expected = [('%2', '%9'), ('%2', '%12'), ('%9', '%18'), ('%12', '%18')]
  for src, dst in expected:
    assert (index_of[src], index_of[dst]) in edges
def test_ControlFlowGraphsFromBytecodes_num_graphs():
  """Three bytecodes with two functions each produce six CFGs."""
  bytecodes = [SIMPLE_C_BYTECODE] * 3
  graphs = list(llvm_util.ControlFlowGraphsFromBytecodes(bytecodes))
  assert len(graphs) == 6
def test_ControlFlowGraphsFromBytecodes_one_failure():
  """Errors during construction of CFGs are buffered until complete."""
  # The middle input is malformed and will make opt fail.
  generator = llvm_util.ControlFlowGraphsFromBytecodes([
      SIMPLE_C_BYTECODE,
      "Invalid bytecode!",
      SIMPLE_C_BYTECODE,
  ])
  # The four CFGs from the two valid inputs can still be drained.
  graphs = [next(generator) for _ in range(4)]
  # Exhausting the generator raises an ExceptionBuffer (not StopIteration)
  # carrying every buffered error along with the input that caused it.
  with pytest.raises(llvm_util.ExceptionBuffer) as e_ctx:
    next(generator)
  errors = e_ctx.value.errors
  assert len(errors) == 1
  assert errors[0].input == "Invalid bytecode!"
  assert isinstance(errors[0].error, opt.OptException)
def main(argv):
  """Main entry point: reject stray arguments, then run this file's tests."""
  if len(argv) > 1:
    unknown = ' '.join(argv[1:])
    raise app.UsageError("Unknown arguments: '{}'.".format(unknown))
  sys.exit(pytest.main([__file__, '-vv']))
if __name__ == '__main__':
flags.FLAGS(['argv[0]', '-v=1'])
app.run(main)
| [
"[email protected]"
] | |
2b887ca5322df9eb742eec5d14620c6a8c37621d | b5921afe6ea5cd8b3dcfc83147ab5893134a93d0 | /tl/contrib/tweepy/auth.py | 51ed3d90ae2fd53d749c402f1806617c2846a51b | [
"LicenseRef-scancode-other-permissive"
] | permissive | techdragon/tl | aaeb46e18849c04ad436e0e786401621a4be82ee | 6aba8aeafbc92cabdfd7bec11964f7c3f9cb835d | refs/heads/master | 2021-01-17T16:13:18.636457 | 2012-11-02T10:08:10 | 2012-11-02T10:08:10 | 9,296,808 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,226 | py | # Tweepy
# Copyright 2009 Joshua Roesslein
# See LICENSE
from urllib.request import Request, urlopen
from urllib.parse import quote
import base64
from tweepy import oauth
from tweepy.error import TweepError
from tweepy.api import API
class AuthHandler(object):
    """Abstract base class for request-authentication strategies."""

    def apply_auth(self, url, method, headers, parameters):
        """Apply authentication headers to request"""
        raise NotImplementedError

    def get_username(self):
        """Return the username of the authenticated user"""
        raise NotImplementedError
class BasicAuthHandler(AuthHandler):
    """HTTP Basic authentication: a base64-encoded username/password pair."""

    def __init__(self, username, password):
        self.username = username
        credentials = '%s:%s' % (username, password)
        # Pre-compute the encoded pair once; reused on every request.
        self._b64up = base64.b64encode(bytes(credentials, 'ascii'))

    def apply_auth(self, url, method, headers, parameters):
        """Inject the Basic credentials into the request headers."""
        headers['Authorization'] = 'Basic %s' % self._b64up.decode()

    def get_username(self):
        return self.username
class OAuthHandler(AuthHandler):
    """OAuth 1.0a authentication against the Twitter API.

    Flow: fetch a request token, redirect the user to the authorization
    URL, then exchange the user-supplied verifier for an access token.

    Fix: the HTTP responses returned by ``urlopen`` were never closed,
    leaking a socket per token exchange; they are now used as context
    managers.
    """

    REQUEST_TOKEN_URL = 'http://api.twitter.com/oauth/request_token'
    AUTHORIZATION_URL = 'http://api.twitter.com/oauth/authorize'
    AUTHENTICATE_URL = 'http://api.twitter.com/oauth/authenticate'
    ACCESS_TOKEN_URL = 'http://api.twitter.com/oauth/access_token'

    def __init__(self, consumer_key, consumer_secret, callback=None):
        self._consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)
        self._sigmethod = oauth.OAuthSignatureMethod_HMAC_SHA1()
        self.request_token = None
        self.access_token = None
        self.callback = callback
        self.username = None

    def apply_auth(self, url, method, headers, parameters):
        """Sign the request with the access token and merge OAuth headers."""
        request = oauth.OAuthRequest.from_consumer_and_token(self._consumer,
            http_url=url, http_method=method, token=self.access_token, parameters=parameters)
        request.sign_request(self._sigmethod, self._consumer, self.access_token)
        headers.update(request.to_header())

    def _get_request_token(self):
        """Fetch a fresh unauthorized request token.

        Raises:
            TweepError: on any network or parsing failure.
        """
        try:
            request = oauth.OAuthRequest.from_consumer_and_token(self._consumer,
                http_url = self.REQUEST_TOKEN_URL, callback=self.callback)
            request.sign_request(self._sigmethod, self._consumer, None)
            # Close the HTTP response promptly instead of leaking the socket.
            with urlopen(Request(self.REQUEST_TOKEN_URL,
                                 headers=request.to_header()), timeout=5.0) as resp:
                return oauth.OAuthToken.from_string(resp.read().decode())
        except Exception as e:
            raise TweepError(e)

    def set_access_token(self, key, secret):
        """Install a previously obtained access token key/secret pair."""
        self.access_token = oauth.OAuthToken(key, secret)

    def get_authorization_url(self):
        """Get the authorization URL to redirect the user"""
        try:
            # get the request token
            self.request_token = self._get_request_token()
            # build auth request and return as url
            request = oauth.OAuthRequest.from_token_and_callback(
                token=self.request_token, http_url=self.AUTHORIZATION_URL)
            return request.to_url()
        except Exception as e:
            raise TweepError(e)

    def get_access_token(self, verifier=None):
        """
        After user has authorized the request token, get access token
        with user supplied verifier.
        """
        try:
            # build request, signed with the request token obtained earlier
            request = oauth.OAuthRequest.from_consumer_and_token(
                self._consumer,
                token=self.request_token, http_url=self.ACCESS_TOKEN_URL,
                verifier=str(verifier)
            )
            request.sign_request(self._sigmethod, self._consumer, self.request_token)
            # send request; close the response when done (was leaked before)
            with urlopen(Request(self.ACCESS_TOKEN_URL,
                                 headers=request.to_header())) as resp:
                self.access_token = oauth.OAuthToken.from_string(resp.read().decode())
            return self.access_token
        except Exception as e:
            raise TweepError(e)

    def get_username(self):
        """Return (and lazily fetch) the authenticated user's screen name."""
        if self.username is None:
            api = API(self)
            user = api.verify_credentials()
            if user:
                self.username = user.screen_name
            else:
                raise TweepError("Unable to get username, invalid oauth token!")
        return self.username
| [
"[email protected]"
] | |
87f68bcf618d998027044494849ca6cc6cbdb568 | b488060127559a3910ad5bf6642061019cc5f7df | /app/auth/views.py | f16dd5a46f53c65e4f7cb58c19eb52ce58c65ca7 | [] | no_license | hypnopompicindex/flasky | 1cf4e104bf68a192348049d651ddf7e35c6c6e0d | 2131bb49decd8a17d25078ab37205f12e22aefa1 | refs/heads/master | 2016-09-05T16:04:45.933010 | 2014-08-29T22:25:55 | 2014-08-29T22:25:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,999 | py | from flask import render_template, redirect, request, url_for, flash
from flask.ext.login import login_user, logout_user, login_required, current_user
from . import auth
from .. import db
from ..models import User
from .forms import LoginForm
from ..email import send_email
from .forms import LoginForm, RegistrationForm, ChangePasswordForm, \
PasswordResetRequestForm, PasswordResetForm, ChangeEmailForm
@auth.before_app_request
def before_request():
    """Redirect unconfirmed users to the confirmation notice page.

    Fix: ``request.endpoint`` is None for requests that match no route
    (e.g. 404s), so slicing it crashed; guard against None and also skip
    static assets so they are still served to unconfirmed users.
    """
    if current_user.is_authenticated() \
            and not current_user.confirmed \
            and request.endpoint is not None \
            and request.endpoint[:5] != 'auth.' \
            and request.endpoint != 'static':
        return redirect(url_for('auth.unconfirmed'))
@auth.route('/unconfirmed')
def unconfirmed():
    """Show the 'please confirm your account' page, or bounce home."""
    needs_notice = not (current_user.is_anonymous() or current_user.confirmed)
    if not needs_notice:
        return redirect(url_for('main.index'))
    return render_template('auth/unconfirmed.html')
@auth.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form and sign the user in on valid credentials."""
    form = LoginForm()
    if form.validate_on_submit():
        candidate = User.query.filter_by(email=form.email.data).first()
        credentials_ok = (candidate is not None
                          and candidate.verify_password(form.password.data))
        if credentials_ok:
            login_user(candidate, form.remember_me.data)
            destination = request.args.get('next') or url_for('main.index')
            return redirect(destination)
        flash('Invalid username or password.')
    return render_template('auth/login.html', form=form)
@auth.route('/logout')
@login_required
def logout():
    """Sign the current user out and send them back to the home page."""
    logout_user()
    flash('You have been logged out.')
    return redirect(url_for('main.index'))
@auth.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new account and email its confirmation token."""
    form = RegistrationForm()
    if not form.validate_on_submit():
        return render_template('auth/register.html', form=form)
    new_user = User(email=form.email.data,
                    username=form.username.data,
                    password=form.password.data)
    db.session.add(new_user)
    # Commit now so the confirmation token can embed a real user id.
    db.session.commit()
    token = new_user.generate_confirmation_token()
    send_email(new_user.email, 'Confirm Your Account',
               'auth/email/confirm', user=new_user, token=token)
    flash('A confirmation email has been sent to you by email.')
    return redirect(url_for('auth.login'))
@auth.route('/confirm/<token>')
@login_required
def confirm(token):
    """Validate an emailed confirmation token for the logged-in user."""
    if current_user.confirmed:
        return redirect(url_for('main.index'))
    message = ('You have confirmed your account. Thanks!'
               if current_user.confirm(token)
               else 'The confirmation link is invalid or has expired.')
    flash(message)
    return redirect(url_for('main.index'))
@auth.route('/confirm')
@login_required
def resend_confirmation():
    """Send the signed-in user a fresh confirmation token by email."""
    fresh_token = current_user.generate_confirmation_token()
    send_email(current_user.email, 'Confirm Your Account',
               'auth/email/confirm', user=current_user, token=fresh_token)
    flash('A new confirmation email has been sent to you by email.')
    return redirect(url_for('main.index'))
@auth.route('/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
    """Let a logged-in user replace their password."""
    form = ChangePasswordForm()
    if form.validate_on_submit():
        # The old password must be re-verified before anything changes.
        if not current_user.verify_password(form.old_password.data):
            flash('Invalid password.')
        else:
            current_user.password = form.password.data
            db.session.add(current_user)
            flash('Your password has been updated.')
            return redirect(url_for('main.index'))
    return render_template("auth/change_password.html", form=form)
@auth.route('/reset', methods=['GET', 'POST'])
def password_reset_request():
    """Email a password-reset token to the address on file."""
    if not current_user.is_anonymous():
        return redirect(url_for('main.index'))
    form = PasswordResetRequestForm()
    if form.validate_on_submit():
        account = User.query.filter_by(email=form.email.data).first()
        if account:
            reset_token = account.generate_reset_token()
            send_email(account.email, 'Reset Your Password',
                       'auth/email/reset_password',
                       user=account, token=reset_token,
                       next=request.args.get('next'))
        # Flash the same message whether or not the address exists, so the
        # form does not reveal which emails are registered.
        flash('An email with instructions to reset your password has been '
              'sent to you.')
        return redirect(url_for('auth.login'))
    return render_template('auth/reset_password.html', form=form)
@auth.route('/reset/<token>', methods=['GET', 'POST'])
def password_reset(token):
    """Set a new password, provided the emailed reset token is valid."""
    if not current_user.is_anonymous():
        return redirect(url_for('main.index'))
    form = PasswordResetForm()
    if form.validate_on_submit():
        account = User.query.filter_by(email=form.email.data).first()
        if account is None:
            return redirect(url_for('main.index'))
        if account.reset_password(token, form.password.data):
            flash('Your password has been updated.')
            return redirect(url_for('auth.login'))
        return redirect(url_for('main.index'))
    return render_template('auth/reset_password.html', form=form)
@auth.route('/change-email', methods=['GET', 'POST'])
@login_required
def change_email_request():
    """Start an email change by mailing a token to the new address."""
    form = ChangeEmailForm()
    if form.validate_on_submit():
        if not current_user.verify_password(form.password.data):
            flash('Invalid email or password.')
        else:
            new_email = form.email.data
            token = current_user.generate_email_change_token(new_email)
            send_email(new_email, 'Confirm your email address',
                       'auth/email/change_email',
                       user=current_user, token=token)
            flash('An email with instructions to confirm your new email '
                  'address has been sent to you.')
            return redirect(url_for('main.index'))
    return render_template("auth/change_email.html", form=form)
@auth.route('/change-email/<token>')
@login_required
def change_email(token):
    """Finish an email change if the emailed token checks out."""
    flash('Your email address has been updated.'
          if current_user.change_email(token)
          else 'Invalid request.')
    return redirect(url_for('main.index'))
| [
"[email protected]"
] | |
a0647338bf9bf7f1b4ad381078643e483422723e | 825930f372fdf8c9c42cd2f9b1f424ab9de90b38 | /accounts/migrations/0003_order_note.py | 92701e816ce3c74d2368fbed83add82c8b9acf2c | [] | no_license | Xasanjon/crm2 | 56cbfa05d910144c75a3cdfe7423ba68fd576534 | 52279925e64e4268830fbeae6af897aef14b64d0 | refs/heads/master | 2023-07-02T04:13:33.928305 | 2021-08-16T14:53:43 | 2021-08-16T14:53:43 | 395,755,429 | 0 | 0 | null | 2021-08-16T14:53:44 | 2021-08-13T18:30:32 | Python | UTF-8 | Python | false | false | 392 | py | # Generated by Django 3.2 on 2021-08-02 20:32
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds an optional free-text ``note`` column (max 200 chars) to Order.
    dependencies = [
        ('accounts', '0002_auto_20210725_0252'),
    ]
    operations = [
        migrations.AddField(
            model_name='order',
            name='note',
            field=models.CharField(max_length=200, null=True),
        ),
    ]
| [
"[email protected]"
] | |
1b803449349f7c2d236f15348e6db398d826631f | 504344fc66e8d54081a17306d3012a16bbb81ee7 | /1_start_main.py | f5b040ad17b8d6c087939daec2d577d8e233f917 | [] | no_license | Ryanshuai/auto_pubg | 814753644a8e8e7aa3d7ca3c346a9e05b825c00d | 696f33f888efc441a74e142db878e836bbf3efee | refs/heads/master | 2022-09-21T12:13:24.155393 | 2020-11-12T20:03:43 | 2020-11-12T20:03:43 | 153,748,441 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,482 | py | from PyQt5 import QtCore, QtGui, QtWidgets
from screen_parameter import show_position_y, show_position_x, show_size_y, show_size_x
from press_gun.robot import Robot
from state.all_states import All_States
class Ui_Dialog(object):
    """Frameless always-on-top overlay that shows the robot's state text."""

    def setupUi(self, Dialog):
        """Configure *Dialog* as the overlay window and start the robot."""
        Dialog.setObjectName("Dialog")
        # Geometry comes from screen_parameter so the overlay lands on a
        # fixed region of the game screen.
        Dialog.resize(show_size_x, show_size_y)
        Dialog.move(show_position_x, show_position_y)
        # Keep the overlay above the game and strip the window chrome.
        Dialog.setWindowFlag(QtCore.Qt.WindowStaysOnTopHint)
        Dialog.setWindowFlag(QtCore.Qt.FramelessWindowHint)
        # Dialog.setAttribute(QtCore.Qt.WA_TranslucentBackground, True)
        font = QtGui.QFont()
        font.setFamily("Arial")
        font.setPointSize(14)
        Dialog.setFont(font)
        # Single label fills the whole dialog; it renders the state string.
        self.label = QtWidgets.QLabel(Dialog)
        self.label.setGeometry(QtCore.QRect(0, 0, show_size_x, show_size_y))
        self.label.setObjectName("label")
        QtCore.QMetaObject.connectSlotsByName(Dialog)
        # self.robot = Robot(All_States(), is_calibrating=True)
        self.robot = Robot(All_States())
        # The robot pushes state strings to the label via this signal.
        self.robot.temp_qobject.state_str_signal[str].connect(self.retranslateUi)

    def retranslateUi(self, text):
        """Slot: display *text* (the robot's current state) in the label."""
        _translate = QtCore.QCoreApplication.translate
        self.label.setText(_translate("Dialog", text))
if __name__ == "__main__":
    import sys
    # Standard Qt bootstrap: build the dialog, wire it up, enter the loop.
    app = QtWidgets.QApplication(sys.argv)
    Dialog = QtWidgets.QDialog()
    ui = Ui_Dialog()
    ui.setupUi(Dialog)
    Dialog.show()
    sys.exit(app.exec_())
| [
"[email protected]"
] | |
a8854b058391a3e400e059150fc9e2444400ab81 | d4b049d91795b5f8899f5ee60151a04be8890af9 | /litapplications/candidates/migrations/0037_auto_20170604_1531.py | 673c7eb72fcbdf9751afa92d8101506c0ee2c1c1 | [] | no_license | thatandromeda/litapplications | 3ab879c6edee1fd8424c3546eead47659699655a | d8b67d0b82ea14fb71b871f7563b7096640e4c25 | refs/heads/master | 2020-05-21T23:59:07.004211 | 2017-12-08T03:25:24 | 2017-12-08T03:25:24 | 64,570,749 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 640 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-06-04 15:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds optional year_start/year_end integer columns to Appointment.
    dependencies = [
        ('candidates', '0036_auto_20170410_0025'),
    ]
    operations = [
        migrations.AddField(
            model_name='appointment',
            name='year_end',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='appointment',
            name='year_start',
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
| [
"[email protected]"
] | |
cf7998ad81daa62c4fba1aad1cf014844efd51c8 | c532e4d7466188ebbcca32413f592491eac9d7f8 | /leetcode/392.is-subsequence.py | e6cd21e9c3757a79129f956abc34459736ec87de | [] | no_license | Zedmor/hackerrank-puzzles | a1ff8601ea6d2bb3d2095909dfe00ef32346b74f | 2cc179bdb33a97294a2bf99dbda278e935165943 | refs/heads/master | 2023-01-10T13:57:26.649360 | 2023-01-04T03:27:05 | 2023-01-04T03:27:05 | 68,768,901 | 0 | 0 | null | 2017-03-05T18:24:18 | 2016-09-21T01:46:35 | Jupyter Notebook | UTF-8 | Python | false | false | 1,609 | py | #
# @lc app=leetcode id=392 lang=python3
#
# [392] Is Subsequence
#
# https://leetcode.com/problems/is-subsequence/description/
#
# algorithms
# Easy (47.97%)
# Total Accepted: 233.6K
# Total Submissions: 474.3K
# Testcase Example: '"abc"\n"ahbgdc"'
#
# Given a string s and a string t, check if s is subsequence of t.
#
# A subsequence of a string is a new string which is formed from the original
# string by deleting some (can be none) of the characters without disturbing
# the relative positions of the remaining characters. (ie, "ace" is a
# subsequence of "abcde" while "aec" is not).
#
# Follow up:
# If there are lots of incoming S, say S1, S2, ... , Sk where k >= 1B, and you
# want to check one by one to see if T has its subsequence. In this scenario,
# how would you change your code?
#
# Credits:
# Special thanks to @pbrother for adding this problem and creating all test
# cases.
#
#
# Example 1:
# Input: s = "abc", t = "ahbgdc"
# Output: true
# Example 2:
# Input: s = "axc", t = "ahbgdc"
# Output: false
#
#
# Constraints:
#
#
# 0 <= s.length <= 100
# 0 <= t.length <= 10^4
# Both strings consists only of lowercase characters.
#
#
#
class Solution:
    """Two-pointer check that ``s`` is a subsequence of ``t``.

    >>> Solution().isSubsequence('abc', 'ahbgdc')
    True
    >>> Solution().isSubsequence('axc', 'ahbgdc')
    False
    """

    def isSubsequence(self, s: str, t: str) -> bool:
        matched = 0  # index of the next character of s still to be found
        for ch in t:
            if matched == len(s):
                break
            if ch == s[matched]:
                matched += 1
        return matched == len(s)
| [
"[email protected]"
] | |
56a53cf0a36b5b36076f79e659a49128f7fa1265 | ada026a8588611f18a0bae44619aea6dc89c07a7 | /backend/event/models.py | 94899bc08145ed779dc022f61534cb2e63f156b5 | [] | no_license | crowdbotics-apps/iqraa-25096 | 5a363ec49766352d23de9348bfddcaed187b98c8 | 42def0722c287182c100ef46a4284236fbd2f04e | refs/heads/master | 2023-03-22T23:40:21.685747 | 2021-03-18T09:17:50 | 2021-03-18T09:17:50 | 349,008,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,882 | py | from django.conf import settings
from django.db import models
class Vendor(models.Model):
    """An event vendor, optionally tied to a Location and a Category."""
    name = models.TextField()
    # SlugField: presumably stores an image key/slug rather than an upload
    # -- TODO confirm against the image-handling pipeline.
    logo_image = models.SlugField(
        null=True,
        blank=True,
        max_length=50,
    )
    type = models.TextField(
        null=True,
        blank=True,
    )
    website = models.URLField(
        null=True,
        blank=True,
    )
    location = models.ForeignKey(
        "event.Location",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="vendor_location",
    )
    category = models.ForeignKey(
        "event.Category",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="vendor_category",
    )
class MySchedule(models.Model):
    """Join table: a Schedule entry saved by a User."""
    user = models.ForeignKey(
        "users.User",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="myschedule_user",
    )
    schedule = models.ForeignKey(
        "event.Schedule",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="myschedule_schedule",
    )
class Faq(models.Model):
    """A frequently-asked question (title) with its answer (description)."""
    title = models.CharField(
        max_length=256,
    )
    description = models.TextField()
class Sponsor(models.Model):
    """An event sponsor, its tier, and whether it also presents."""
    name = models.TextField()
    logo_image = models.SlugField(
        max_length=50,
    )
    # Free-text tier name (e.g. gold/silver) -- TODO confirm allowed values.
    sponsor_level = models.TextField()
    presenter = models.BooleanField()
    website = models.URLField(
        null=True,
        blank=True,
    )
    location = models.ForeignKey(
        "event.Location",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="sponsor_location",
    )
class Favorites(models.Model):
    """Join table: a Vendor bookmarked by a User."""
    user = models.ForeignKey(
        "users.User",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="favorites_user",
    )
    vendor = models.ForeignKey(
        "event.Vendor",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="favorites_vendor",
    )
class VendorDetail(models.Model):
    """Extended descriptive fields attached to a Vendor."""
    website = models.URLField()
    description = models.TextField()
    associated_name = models.TextField(
        null=True,
        blank=True,
    )
    # Named vendor_id but is a ForeignKey (Django adds its own _id column).
    vendor_id = models.ForeignKey(
        "event.Vendor",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="vendordetail_vendor_id",
    )
class Location(models.Model):
    """A venue/room; amenities are stored as free text."""
    amenities = models.TextField(
        null=True,
        blank=True,
    )
    name = models.CharField(
        null=True,
        blank=True,
        max_length=256,
    )
    image = models.SlugField(
        null=True,
        blank=True,
        max_length=50,
    )
class Presenter(models.Model):
    """A person presenting a scheduled session."""
    name = models.CharField(
        max_length=256,
    )
    title = models.CharField(
        max_length=256,
    )
    schedule = models.ForeignKey(
        "event.Schedule",
        on_delete=models.CASCADE,
        related_name="presenter_schedule",
    )
class Schedule(models.Model):
    """A scheduled session: time, description, track and Location."""
    dateTime = models.DateTimeField()
    description = models.TextField(
        null=True,
        blank=True,
    )
    track = models.TextField(
        null=True,
        blank=True,
    )
    location = models.ForeignKey(
        "event.Location",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="schedule_location",
    )
class Category(models.Model):
    """A vendor category (name plus description)."""
    description = models.TextField()
    name = models.CharField(
        null=True,
        blank=True,
        max_length=256,
    )
# Create your models here.
| [
"[email protected]"
] | |
a65a5fe2737f2506964095d71631ff9e74b89d51 | 1b7f4cd39bf7e4a2cf667ac13244e5138ee86cb2 | /agents/displays/human_display.py | 4ad2949060ec0816a46e1db1e5ae89c9fd33bade | [
"MIT"
] | permissive | cjreynol/willsmith | 02f793003a914a21b181839bbd58108046f312d6 | 39d3b8caef8ba5825f3a0272c7fd61a2f78ef2b5 | refs/heads/master | 2020-07-15T13:25:57.613707 | 2018-06-12T00:18:19 | 2018-06-12T00:18:19 | 205,572,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,172 | py | from tkinter import Button, Entry, Label, END
from willsmith.gui_display_controller import GUIDisplayController
class HumanDisplay(GUIDisplayController):
    """Tkinter window through which a human player types their moves."""

    WINDOW_TITLE = "Human Agent"
    LABEL_FONT = ("Courier New", 14)

    def __init__(self):
        super().__init__()
        # Widgets are created lazily by _initialize_widgets.
        self.input_prompt_label = None
        self.input_entry = None
        self.submit_button = None

    def _initialize_widgets(self):
        self.input_prompt_label = Label(self.root, font=self.LABEL_FONT,
                                        text="<prompt here>")
        self.input_entry = Entry(self.root)
        self.submit_button = Button(self.root, text="Submit")

    def _place_widgets(self):
        self.input_prompt_label.grid(row=0, column=0, columnspan=2)
        self.input_entry.grid(row=1, column=0)
        self.submit_button.grid(row=1, column=1)

    def _update_display(self, agent, action):
        self._reset_display(agent)

    def _reset_display(self, agent):
        # Clear whatever the user typed for the previous move.
        self.input_entry.delete(0, END)

    # NOTE(review): missing ``self`` -- unusable as an instance method and
    # never wired to the button; confirm intended before fixing.
    def _submit_entry():
        pass
| [
"[email protected]"
] | |
9999bb084c19897bd8e0f40f1449c5ab8305baec | 2a6d385c7737aea3c6b49eef9252babb7557b909 | /MCTools/test/lheTreeMaker.py | 7c6aa48eac2b99e552e3669d2e943613a8222e6a | [] | no_license | Sam-Harper/usercode | 1b302a4b647e479d27a9501f9576bd04b07e111a | fa43427fac80d773978ea67b78be58d264f39ec8 | refs/heads/120XNtup | 2022-08-26T12:59:53.388853 | 2022-07-12T16:52:46 | 2022-07-12T16:52:46 | 15,675,175 | 1 | 11 | null | 2022-07-21T13:27:57 | 2014-01-06T13:54:22 | Python | UTF-8 | Python | false | false | 2,051 | py | # Import configurations
import FWCore.ParameterSet.Config as cms
# set up process
process = cms.Process("PDF")
# initialize MessageLogger and output report
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport = cms.untracked.PSet(
reportEvery = cms.untracked.int32(10000),
limit = cms.untracked.int32(10000000)
)
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(False) )
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
import sys
filePrefex="file:"
if(sys.argv[2].find("/pnfs/")==0):
filePrefex="dcap://heplnx209.pp.rl.ac.uk:22125"
if(sys.argv[2].find("/store/")==0):
filePrefex=""
process.source = cms.Source("LHESource",
# fileNames = cms.untracked.vstring(filePrefex+sys.argv[2]),
# inputCommands = cms.untracked.vstring("drop *","keep *_source_*_*"),
fileNames = cms.untracked.vstring(),
)
for i in range(2,len(sys.argv)-2):
print filePrefex+sys.argv[i]
process.source.fileNames.extend([filePrefex+sys.argv[i],])
process.lheTreeMaker = cms.EDAnalyzer("LHETreeMaker",
datasetCode=cms.int32(-1),
# lheEventTag=cms.InputTag("externalLHEProducer"),
lheEventTag=cms.InputTag("source"),
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string("output.root")
)
isCrabJob=False #script seds this if its a crab job
#if 1, its a crab job...
if isCrabJob:
print "using crab specified filename"
process.TFileService.fileName= "OUTPUTFILE"
process.lheTreeMaker.datasetCode = DATASETCODE
else:
print "using user specified filename"
process.TFileService.fileName= sys.argv[len(sys.argv)-1]
process.lheTreeMaker.datasetCode = int(sys.argv[len(sys.argv)-2])
process.p = cms.Path(
process.lheTreeMaker)
| [
"[email protected]"
] | |
22fcbd946d08b1b0360883cebf92843acdabaae0 | 853c6a09af16fd4dd8a53efa8bde631e63315b59 | /Programmers/correct braket.py | 0936aa47db0765ce63be6d8daa769dea7d790e1b | [] | no_license | Areum0921/Abox | 92840897b53e9bbab35c0e0aae5a017ab19a0500 | f4739c0c0835054afeca82484769e71fb8de47c8 | refs/heads/master | 2021-12-13T11:16:33.583366 | 2021-10-10T08:09:50 | 2021-10-10T08:09:50 | 176,221,995 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | def solution(s):
answer = True
stack = []
for i in s:
if (i == '('):
stack.append(i)
else:
if (stack):
stack.pop()
else:
return False # 짝이 안맞을때
if (stack): # 스택에 남아 있는게 있을때
return False
return True | [
"[email protected]"
] | |
4b3961aa5d8906bd87af450467577e695d335f83 | b0c0008213c633e6d32d8536a98934047f38ba17 | /consumer.py | e8cd2071c6864f984bb83cc67f04e9e66677ddc7 | [] | no_license | benthomasson/kafka-test | 8363f6a880544a6037e88d01b33954524b3b38ac | 95b1e89dd5a009b47a35ac5886c1980e2c5d5fcc | refs/heads/master | 2020-06-13T17:34:55.464840 | 2019-07-01T19:49:22 | 2019-07-01T19:49:22 | 194,734,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 946 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Usage:
consumer [options]
Options:
-h, --help Show this page
--debug Show debug logging
--verbose Show verbose logging
"""
from docopt import docopt
import logging
import sys
from kafka import KafkaConsumer
logger = logging.getLogger('consumer')
def main(args=None):
    """Parse CLI options, configure logging, and print every Kafka message."""
    if args is None:
        args = sys.argv[1:]
    parsed_args = docopt(__doc__, args)
    if parsed_args['--debug']:
        level = logging.DEBUG
    elif parsed_args['--verbose']:
        level = logging.INFO
    else:
        level = logging.WARNING
    logging.basicConfig(level=level)
    consumer = KafkaConsumer('my_favorite_topic',
                             bootstrap_servers='127.0.0.1:9092',
                             group_id="mygroup",
                             auto_offset_reset='earliest')
    # Blocks indefinitely: the consumer iterator only ends if the broker
    # connection is torn down.
    for msg in consumer:
        print(msg)
    return 0
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv[1:]))
| [
"[email protected]"
] | |
7491ded17babba2e25a320468b4c7f2d03ec8014 | 2d67afd40a0425c843aa8643df9f7d5653ad0369 | /python/leetcode/836_Rectangle_Overlap.py | 07a9c93dabe68189566acdbdd57f7dd25eead09a | [] | no_license | bobcaoge/my-code | 2f4ff5e276bb6e657f5a63108407ebfbb11fbf64 | 70bdd75b6af2e1811c1beab22050c01d28d7373e | refs/heads/master | 2022-12-23T22:38:10.003058 | 2020-07-02T03:52:43 | 2020-07-02T03:52:43 | 248,733,053 | 0 | 0 | null | 2022-12-10T05:41:57 | 2020-03-20T10:55:55 | Python | UTF-8 | Python | false | false | 683 | py | # /usr/bin/python3.6
# -*- coding:utf-8 -*-
class Solution(object):

    def isRectangleOverlap(self, rec1, rec2):
        """Return True iff the rectangles share positive area.

        :type rec1: List[int]  -- [x1, y1, x2, y2]
        :type rec2: List[int]
        :rtype: bool
        """
        # Overlap iff neither rectangle lies entirely to one side (or
        # above/below) of the other; touching edges do not count.
        separated = (rec2[0] >= rec1[2] or rec2[1] >= rec1[3]
                     or rec2[2] <= rec1[0] or rec2[3] <= rec1[1])
        return not separated

    def isRectangleOverlap1(self, rec1, rec2):
        """Unpacking variant of :meth:`isRectangleOverlap`.

        :type rec1: List[int]
        :type rec2: List[int]
        :rtype: bool
        """
        ax1, ay1, ax2, ay2 = rec1
        bx1, by1, bx2, by2 = rec2
        return not (bx1 >= ax2 or by1 >= ay2 or bx2 <= ax1 or by2 <= ay1)
def main():
    # Placeholder driver: constructs a Solution but exercises nothing yet.
    s = Solution()
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
8f73960e9cd985f473f88967aa7424ab07f7bcbe | aa692f369966074141873a473894362913750e01 | /reportform/asgi.py | 52f03e6cc3a8f2461ec7170e6d02e3bf734d97bc | [] | no_license | yashacon/Progress_form | d8747d6ba28266cabd2c88ecfdcf4816c7350569 | 0f26733383f79e9e34992cd12a308a410c27f37f | refs/heads/master | 2022-04-22T14:47:05.119632 | 2020-04-19T15:14:16 | 2020-04-19T15:14:16 | 257,029,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
ASGI config for reportform project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'reportform.settings')
application = get_asgi_application()
| [
"[email protected]"
] | |
c33743585b9a553e3d3858a7fff83eb8abfe4436 | 7f1d31cf00f8a1fc175d67c7be6e11367179d3f6 | /tests/nlu/extractors/test_extractor.py | b0739e047c43aac5b670854f89971dc56ef5e29e | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"MIT"
] | permissive | russosanti/rasa | 226ec14e3a326ba2ad9cb0aae57c79465c88b5ab | 21fb0cc8e92cf270e4228438cb386f1d6f364563 | refs/heads/master | 2023-04-07T13:25:53.848512 | 2020-04-16T21:59:58 | 2020-04-16T21:59:58 | 256,278,604 | 0 | 1 | Apache-2.0 | 2020-04-16T17:05:06 | 2020-04-16T17:05:05 | null | UTF-8 | Python | false | false | 7,622 | py | from typing import Any, Text, Dict, List
import pytest
from rasa.nlu.tokenizers.tokenizer import Token
from rasa.nlu.training_data import Message
from rasa.nlu.extractors.extractor import EntityExtractor
@pytest.mark.parametrize(
"text, tokens, entities, keep, expected_entities",
[
(
"Aarhus is a city",
[
Token("Aar", 0, 3),
Token("hus", 3, 6),
Token("is", 7, 9),
Token("a", 10, 11),
Token("city", 12, 16),
],
[
{"entity": "iata", "start": 0, "end": 3, "value": "Aar"},
{"entity": "city", "start": 3, "end": 6, "value": "hus"},
{"entity": "location", "start": 12, "end": 16, "value": "city"},
],
False,
[{"entity": "location", "start": 12, "end": 16, "value": "city"}],
),
(
"Aarhus",
[Token("Aar", 0, 3), Token("hus", 3, 6)],
[
{"entity": "iata", "start": 0, "end": 3, "value": "Aar"},
{"entity": "city", "start": 3, "end": 6, "value": "hus"},
],
True,
[],
),
(
"Aarhus city",
[Token("Aarhus", 0, 6), Token("city", 7, 11)],
[
{"entity": "city", "start": 0, "end": 6, "value": "Aarhus"},
{"entity": "type", "start": 7, "end": 11, "value": "city"},
],
False,
[
{"entity": "city", "start": 0, "end": 6, "value": "Aarhus"},
{"entity": "type", "start": 7, "end": 11, "value": "city"},
],
),
(
"Aarhus is a city",
[
Token("Aar", 0, 3),
Token("hus", 3, 6),
Token("is", 7, 9),
Token("a", 10, 11),
Token("city", 12, 16),
],
[
{
"entity": "city",
"start": 0,
"end": 3,
"confidence": 0.87,
"value": "Aar",
},
{
"entity": "iata",
"start": 3,
"end": 6,
"confidence": 0.43,
"value": "hus",
},
{"entity": "location", "start": 12, "end": 16, "value": "city"},
],
True,
[
{
"entity": "city",
"start": 0,
"end": 6,
"confidence": 0.87,
"value": "Aarhus",
},
{"entity": "location", "start": 12, "end": 16, "value": "city"},
],
),
(
"Aarhus",
[Token("Aa", 0, 2), Token("r", 2, 3), Token("hu", 3, 5), Token("s", 5, 6)],
[
{
"entity": "iata",
"start": 0,
"end": 2,
"confidence": 0.32,
"value": "Aa",
},
{
"entity": "city",
"start": 2,
"end": 3,
"confidence": 0.87,
"value": "r",
},
{
"entity": "iata",
"start": 3,
"end": 5,
"confidence": 0.21,
"value": "hu",
},
{
"entity": "city",
"start": 5,
"end": 6,
"confidence": 0.43,
"value": "s",
},
],
True,
[
{
"entity": "city",
"start": 0,
"end": 6,
"confidence": 0.87,
"value": "Aarhus",
}
],
),
(
"Aarhus",
[Token("Aa", 0, 2), Token("r", 2, 3), Token("hu", 3, 5), Token("s", 5, 6)],
[
{
"entity": "city",
"start": 0,
"end": 2,
"confidence": 0.32,
"value": "Aa",
}
],
True,
[
{
"entity": "city",
"start": 0,
"end": 6,
"confidence": 0.32,
"value": "Aarhus",
}
],
),
(
"Aarhus",
[Token("Aa", 0, 2), Token("r", 2, 3), Token("hu", 3, 5), Token("s", 5, 6)],
[
{
"entity": "city",
"start": 0,
"end": 2,
"confidence": 0.32,
"value": "Aa",
}
],
False,
[],
),
(
"Buenos Aires is a city",
[
Token("Buenos", 0, 6),
Token("Ai", 7, 9),
Token("res", 9, 12),
Token("is", 13, 15),
Token("a", 16, 17),
Token("city", 18, 22),
],
[
{"entity": "city", "start": 0, "end": 9, "value": "Buenos Ai"},
{"entity": "location", "start": 18, "end": 22, "value": "city"},
],
False,
[{"entity": "location", "start": 18, "end": 22, "value": "city"}],
),
(
"Buenos Aires is a city",
[
Token("Buenos", 0, 6),
Token("Ai", 7, 9),
Token("res", 9, 12),
Token("is", 13, 15),
Token("a", 16, 17),
Token("city", 18, 22),
],
[
{"entity": "city", "start": 0, "end": 9, "value": "Buenos Ai"},
{"entity": "location", "start": 18, "end": 22, "value": "city"},
],
True,
[
{"entity": "city", "start": 0, "end": 12, "value": "Buenos Aires"},
{"entity": "location", "start": 18, "end": 22, "value": "city"},
],
),
(
"Buenos Aires is a city",
[
Token("Buen", 0, 4),
Token("os", 4, 6),
Token("Ai", 7, 9),
Token("res", 9, 12),
Token("is", 13, 15),
Token("a", 16, 17),
Token("city", 18, 22),
],
[
{"entity": "city", "start": 4, "end": 9, "value": "os Ai"},
{"entity": "location", "start": 18, "end": 22, "value": "city"},
],
True,
[
{"entity": "city", "start": 0, "end": 12, "value": "Buenos Aires"},
{"entity": "location", "start": 18, "end": 22, "value": "city"},
],
),
],
)
def test_clean_up_entities(
    text: Text,
    tokens: List[Token],
    entities: List[Dict[Text, Any]],
    keep: bool,
    expected_entities: List[Dict[Text, Any]],
):
    # Parametrized by the table above: checks that EntityExtractor aligns
    # entity spans to token boundaries — partial token matches are either
    # expanded to full tokens (keep=True) or dropped (keep=False).
    extractor = EntityExtractor()
    message = Message(text)
    # The extractor reads the token list from the message to locate boundaries.
    message.set("tokens", tokens)
    updated_entities = extractor.clean_up_entities(message, entities, keep)
    assert updated_entities == expected_entities
| [
"[email protected]"
] | |
eaf21fc64fa4a9963db8428a6d85332bb1f68acf | d2fc4d45b115fb861097657d00b3c5cb08e8a3ad | /scenarios/bank_account_delete/executable.py | 22c888baa03eda9722fd271e2a6f2c9a58e213cb | [] | no_license | jess010/balanced-python | 81b39f0e9d3ce52d60f2453b8c98e77f07ee3acb | b7a6bf0430ad0299d96de15ea97d3d4ccfb4c958 | refs/heads/master | 2020-12-25T16:13:35.626111 | 2013-09-20T00:14:58 | 2013-09-20T00:14:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | import balanced
# Example scenario: authenticate against the Balanced test marketplace
# (the embedded key is the public test-marketplace secret used by these docs).
balanced.configure('b5de51921b2d11e389c4026ba7cac9da')
# Fetch the bank account by its REST URI, then unstore (delete) it.
bank_account = balanced.BankAccount.find('/v1/bank_accounts/BA5YXVcU9ExcM8jXQhQt7ZY6')
bank_account.delete()
"[email protected]"
] | |
86ba2b6052d3e743fb070ef7f0e05d157df3fe4d | 0dee7cc69ae44e30c5cb372eb17f2e469635056b | /AirBnB_clone_v3/api/v1/app.py | 3b75dd5b7cb112990fe65ac206b8bb1c37bb41c1 | [
"LicenseRef-scancode-public-domain"
] | permissive | HausCloud/Holberton | 00cd25b4a489041e041551ea8f87674d53f43713 | b39c5978698e02b9e746121d6c55d791b73e6d9b | refs/heads/master | 2022-12-13T01:06:18.968047 | 2020-09-05T18:23:00 | 2020-09-05T18:23:00 | 293,129,232 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 699 | py | #!/usr/bin/python3
''' py file to connect to API '''
from os import getenv
from models import storage
from api.v1.views import app_views
from flask import Flask, Blueprint, jsonify, make_response
from flask_cors import CORS
app = Flask(__name__)
# Mount every /api/v1 route registered on the blueprint.
app.register_blueprint(app_views)
# Allow cross-origin requests to all routes from origin "0.0.0.0".
cors = CORS(app, resources={"/*": {"origins": "0.0.0.0"}})
@app.teardown_appcontext
def teardown_appcontext(code):
    'closes storage method'
    # Runs after each request; releases the storage engine session/connection.
    storage.close()
@app.errorhandler(404)
def errorhandler404(error):
    # Return a JSON body instead of Flask's default HTML 404 page.
    return make_response(jsonify({'error': 'Not found'}), 404)
if __name__ == "__main__":
    # Host/port are configurable via environment, with local-dev defaults.
    app.run(host=getenv('HBNB_API_HOST', '0.0.0.0'),
            port=int(getenv('HBNB_API_PORT', '5000')))
| [
"[email protected]"
] | |
3cc8dd50235d65c9fa40a0006df2519f1713d6ca | 6343534aaf5483b3fab219c14b5c33726d5d196e | /shopinglyx/wsgi.py | 59325b034274c335d3d9de6740a9eb9057a07476 | [] | no_license | Emad-ahmed/Ful_Django_Shoping_website | 8441b2caa1214c8df9399dceed5e53fa37cd86cb | 6eefe47749b5cd6b1a2422e9cf717e9584b13dce | refs/heads/main | 2023-05-20T11:30:54.194116 | 2021-06-09T21:07:18 | 2021-06-09T21:07:18 | 375,272,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
WSGI config for shopinglyx project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before the application is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shopinglyx.settings')
# Module-level WSGI callable picked up by WSGI servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"[email protected]"
] | |
eb6b102c5a47abdb1fddc5dd164e9607a07d4269 | 918f0fdef0e9224aa1a0596479178618290055ec | /mmdet3d/core/post_processing/__init__.py | 42eb5bf2b1a4c0fb4cb35c38a74847ade71b7eee | [
"Apache-2.0"
] | permissive | Tsinghua-MARS-Lab/futr3d | b7eb3a0c9d92a58759c9c43e96bfd024a2e3de96 | 9130d71e487bad47f5dbcffd696fe9e4a838104f | refs/heads/main | 2023-07-24T15:40:00.121665 | 2023-07-06T05:50:45 | 2023-07-06T05:50:45 | 499,766,918 | 188 | 27 | MIT | 2022-06-19T10:42:03 | 2022-06-04T08:21:17 | Python | UTF-8 | Python | false | false | 677 | py | # Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core.post_processing import (merge_aug_bboxes, merge_aug_masks,
merge_aug_proposals, merge_aug_scores,
multiclass_nms)
from .box3d_nms import (aligned_3d_nms, box3d_multiclass_nms, circle_nms,
nms_bev, nms_normal_bev)
from .merge_augs import merge_aug_bboxes_3d
# Public API of this package: 2D NMS/merge helpers re-exported from mmdet
# plus the 3D-specific NMS and merge utilities imported above.
__all__ = [
    'multiclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes',
    'merge_aug_scores', 'merge_aug_masks', 'box3d_multiclass_nms',
    'aligned_3d_nms', 'merge_aug_bboxes_3d', 'circle_nms', 'nms_bev',
    'nms_normal_bev'
]
| [
"[email protected]"
] | |
16fddf5472889066e69ad211c65e0c0eaae42d50 | 8255dcf7689c20283b5e75a452139e553b34ddf3 | /run.py | 8ba9ab77eb8ef361a982ca743d65c0d45897d818 | [
"MIT"
] | permissive | Wern-rm/raton.by | 09871eb4da628ff7b0d0b4415a150cf6c12c3e5a | 68f862f2bc0551bf2327e9d6352c0cde93f45301 | refs/heads/main | 2023-05-06T02:26:58.980779 | 2021-05-25T14:09:47 | 2021-05-25T14:09:47 | 317,119,285 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | from app import create_app
# Build the Flask application via the project's factory function.
app = create_app()
if __name__ == '__main__':
    # Development server only: bind to loopback on port 16000.
    app.run(host='127.0.0.1', port=16000)
"[email protected]"
] | |
18f0da7b593b5bb208b8eb0ca6e1b1d9ca239b38 | 04ab6e4cbc7034ba1a0e43d7cdb6d3547534496b | /q2_vsearch/tests/test_join_pairs.py | cbc95074d6c6bacf1fa44b7eaf435988d38e1278 | [
"BSD-3-Clause"
] | permissive | gregcaporaso/q2-vsearch | bd99c1db3d4af06bc691d5111ac60099d13621f2 | 48e2d77f40c3b1841ca15789c6a02fd5077d9a58 | refs/heads/master | 2020-06-10T20:53:03.018489 | 2017-12-20T18:59:30 | 2017-12-20T18:59:30 | 75,876,917 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 12,263 | py | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os
import skbio
import numpy as np
import pandas as pd
from qiime2.plugin.testing import TestPluginBase
from qiime2.util import redirected_stdio
from q2_types.per_sample_sequences import (
SingleLanePerSamplePairedEndFastqDirFmt,
FastqGzFormat)
from q2_vsearch._join_pairs import join_pairs, _join_pairs_w_command_output
class MergePairsTests(TestPluginBase):
    # Integration tests for join_pairs / _join_pairs_w_command_output.
    # They run vsearch on the bundled 'demux-1' paired-end fixture, so the
    # exact joined-sequence counts below depend on the installed vsearch
    # version (see the in-line notes).
    package = 'q2_vsearch.tests'
    def setUp(self):
        super().setUp()
        # Paired-end demultiplexed reads used as input by every test.
        self.input_seqs = SingleLanePerSamplePairedEndFastqDirFmt(
            self.get_data_path('demux-1'), 'r')
    def _parse_manifest(self, demultiplexed_seqs):
        # Load the result's MANIFEST file into a DataFrame ('#' lines are comments).
        return pd.read_csv(
            os.path.join(str(demultiplexed_seqs),
                         demultiplexed_seqs.manifest.pathspec),
            header=0, comment='#')
    def _test_manifest(self, demultiplexed_seqs):
        # The joined output must list the three input samples, forward-only.
        manifest = self._parse_manifest(demultiplexed_seqs)
        self.assertEqual(len(manifest), 3)
        self.assertEqual(list(manifest['sample-id']),
                         ['BAQ2687.1', 'BAQ3473.2', 'BAQ4697.2'])
        self.assertEqual(list(manifest['filename']),
                         ['BAQ2687.1_0_L001_R1_001.fastq.gz',
                          'BAQ3473.2_1_L001_R1_001.fastq.gz',
                          'BAQ4697.2_2_L001_R1_001.fastq.gz'])
        self.assertEqual(list(manifest['direction']),
                         ['forward', 'forward', 'forward'])
    def _test_seq_lengths(self, seq_lengths):
        self.assertTrue(seq_lengths.mean() > 200)
        for e in seq_lengths:
            # input reads are 151 bases, so all output must be longer
            self.assertTrue(e > 151)
    def test_join_pairs(self):
        with redirected_stdio(stderr=os.devnull):
            obs = join_pairs(self.input_seqs)
        # manifest is as expected
        self._test_manifest(obs)
        # expected number of fastq files are created
        output_fastqs = list(obs.sequences.iter_views(FastqGzFormat))
        self.assertEqual(len(output_fastqs), 3)
        # The following values were determined by running vsearch directly
        # with default parameters. It is possible that different versions of
        # vsearch will result in differences in these numbers, and that
        # the corresponding tests may therefore be too specific. We'll have
        # to adjust the tests if that's the case.
        default_exp_sequence_counts = {
            'BAQ2687.1_0_L001_R1_001.fastq.gz': 806,
            'BAQ3473.2_1_L001_R1_001.fastq.gz': 753,
            'BAQ4697.2_2_L001_R1_001.fastq.gz': 711,
        }
        for fastq_name, fastq_path in output_fastqs:
            seqs = skbio.io.read(str(fastq_path), format='fastq',
                                 compression='gzip', constructor=skbio.DNA)
            seqs = list(seqs)
            seq_lengths = np.asarray([len(s) for s in seqs])
            self._test_seq_lengths(seq_lengths)
            # expected number of sequences are joined
            self.assertEqual(
                len(seq_lengths),
                default_exp_sequence_counts[str(fastq_name)])
    def test_join_pairs_some_samples_w_no_joined_seqs(self):
        # minmergelen is set very high here, resulting in only one sequence
        # being joined across the three samples.
        with redirected_stdio(stderr=os.devnull):
            obs = join_pairs(self.input_seqs, minmergelen=279)
        # manifest is as expected
        self._test_manifest(obs)
        # expected number of fastq files are created
        output_fastqs = list(obs.sequences.iter_views(FastqGzFormat))
        self.assertEqual(len(output_fastqs), 3)
        # The following values were determined by running vsearch directly.
        exp_sequence_counts = {
            'BAQ2687.1_0_L001_R1_001.fastq.gz': 0,
            'BAQ3473.2_1_L001_R1_001.fastq.gz': 2,
            'BAQ4697.2_2_L001_R1_001.fastq.gz': 0,
        }
        for fastq_name, fastq_path in output_fastqs:
            with redirected_stdio(stderr=os.devnull):
                seqs = skbio.io.read(str(fastq_path), format='fastq',
                                     compression='gzip', constructor=skbio.DNA)
            seqs = list(seqs)
            seq_lengths = np.asarray([len(s) for s in seqs])
            # expected number of sequences are joined
            self.assertEqual(
                len(seq_lengths),
                exp_sequence_counts[str(fastq_name)])
    def test_join_pairs_all_samples_w_no_joined_seqs(self):
        # minmergelen is set very high here, resulting in no sequences
        # being joined across the three samples.
        with redirected_stdio(stderr=os.devnull):
            obs = join_pairs(self.input_seqs, minmergelen=500)
        # manifest is as expected
        self._test_manifest(obs)
        # expected number of fastq files are created
        output_fastqs = list(obs.sequences.iter_views(FastqGzFormat))
        self.assertEqual(len(output_fastqs), 3)
        for fastq_name, fastq_path in output_fastqs:
            with redirected_stdio(stderr=os.devnull):
                seqs = skbio.io.read(str(fastq_path), format='fastq',
                                     compression='gzip', constructor=skbio.DNA)
            seqs = list(seqs)
            seq_lengths = np.asarray([len(s) for s in seqs])
            self.assertEqual(len(seq_lengths), 0)
    def test_join_pairs_alt_truncqual(self):
        with redirected_stdio(stderr=os.devnull):
            cmd, obs = _join_pairs_w_command_output(
                self.input_seqs, truncqual=5)
        # sanity check the output
        self._test_manifest(obs)
        output_fastqs = list(obs.sequences.iter_views(FastqGzFormat))
        self.assertEqual(len(output_fastqs), 3)
        # confirm altered parameter was passed to vsearch
        self.assertTrue('--fastq_truncqual 5' in ' '.join(cmd))
    def test_join_pairs_alt_minlen(self):
        with redirected_stdio(stderr=os.devnull):
            cmd, obs = _join_pairs_w_command_output(
                self.input_seqs, minlen=25)
        # sanity check the output
        self._test_manifest(obs)
        output_fastqs = list(obs.sequences.iter_views(FastqGzFormat))
        self.assertEqual(len(output_fastqs), 3)
        # confirm altered parameter was passed to vsearch
        self.assertTrue('--fastq_minlen 25' in ' '.join(cmd))
    def test_join_pairs_alt_maxns(self):
        with redirected_stdio(stderr=os.devnull):
            cmd, obs = _join_pairs_w_command_output(
                self.input_seqs, maxns=2)
        # sanity check the output
        self._test_manifest(obs)
        output_fastqs = list(obs.sequences.iter_views(FastqGzFormat))
        self.assertEqual(len(output_fastqs), 3)
        # confirm altered parameter was passed to vsearch
        self.assertTrue('--fastq_maxns 2' in ' '.join(cmd))
    def test_join_pairs_alt_allowmergestagger(self):
        with redirected_stdio(stderr=os.devnull):
            cmd, obs = _join_pairs_w_command_output(
                self.input_seqs, allowmergestagger=True)
        # sanity check the output
        self._test_manifest(obs)
        output_fastqs = list(obs.sequences.iter_views(FastqGzFormat))
        self.assertEqual(len(output_fastqs), 3)
        # confirm altered parameter was passed to vsearch
        # (boolean flag: present in cmd without a value)
        self.assertTrue('--fastq_allowmergestagger' in cmd)
    def test_join_pairs_alt_minovlen(self):
        with redirected_stdio(stderr=os.devnull):
            cmd, obs = _join_pairs_w_command_output(
                self.input_seqs, minovlen=42)
        # sanity check the output
        self._test_manifest(obs)
        output_fastqs = list(obs.sequences.iter_views(FastqGzFormat))
        self.assertEqual(len(output_fastqs), 3)
        # confirm altered parameter was passed to vsearch
        self.assertTrue('--fastq_minovlen 42' in ' '.join(cmd))
    def test_join_pairs_alt_maxdiffs(self):
        with redirected_stdio(stderr=os.devnull):
            cmd, obs = _join_pairs_w_command_output(
                self.input_seqs, maxdiffs=2)
        # sanity check the output
        self._test_manifest(obs)
        output_fastqs = list(obs.sequences.iter_views(FastqGzFormat))
        self.assertEqual(len(output_fastqs), 3)
        # confirm altered parameter was passed to vsearch
        self.assertTrue('--fastq_maxdiffs 2' in ' '.join(cmd))
    def test_join_pairs_alt_minmergelen(self):
        with redirected_stdio(stderr=os.devnull):
            cmd, obs = _join_pairs_w_command_output(
                self.input_seqs, minmergelen=250)
        # sanity check the output
        self._test_manifest(obs)
        output_fastqs = list(obs.sequences.iter_views(FastqGzFormat))
        self.assertEqual(len(output_fastqs), 3)
        # confirm altered parameter was passed to vsearch
        self.assertTrue('--fastq_minmergelen 250' in ' '.join(cmd))
    def test_join_pairs_alt_maxmergelen(self):
        with redirected_stdio(stderr=os.devnull):
            cmd, obs = _join_pairs_w_command_output(
                self.input_seqs, maxmergelen=250)
        # sanity check the output
        self._test_manifest(obs)
        output_fastqs = list(obs.sequences.iter_views(FastqGzFormat))
        self.assertEqual(len(output_fastqs), 3)
        # confirm altered parameter was passed to vsearch
        self.assertTrue('--fastq_maxmergelen 250' in ' '.join(cmd))
    def test_join_pairs_alt_maxee(self):
        with redirected_stdio(stderr=os.devnull):
            cmd, obs = _join_pairs_w_command_output(
                self.input_seqs, maxee=25.0)
        # sanity check the output
        self._test_manifest(obs)
        output_fastqs = list(obs.sequences.iter_views(FastqGzFormat))
        self.assertEqual(len(output_fastqs), 3)
        # confirm altered parameter was passed to vsearch
        self.assertTrue('--fastq_maxee 25.0' in ' '.join(cmd))
    def test_join_pairs_alt_qmin(self):
        with redirected_stdio(stderr=os.devnull):
            cmd, obs = _join_pairs_w_command_output(
                self.input_seqs, qmin=-1)
        # sanity check the output
        self._test_manifest(obs)
        output_fastqs = list(obs.sequences.iter_views(FastqGzFormat))
        self.assertEqual(len(output_fastqs), 3)
        # confirm altered parameter was passed to vsearch
        self.assertTrue('--fastq_qmin -1' in ' '.join(cmd))
    def test_join_pairs_alt_qminout(self):
        with redirected_stdio(stderr=os.devnull):
            cmd, obs = _join_pairs_w_command_output(
                self.input_seqs, qminout=-1)
        # sanity check the output
        self._test_manifest(obs)
        output_fastqs = list(obs.sequences.iter_views(FastqGzFormat))
        self.assertEqual(len(output_fastqs), 3)
        # confirm altered parameter was passed to vsearch
        self.assertTrue('--fastq_qminout -1' in ' '.join(cmd))
    def test_join_pairs_alt_qmax(self):
        with redirected_stdio(stderr=os.devnull):
            cmd, obs = _join_pairs_w_command_output(
                self.input_seqs, qmax=40)
        # sanity check the output
        self._test_manifest(obs)
        output_fastqs = list(obs.sequences.iter_views(FastqGzFormat))
        self.assertEqual(len(output_fastqs), 3)
        # confirm altered parameter was passed to vsearch
        self.assertTrue('--fastq_qmax 40' in ' '.join(cmd))
    def test_join_pairs_alt_qmaxout(self):
        with redirected_stdio(stderr=os.devnull):
            cmd, obs = _join_pairs_w_command_output(
                self.input_seqs, qmaxout=40)
        # sanity check the output
        self._test_manifest(obs)
        output_fastqs = list(obs.sequences.iter_views(FastqGzFormat))
        self.assertEqual(len(output_fastqs), 3)
        # confirm altered parameter was passed to vsearch
        self.assertTrue('--fastq_qmaxout 40' in ' '.join(cmd))
| [
"[email protected]"
] | |
5554e807f98c00e8a594c894b58a6069820180ad | 187a6558f3c7cb6234164677a2bda2e73c26eaaf | /jdcloud_sdk/services/monitor/models/NoticeOption.py | 5782d8ff633350bf7e0c26947724e00a101fa9a3 | [
"Apache-2.0"
] | permissive | jdcloud-api/jdcloud-sdk-python | 4d2db584acc2620b7a866af82d21658cdd7cc227 | 3d1c50ed9117304d3b77a21babe899f939ae91cd | refs/heads/master | 2023-09-04T02:51:08.335168 | 2023-08-30T12:00:25 | 2023-08-30T12:00:25 | 126,276,169 | 18 | 36 | Apache-2.0 | 2023-09-07T06:54:49 | 2018-03-22T03:47:02 | Python | UTF-8 | Python | false | false | 1,661 | py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class NoticeOption(object):
def __init__(self, effectiveIntervalEnd=None, effectiveIntervalStart=None, noticeCondition=None, noticePeriod=None, noticeWay=None):
"""
:param effectiveIntervalEnd: (Optional) 生效截止时间,默认值:23:59:59
:param effectiveIntervalStart: (Optional) 生效起始时间,默认值:00:00:00
:param noticeCondition: (Optional) 通知条件 1-告警 2-数据不足3-告警恢复
:param noticePeriod: (Optional) 通知沉默周期,单位:分钟,默认值:24小时,目前支持的取值“24小时、12小时、6小时、3小时、1小时、30分钟、15分钟、10分钟、5分钟”
:param noticeWay: (Optional) 通知方法 1-短信 2-邮件
"""
self.effectiveIntervalEnd = effectiveIntervalEnd
self.effectiveIntervalStart = effectiveIntervalStart
self.noticeCondition = noticeCondition
self.noticePeriod = noticePeriod
self.noticeWay = noticeWay
| [
"[email protected]"
] | |
62be4bbd7ede776de75f9f3f2fd3dc6801ebcfda | 1fc45a47f0e540941c87b04616f3b4019da9f9a0 | /src/sentry/search/django/constants.py | d07708d74a015bdd0bd4f0411ae69587e4b956d6 | [
"BSD-2-Clause"
] | permissive | seukjung/sentry-8.15.0 | febc11864a74a68ddb97b146cc1d2438ef019241 | fd3cab65c64fcbc32817885fa44df65534844793 | refs/heads/master | 2022-10-28T06:39:17.063333 | 2018-01-17T12:31:55 | 2018-01-17T12:31:55 | 117,833,103 | 0 | 0 | BSD-3-Clause | 2022-10-05T18:09:54 | 2018-01-17T12:28:13 | Python | UTF-8 | Python | false | false | 1,622 | py | """
sentry.search.django.constants
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
SORT_CLAUSES = {
'priority': 'sentry_groupedmessage.score',
'date': 'EXTRACT(EPOCH FROM sentry_groupedmessage.last_seen)::int',
'new': 'EXTRACT(EPOCH FROM sentry_groupedmessage.first_seen)::int',
'freq': 'sentry_groupedmessage.times_seen',
}
SQLITE_SORT_CLAUSES = SORT_CLAUSES.copy()
SQLITE_SORT_CLAUSES.update({
'date': "cast((julianday(sentry_groupedmessage.last_seen) - 2440587.5) * 86400.0 as INTEGER)",
'new': "cast((julianday(sentry_groupedmessage.first_seen) - 2440587.5) * 86400.0 as INTEGER)",
})
MYSQL_SORT_CLAUSES = SORT_CLAUSES.copy()
MYSQL_SORT_CLAUSES.update({
'date': 'UNIX_TIMESTAMP(sentry_groupedmessage.last_seen)',
'new': 'UNIX_TIMESTAMP(sentry_groupedmessage.first_seen)',
})
ORACLE_SORT_CLAUSES = SORT_CLAUSES.copy()
ORACLE_SORT_CLAUSES.update({
'date': "(cast(sentry_groupedmessage.last_seen as date)-TO_DATE('01/01/1970 00:00:00', 'MM-DD-YYYY HH24:MI:SS')) * 24 * 60 * 60",
'new': "(cast(sentry_groupedmessage.first_seen as date)-TO_DATE('01/01/1970 00:00:00', 'MM-DD-YYYY HH24:MI:SS')) * 24 * 60 * 60",
})
MSSQL_SORT_CLAUSES = SORT_CLAUSES.copy()
MSSQL_SORT_CLAUSES.update({
'date': "DATEDIFF(s, '1970-01-01T00:00:00', sentry_groupedmessage.last_seen)",
'new': "DATEDIFF(s, '1970-01-01T00:00:00', sentry_groupedmessage.first_seen)",
})
MSSQL_ENGINES = set(['django_pytds', 'sqlserver_ado', 'sql_server.pyodbc'])
| [
"[email protected]"
] | |
af07e96835aac06a7e756e51bb65e5c49aedfcfb | 56997c84a331433225f89f168520ad8d709083c1 | /Programmers/DFS_BFS/네트워크/network_ver1.py | 84f304c62bd83cd9430505ab8f1a79b96656c2e5 | [] | no_license | miseop25/Back_Jun_Code_Study | 51e080f8ecf74f7d1a8bb1da404d29c8ba52325c | 1d993e718c37c571aae1d407054ec284dc24c922 | refs/heads/master | 2022-11-06T01:05:05.028838 | 2022-10-23T13:11:22 | 2022-10-23T13:11:22 | 200,828,984 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,205 | py | class Node :
def __init__(self, data) :
    # data: this node's index in the network; child: indices of nodes
    # directly connected to this one (filled in by Network.connectNode).
    self.data = data
    self.child = []
class Network :
    """Adjacency-list view of an n x n computer connection matrix.

    Counts connected components by launching a DFS from every
    not-yet-visited computer.
    """
    def __init__(self, n, computers):
        self.n = n
        # One Node object per computer index.
        self.nodeDict = {idx: Node(idx) for idx in range(n)}
        self.connectNode(computers)
        # check[i] stays True until computer i has been visited.
        self.check = [True] * n
    def connectNode(self, computers):
        # Translate the adjacency matrix into per-node child lists,
        # skipping the diagonal (a computer's link to itself).
        for row in range(self.n):
            for col in range(self.n):
                if row != col and computers[row][col] == 1:
                    self.nodeDict[row].child.append(col)
    def dfsNetwork(self, target):
        # Depth-first traversal (iterative): mark everything reachable
        # from `target` as visited.
        stack = [target]
        while stack:
            node = stack.pop()
            self.check[node.data] = False
            for neighbor in node.child:
                if self.check[neighbor]:
                    stack.append(self.nodeDict[neighbor])
    def getAnswer(self):
        # Every DFS launched from an unvisited node discovers exactly
        # one connected component.
        components = 0
        for idx in range(self.n):
            if self.check[idx]:
                components += 1
                self.dfsNetwork(self.nodeDict[idx])
        return components
def solution(n, computers):
    """Return the number of connected networks among `n` computers."""
    network = Network(n, computers)
    return network.getAnswer()
print(solution(3, [[1, 1, 0], [1, 1, 0], [0, 0, 1]]))
"[email protected]"
] | |
f7716ffbae61d7010cae9cc57710c47a3dd80743 | a9f676c06bacee1f8b27e08d3c411c89a69cfd40 | /falmer/content/migrations/0012_homepage.py | c6db737a1ea4de523845df670a1e021053dc211d | [
"MIT"
] | permissive | sussexstudent/falmer | 1b877c3ac75a0477f155ce1a9dee93a5ada686d6 | ae735bd9d6177002c3d986e5c19a78102233308f | refs/heads/master | 2022-12-11T19:40:12.232488 | 2020-03-20T13:01:47 | 2020-03-20T13:01:47 | 88,043,958 | 2 | 3 | MIT | 2022-12-08T03:17:26 | 2017-04-12T11:24:02 | Python | UTF-8 | Python | false | false | 1,364 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-14 14:31
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import falmer.content.blocks
import wagtail.core.blocks
import wagtail.core.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0039_collectionviewrestriction'),
('content', '0011_auto_20170622_1345'),
]
operations = [
migrations.CreateModel(
name='HomePage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('full_time_officers', wagtail.core.fields.StreamField((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=True)), ('image', falmer.content.blocks.ImageBlock())))),
('part_time_officers', wagtail.core.fields.StreamField((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=True)), ('image', falmer.content.blocks.ImageBlock())))),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
]
| [
"[email protected]"
] | |
8e2ce871dc44558694ebd870b565b602b4058d98 | 234c7fb0bdabdd696c8e4c6a449ac2c8e3f14ad5 | /build/PureCloudPlatformClientV2/models/nlu_detection_request.py | 2cbab60154a60c1e7d754276de069675dcb500c2 | [
"Apache-2.0",
"MIT"
] | permissive | humano7/platform-client-sdk-python | 2a942c43cc2d69e8cb0c4113d998e6e0664fdedb | dd5b693b1fc90c9dcb36885d7227f11221db5980 | refs/heads/master | 2023-04-12T05:05:53.932393 | 2021-04-22T03:41:22 | 2021-04-22T03:41:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,350 | py | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class NluDetectionRequest(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self):
        """
        NluDetectionRequest - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Attribute name -> declared Swagger type; drives to_dict() iteration.
        self.swagger_types = {
            'input': 'NluDetectionInput',
            'context': 'NluDetectionContext'
        }
        # Attribute name -> JSON key on the wire.
        self.attribute_map = {
            'input': 'input',
            'context': 'context'
        }
        self._input = None
        self._context = None

    @property
    def input(self):
        """
        Gets the input of this NluDetectionRequest.
        The input subject to NLU detection.

        :return: The input of this NluDetectionRequest.
        :rtype: NluDetectionInput
        """
        return self._input

    @input.setter
    def input(self, input):
        """
        Sets the input of this NluDetectionRequest.
        The input subject to NLU detection.

        :param input: The input of this NluDetectionRequest.
        :type: NluDetectionInput
        """
        self._input = input

    @property
    def context(self):
        """
        Gets the context of this NluDetectionRequest.
        The context for the input to NLU detection.

        :return: The context of this NluDetectionRequest.
        :rtype: NluDetectionContext
        """
        return self._context

    @context.setter
    def context(self, context):
        """
        Sets the context of this NluDetectionRequest.
        The context for the input to NLU detection.

        :param context: The context of this NluDetectionRequest.
        :type: NluDetectionContext
        """
        self._context = context

    def to_dict(self):
        """
        Returns the model properties as a dict, recursively converting
        any nested model objects (anything exposing to_dict()).
        """
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict")
                                else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: (v.to_dict() if hasattr(v, "to_dict")
                                    else v)
                                for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_json(self):
        """
        Returns the model as raw JSON
        """
        return json.dumps(sanitize_for_serialization(self.to_dict()))

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Bug fix: the generated code read other.__dict__ unconditionally,
        # which raised AttributeError when compared against objects without
        # a __dict__ (e.g. `request == 5`). Non-instances now compare unequal.
        if not isinstance(other, NluDetectionRequest):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| [
"[email protected]"
] | |
c421d5537c5e8ad2bc05d6403e45f2fbd124db1c | 63dd919e1551fbabdad3f311b96040742e4ecb53 | /discpy/team.py | af40c20d54199589e72d24df8658cf9c23d632a3 | [
"MIT"
] | permissive | AryamanSrii/DiscPy | 4d45d2f52c21b31bb84a17dd95af421c9d563bd6 | 0ba89da9ca184f0dfaebeedd4e9b7bc3099a0353 | refs/heads/main | 2023-08-17T14:42:45.936056 | 2021-10-01T15:12:17 | 2021-10-01T15:12:17 | 414,887,513 | 0 | 0 | MIT | 2021-10-08T07:24:33 | 2021-10-08T07:24:32 | null | UTF-8 | Python | false | false | 4,768 | py | """
The MIT License (MIT)
Copyright (c) 2021 The DiscPy Developers
Copyright (c) 2015-2021 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from . import utils
from .user import BaseUser
from .asset import Asset
from .enums import TeamMembershipState, try_enum
from typing import TYPE_CHECKING, Optional, List
if TYPE_CHECKING:
from .state import ConnectionState
from .types.team import (
Team as TeamPayload,
TeamMember as TeamMemberPayload,
)
__all__ = (
"Team",
"TeamMember",
)
class Team:
    """Represents an application team for a bot provided by Discord.

    Attributes
    -------------
    id: :class:`int`
        The team ID.
    name: :class:`str`
        The team name
    owner_id: :class:`int`
        The team's owner ID.
    members: List[:class:`TeamMember`]
        A list of the members in the team

    .. versionadded:: 1.3
    """

    __slots__ = ("_state", "id", "name", "_icon", "owner_id", "members")

    def __init__(self, state: ConnectionState, data: TeamPayload):
        # Keep a handle on the connection state for asset/HTTP helpers.
        self._state: ConnectionState = state

        self.id: int = int(data["id"])
        self.name: str = data["name"]
        # Raw icon hash; resolved lazily into an Asset by the `icon` property.
        self._icon: Optional[str] = data["icon"]
        self.owner_id: Optional[int] = utils._get_as_snowflake(data, "owner_user_id")
        self.members: List[TeamMember] = [
            TeamMember(self, self._state, member) for member in data["members"]
        ]

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} id={self.id} name={self.name}>"

    @property
    def icon(self) -> Optional[Asset]:
        """Optional[:class:`.Asset`]: Retrieves the team's icon asset, if any."""
        if self._icon is None:
            return None
        return Asset._from_icon(self._state, self.id, self._icon, path="team")

    @property
    def owner(self) -> Optional[TeamMember]:
        """Optional[:class:`TeamMember`]: The team's owner.

        Looked up among ``members`` by ``owner_id``; None if not present.
        """
        return utils.get(self.members, id=self.owner_id)
class TeamMember(BaseUser):
    """Represents a team member in a team.

    .. container:: operations

        .. describe:: x == y

            Checks if two team members are equal.

        .. describe:: x != y

            Checks if two team members are not equal.

        .. describe:: hash(x)

            Return the team member's hash.

        .. describe:: str(x)

            Returns the team member's name with discriminator.

    .. versionadded:: 1.3

    Attributes
    -------------
    name: :class:`str`
        The team member's username.
    id: :class:`int`
        The team member's unique ID.
    discriminator: :class:`str`
        The team member's discriminator. This is given when the username has conflicts.
    avatar: Optional[:class:`str`]
        The avatar hash the team member has. Could be None.
    bot: :class:`bool`
        Specifies if the user is a bot account.
    team: :class:`Team`
        The team that the member is from.
    membership_state: :class:`TeamMembershipState`
        The membership state of the member (e.g. invited or accepted)
    """

    __slots__ = ("team", "membership_state", "permissions")

    def __init__(self, team: Team, state: ConnectionState, data: TeamMemberPayload):
        self.team: Team = team
        # try_enum tolerates unknown values coming from the API.
        self.membership_state: TeamMembershipState = try_enum(
            TeamMembershipState, data["membership_state"]
        )
        self.permissions: List[str] = data["permissions"]
        # The user-specific fields (name, id, discriminator, ...) come from
        # the nested "user" payload and are handled by BaseUser.
        super().__init__(state=state, data=data["user"])

    def __repr__(self) -> str:
        return (
            f"<{self.__class__.__name__} id={self.id} name={self.name!r} "
            f"discriminator={self.discriminator!r} membership_state={self.membership_state!r}>"
        )
| [
"[email protected]"
] | |
66dd3074a663a4bbf8256161ed6ac35f783c75e9 | 926621c29eb55046f9f59750db09bdb24ed3078e | /lib/googlecloudsdk/third_party/py27/py27_collections.py | 46e053f8a1b7ee5b2c1180e599017396ae70b3db | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | bopopescu/SDK | 525d9b29fb2e901aa79697c9dcdf5ddd852859ab | e6d9aaee2456f706d1d86e8ec2a41d146e33550d | refs/heads/master | 2022-11-22T18:24:13.464605 | 2016-05-18T16:53:30 | 2016-05-18T16:53:30 | 282,322,505 | 0 | 0 | NOASSERTION | 2020-07-24T21:52:25 | 2020-07-24T21:52:24 | null | UTF-8 | Python | false | false | 755 | py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python 2.7 collections module compatibility for 2.6."""
# pylint: disable=wildcard-import
from googlecloudsdk.third_party.py27.mirror.collections import *
| [
"[email protected]"
] | |
14384b1bf58a425a8c632b2ccd3f330ffcc1c262 | c315c2d9ea4b0d43768964c46611afca242d3cdc | /input_pipeline/pipeline.py | 215a7e219e2e1ff50175e8c2946702d086a03d2b | [
"MIT"
] | permissive | TropComplique/EDANet | 9685c986b8e25fab7a2db14803ab713602df65a2 | ec4fd0d2693ce4ae5b81664e22adf9bf6c81f4a7 | refs/heads/master | 2020-04-04T14:04:46.228608 | 2019-01-07T12:40:42 | 2019-01-07T12:40:42 | 155,986,060 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 6,384 | py | import tensorflow as tf
from .random_rotation import random_rotation
from .color_augmentations import random_color_manipulations, random_pixel_value_scale
SHUFFLE_BUFFER_SIZE = 5000
NUM_PARALLEL_CALLS = 12
RESIZE_METHOD = tf.image.ResizeMethod.BILINEAR
MIN_CROP_SIZE = 0.9
ROTATE = False
class Pipeline:
    """TF (v1) input pipeline: reads tfrecords and yields batched (image, labels) pairs."""

    def __init__(self, filenames, is_training, params):
        """
        During the evaluation we don't resize images.

        Arguments:
            filenames: a list of strings, paths to tfrecords files.
            is_training: a boolean.
            params: a dict.
        """
        self.is_training = is_training
        self.num_labels = params['num_labels']  # it can be None

        if is_training:
            batch_size = params['batch_size']
            height = params['image_height']
            width = params['image_width']
            self.image_size = [height, width]
        else:
            # evaluation: one full-size image at a time, no resizing
            batch_size = 1

        def get_num_samples(filename):
            # counts records by iterating the whole file once
            return sum(1 for _ in tf.python_io.tf_record_iterator(filename))

        num_examples = 0
        for filename in filenames:
            num_examples_in_file = get_num_samples(filename)
            assert num_examples_in_file > 0
            num_examples += num_examples_in_file
        self.num_examples = num_examples
        assert self.num_examples > 0

        dataset = tf.data.Dataset.from_tensor_slices(filenames)
        num_shards = len(filenames)

        if is_training:
            # shuffle file order first, then individual records below
            dataset = dataset.shuffle(buffer_size=num_shards)
        dataset = dataset.flat_map(tf.data.TFRecordDataset)
        dataset = dataset.prefetch(buffer_size=batch_size)

        if is_training:
            dataset = dataset.shuffle(buffer_size=SHUFFLE_BUFFER_SIZE)
        # repeat forever during training, a single epoch otherwise
        dataset = dataset.repeat(None if is_training else 1)
        dataset = dataset.map(self._parse_and_preprocess, num_parallel_calls=NUM_PARALLEL_CALLS)

        dataset = dataset.batch(batch_size)
        dataset = dataset.prefetch(buffer_size=1)

        self.dataset = dataset

    def _parse_and_preprocess(self, example_proto):
        """What this function does:
        1. Parses one record from a tfrecords file and decodes it.
        2. (optionally) Augments it.

        Returns:
            image: a float tensor with shape [image_height, image_width, 3],
                an RGB image with pixel values in the range [0, 1].
            labels: an int tensor with shape [image_height, image_width].
                The values that it can contain are {0, 1, ..., num_labels - 1}.
                It also can contain ignore label: `num_labels`.
        """
        features = {
            'image': tf.FixedLenFeature([], tf.string),
            'masks': tf.FixedLenFeature([], tf.string)
        }
        parsed_features = tf.parse_single_example(example_proto, features)

        # get an image
        image = tf.image.decode_jpeg(parsed_features['image'], channels=3)
        # NOTE(review): image_height/image_width are computed but unused below.
        image_height, image_width = tf.shape(image)[0], tf.shape(image)[1]
        image = tf.image.convert_image_dtype(image, tf.float32)
        # now pixel values are scaled to the [0, 1] range

        # get a segmentation labels
        labels = tf.image.decode_png(parsed_features['masks'], channels=1)

        if self.is_training:
            image, labels = self.augmentation(image, labels)
        labels = tf.squeeze(labels, 2)
        labels = tf.to_int32(labels)
        return image, labels

    def augmentation(self, image, labels):
        # Training-only augmentations. Note: self.image_size is only set
        # when is_training is True, which matches the only call site.
        if ROTATE:
            # rotation operates on one-hot masks so label interpolation is safe
            assert self.num_labels is not None
            labels = tf.squeeze(labels, 2)
            binary_masks = tf.one_hot(labels, self.num_labels, dtype=tf.float32)
            image, binary_masks = random_rotation(image, binary_masks, max_angle=30, probability=0.1)
            labels = tf.argmax(binary_masks, axis=2, output_type=tf.int32)
        image, labels = randomly_crop_and_resize(image, labels, self.image_size, probability=0.9)
        image = random_color_manipulations(image, probability=0.1, grayscale_probability=0.05)
        image = random_pixel_value_scale(image, probability=0.1, minval=0.9, maxval=1.1)
        image, labels = random_flip_left_right(image, labels)
        return image, labels
def randomly_crop_and_resize(image, labels, image_size, probability=0.5):
    """With probability `probability`, crop a random sub-window of the image
    (and the aligned labels), then resize both to `image_size`.

    Arguments:
        image: a float tensor with shape [height, width, 3].
        labels: a float tensor with shape [height, width, 1].
        image_size: a list with two integers [new_height, new_width].
        probability: a float number.
    Returns:
        image: a float tensor with shape [new_height, new_width, 3].
        labels: a float tensor with shape [new_height, new_width, 1].
    """
    height = tf.shape(image)[0]
    width = tf.shape(image)[1]

    def get_random_window():
        # BUGFIX: the sampled `crop_size` was previously unused, so the crop
        # window was always exactly MIN_CROP_SIZE of the image. Now the crop
        # fraction is actually random in [MIN_CROP_SIZE, 1.0).
        crop_size = tf.random_uniform([], MIN_CROP_SIZE, 1.0)
        crop_size_y = tf.to_int32(crop_size * tf.to_float(height))
        crop_size_x = tf.to_int32(crop_size * tf.to_float(width))
        # random top-left corner such that the window fits inside the image
        y = tf.random_uniform([], 0, height - crop_size_y, dtype=tf.int32)
        x = tf.random_uniform([], 0, width - crop_size_x, dtype=tf.int32)
        crop_window = tf.stack([y, x, crop_size_y, crop_size_x])
        return crop_window

    whole_image_window = tf.stack([0, 0, height, width])
    do_it = tf.less(tf.random_uniform([]), probability)
    window = tf.cond(
        do_it, lambda: get_random_window(),
        lambda: whole_image_window
    )

    image = tf.image.crop_to_bounding_box(image, window[0], window[1], window[2], window[3])
    labels = tf.image.crop_to_bounding_box(labels, window[0], window[1], window[2], window[3])
    image = tf.image.resize_images(image, image_size, method=RESIZE_METHOD)
    # nearest neighbor keeps label values discrete (no interpolation between classes)
    labels = tf.image.resize_images(labels, image_size, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    return image, labels
def random_flip_left_right(image, labels):
    """Horizontally mirror `image` and `labels` together with probability 0.5."""
    with tf.name_scope('random_flip_left_right'):
        def mirror_both():
            # image and labels must be flipped in lockstep to stay aligned
            return tf.image.flip_left_right(image), tf.image.flip_left_right(labels)

        should_flip = tf.less(tf.random_uniform([]), 0.5)
        image, labels = tf.cond(
            should_flip,
            mirror_both,
            lambda: (image, labels)
        )
        return image, labels
| [
"[email protected]"
] | |
bcd750d560e214053a6f3ae7100412b22b224db0 | 96dcea595e7c16cec07b3f649afd65f3660a0bad | /tests/components/template/test_button.py | bfdb9352767ec5880116473e8ab445ab30b8a0a5 | [
"Apache-2.0"
] | permissive | home-assistant/core | 3455eac2e9d925c92d30178643b1aaccf3a6484f | 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743 | refs/heads/dev | 2023-08-31T15:41:06.299469 | 2023-08-31T14:50:53 | 2023-08-31T14:50:53 | 12,888,993 | 35,501 | 20,617 | Apache-2.0 | 2023-09-14T21:50:15 | 2013-09-17T07:29:48 | Python | UTF-8 | Python | false | false | 5,386 | py | """The tests for the Template button platform."""
import datetime as dt
from unittest.mock import patch
from homeassistant import setup
from homeassistant.components.button import DOMAIN as BUTTON_DOMAIN, SERVICE_PRESS
from homeassistant.components.template.button import DEFAULT_NAME
from homeassistant.const import (
CONF_DEVICE_CLASS,
CONF_ENTITY_ID,
CONF_FRIENDLY_NAME,
CONF_ICON,
STATE_UNKNOWN,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_registry import async_get
from tests.common import assert_setup_component
_TEST_BUTTON = "button.template_button"
_TEST_OPTIONS_BUTTON = "button.test"
async def test_missing_optional_config(hass: HomeAssistant) -> None:
    """Test: missing optional template is ok.

    Only the required `press` action is configured; the entity should still
    be created with the default name and an unknown state.
    """
    with assert_setup_component(1, "template"):
        assert await setup.async_setup_component(
            hass,
            "template",
            {
                "template": {
                    "button": {
                        "press": {"service": "script.press"},
                    },
                }
            },
        )

    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()

    # _verify defaults to the default entity id and friendly name
    _verify(hass, STATE_UNKNOWN)
async def test_missing_required_keys(hass: HomeAssistant) -> None:
    """Test: missing required fields will fail.

    An empty button config lacks the mandatory `press` action, so setup must
    reject it (0 platforms set up) and no button entity is created.
    """
    with assert_setup_component(0, "template"):
        assert await setup.async_setup_component(
            hass,
            "template",
            {"template": {"button": {}}},
        )

    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()

    assert hass.states.async_all("button") == []
async def test_all_optional_config(hass: HomeAssistant, calls) -> None:
    """Test: including all optional templates is ok.

    Sets every optional key (device_class, unique_id, name, icon), presses the
    button, and checks that the press action fired once and the state became
    the press timestamp.
    """
    with assert_setup_component(1, "template"):
        assert await setup.async_setup_component(
            hass,
            "template",
            {
                "template": {
                    "unique_id": "test",
                    "button": {
                        "press": {
                            "service": "test.automation",
                            "data_template": {"caller": "{{ this.entity_id }}"},
                        },
                        "device_class": "restart",
                        "unique_id": "test",
                        "name": "test",
                        "icon": "mdi:test",
                    },
                }
            },
        )

    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()

    # before any press the button state is unknown
    _verify(
        hass,
        STATE_UNKNOWN,
        {
            CONF_DEVICE_CLASS: "restart",
            CONF_FRIENDLY_NAME: "test",
            CONF_ICON: "mdi:test",
        },
        _TEST_OPTIONS_BUTTON,
    )

    # freeze "now" so the resulting state timestamp is predictable
    now = dt.datetime.now(dt.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        await hass.services.async_call(
            BUTTON_DOMAIN,
            SERVICE_PRESS,
            {CONF_ENTITY_ID: _TEST_OPTIONS_BUTTON},
            blocking=True,
        )

    # the press action ran exactly once, templated with the entity's own id
    assert len(calls) == 1
    assert calls[0].data["caller"] == _TEST_OPTIONS_BUTTON

    # a button's state is the ISO timestamp of its last press
    _verify(
        hass,
        now.isoformat(),
        {
            CONF_DEVICE_CLASS: "restart",
            CONF_FRIENDLY_NAME: "test",
            CONF_ICON: "mdi:test",
        },
        _TEST_OPTIONS_BUTTON,
    )

    # unique_id is the concatenation of the template block id and the button id
    er = async_get(hass)
    assert er.async_get_entity_id("button", "template", "test-test")
async def test_name_template(hass: HomeAssistant) -> None:
    """Test: name template.

    The `name` option is itself a template and must be rendered
    ("Button {{ 1 + 1 }}" -> "Button 2") for both friendly name and entity id.
    """
    with assert_setup_component(1, "template"):
        assert await setup.async_setup_component(
            hass,
            "template",
            {
                "template": {
                    "button": {
                        "press": {"service": "script.press"},
                        "name": "Button {{ 1 + 1 }}",
                    },
                }
            },
        )

    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()

    _verify(
        hass,
        STATE_UNKNOWN,
        {
            CONF_FRIENDLY_NAME: "Button 2",
        },
        "button.button_2",
    )
async def test_unique_id(hass: HomeAssistant) -> None:
    """Test: unique id is ok.

    Supplying unique_id at both the template-block and button level must not
    break setup; the entity comes up with the default name and unknown state.
    """
    with assert_setup_component(1, "template"):
        assert await setup.async_setup_component(
            hass,
            "template",
            {
                "template": {
                    "unique_id": "test",
                    "button": {
                        "press": {"service": "script.press"},
                        "unique_id": "test",
                    },
                }
            },
        )

    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()

    _verify(hass, STATE_UNKNOWN)
def _verify(
    hass,
    expected_value,
    attributes=None,
    entity_id=_TEST_BUTTON,
):
    """Assert that the button's state and attributes match expectations.

    A friendly name is filled in with the platform default when the caller
    did not specify one.
    """
    expected_attributes = attributes or {}
    expected_attributes.setdefault(CONF_FRIENDLY_NAME, DEFAULT_NAME)
    state = hass.states.get(entity_id)
    assert state.state == expected_value
    assert state.attributes == expected_attributes
| [
"[email protected]"
] | |
0f51ca5b879fb84ad265dfd8702b8672392376d4 | b3d42d863b170f2a952e69c40bea727a92d95730 | /c03/p050_same_name.py | 2dfe30b61af6c12638fdf656a5dcee8b56269429 | [
"Apache-2.0"
] | permissive | HiAwesome/automate-the-boring-stuff | 6e8a8aa12fc36f9d7c139f1c2ae0510df1904491 | 7355f53bbb0d27755fe350b84be834f3b4f9767a | refs/heads/master | 2023-01-04T14:24:53.624590 | 2020-11-05T06:30:43 | 2020-11-05T06:30:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | def spam():
eggs = 'spam local'
print(eggs)
def bacon():
    # Demonstrates variable shadowing: this local `eggs` is independent of
    # both the global `eggs` and spam()'s local `eggs`.
    eggs = 'bacon local'
    print(eggs)  # -> 'bacon local'
    spam()  # spam() prints and mutates only its own local `eggs`
    print(eggs)  # still 'bacon local' — untouched by spam()
# Driver: create a global `eggs`, run bacon() (which shadows it locally),
# then show the global value survived unchanged.
eggs = 'global'
bacon()
print(eggs)

# Expected program output:
"""
bacon local
spam local
bacon local
global
"""
| [
"[email protected]"
] | |
3422f9944919abe99c858b9cd797912712273538 | 86a184949f306c94b6d032cc2ca862412016d25a | /week10/funwithcelery/tasks.py | bdf6f5df56935b6973921ab1d6f00e02ca4f8a79 | [] | no_license | sjl421/Web-Development-with-Django | 7339b8f4c33f1a1bfa7660e6dc35d74f54c1981d | 2bb9e0fc098b03cbb6e7980483624b151601d204 | refs/heads/master | 2021-04-06T08:58:22.803534 | 2017-07-08T13:59:50 | 2017-07-08T13:59:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | from celery import group
from celery_app import app
@app.task(bind=True, max_retries=1)
def retry_task(self):
    """Demo task that always schedules a retry of itself 60 seconds later.

    bind=True gives access to the task instance as `self`; max_retries=1
    caps the number of retry attempts.
    """
    print('IN TASK')
    exc = Exception('retrying')
    # retry() raises internally, so nothing after this line would run
    self.retry(exc=exc, countdown=60)
@app.task
def add(x, y):
    """Return the sum of the two operands."""
    total = x + y
    return total
@app.task
def just_printing(*args, **kwargs):
    """Debugging task: log that it was called and echo its arguments."""
    print('Someone called me: just_printing')
    print(args, kwargs)
@app.task
def group_adds(ns):
    """Fan out one add() subtask per argument pair in `ns` as a celery group.

    `ns` is expected to be an iterable of 2-item argument tuples for add(),
    e.g. [(1, 2), (3, 4)] — TODO confirm against callers.
    """
    # group(...) builds the group of signatures; the trailing () applies it
    return group(add.s(*n) for n in ns)()
| [
"[email protected]"
] | |
3ad8fdd6adedf14fe1f304fc8af7d2c003990f4d | 88ed561f202a4c75f499611ec86099000f53b683 | /ccxt_unmerged/krakenfu.py | 13d6c8c2a6d001008482e039cf6819536a6d6eae | [] | no_license | jjones2000/ccxt-unmerged | 8d07d4cddfd9b3f71223d5baeb92f7dac6b7f11d | 316f39b836fa15bd9693de292ba62eb224227ea2 | refs/heads/master | 2023-06-17T09:19:12.827803 | 2021-07-14T19:25:50 | 2021-07-14T19:25:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46,029 | py | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import OrderImmediatelyFillable
from ccxt.base.errors import OrderNotFillable
from ccxt.base.errors import DuplicateOrderId
from ccxt.base.errors import NotSupported
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidNonce
from ccxt.base.decimal_to_precision import TICK_SIZE
class krakenfu(Exchange):
def describe(self):
    """Return the static exchange descriptor (ids, urls, endpoints, fees,
    error mapping, options), layered over the ccxt base describe()."""
    return self.deep_extend(super(krakenfu, self).describe(), {
        'id': 'krakenfu',
        'name': 'Kraken Futures',
        'countries': ['US'],
        'version': 'v3',
        'userAgent': None,
        'rateLimit': 600,
        'has': {
            'cancelAllOrders': True,
            'createMarketOrder': False,
            'editOrder': True,
            'fetchMyTrades': True,
            'fetchOpenOrders': True,
            'fetchOrders': False,
            'fetchTicker': False,
            'fetchTickers': True,
        },
        'urls': {
            'test': {
                'public': 'https://demo-futures.kraken.com/derivatives',
                'private': 'https://demo-futures.kraken.com/derivatives',
                'www': 'https://demo-futures.kraken.com',
            },
            'logo': 'https://user-images.githubusercontent.com/24300605/81436764-b22fd580-9172-11ea-9703-742783e6376d.jpg',
            'api': {
                'public': 'https://futures.kraken.com/derivatives',
                'private': 'https://futures.kraken.com/derivatives',
            },
            'www': 'https://futures.kraken.com/',
            'doc': [
                'https://support.kraken.com/hc/en-us/categories/360001806372-Futures-API',
            ],
            'fees': 'https://support.kraken.com/hc/en-us/articles/360022835771-Transaction-fees-and-rebates-for-Kraken-Futures',
            'referral': None,
        },
        # REST endpoints, grouped by access level and HTTP verb
        'api': {
            'public': {
                'get': [
                    'instruments',
                    'orderbook',
                    'tickers',
                    'history',
                ],
            },
            'private': {
                'get': [
                    'openpositions',
                    'notifications',
                    'accounts',
                    'openorders',
                    'recentorders',
                    'fills',
                    'transfers',
                ],
                'post': [
                    'sendorder',
                    'editorder',
                    'cancelorder',
                    'transfer',
                    'batchorder',
                    'cancelallorders',
                    'cancelallordersafter',
                    'withdrawal',  # for futures wallet -> kraken spot wallet
                ],
            },
        },
        'fees': {
            'trading': {
                'tierBased': False,
                'percentage': True,
                'maker': -0.0002,  # negative maker fee == rebate
                'taker': 0.00075,
            },
        },
        # maps exchange error codes onto unified ccxt exception classes
        'exceptions': {
            'exact': {
                'apiLimitExceeded': RateLimitExceeded,
                'marketUnavailable': ExchangeNotAvailable,
                'requiredArgumentMissing': BadRequest,
                'unavailable': ExchangeNotAvailable,
                'authenticationError': AuthenticationError,
                'accountInactive': ExchangeError,  # When account has no trade history / no order history. Should this error be ignored in some cases?
                'invalidAccount': BadRequest,  # the fromAccount or the toAccount are invalid
                'invalidAmount': BadRequest,
                'insufficientFunds': InsufficientFunds,
                'Bad Request': BadRequest,  # The URL contains invalid characters.(Please encode the json URL parameter)
                'Unavailable': InsufficientFunds,  # Insufficient funds in Futures account [withdraw]
            },
            'broad': {
                'invalidArgument': BadRequest,
                'nonceBelowThreshold': InvalidNonce,
                'nonceDuplicate': InvalidNonce,
            },
        },
        'precisionMode': TICK_SIZE,
        'options': {
            'symbol': {
                'quoteIds': ['USD', 'XBT'],
                'reversed': False,
            },
            # unified order type -> exchange order type code
            'orderTypes': {
                'limit': 'lmt',
                'stop': 'stp',
                'IOC': 'ioc',
            },
        },
    })
def fetch_markets(self, params={}):
    """Fetch all instruments from the exchange and parse them into unified
    ccxt market structures (futures, perpetual swaps and spot indices)."""
    response = self.publicGetInstruments(params)
    # Example response:
    # {
    #     "result":"success",
    #     "instruments":[
    #         {
    #             "symbol":"fi_ethusd_180928",
    #             "type":"futures_inverse",  # futures_vanilla  # spot index
    #             "underlying":"rr_ethusd",
    #             "lastTradingTime":"2018-09-28T15:00:00.000Z",
    #             "tickSize":0.1,
    #             "contractSize":1,
    #             "tradeable":true,
    #             "marginLevels":[
    #                 {
    #                     "contracts":0,
    #                     "initialMargin":0.02,
    #                     "maintenanceMargin":0.01
    #                 },
    #                 {
    #                     "contracts":250000,
    #                     "initialMargin":0.04,
    #                     "maintenanceMargin":0.02
    #                 },
    #                 {
    #                     "contracts":500000,
    #                     "initialMargin":0.06,
    #                     "maintenanceMargin":0.03
    #                 }
    #             ]
    #         },
    #         ...
    #         {
    #             "symbol":"in_xbtusd",
    #             "type":"spot index",
    #             "tradeable":false
    #         }
    #     ],
    #     "serverTime":"2018-07-19T11:32:39.433Z"
    # }
    instruments = response['instruments']
    result = []
    for i in range(0, len(instruments)):
        market = instruments[i]
        active = True
        id = market['symbol']
        type = None
        index = (market['type'].find(' index') >= 0)
        linear = None
        inverse = None
        if not index:
            # "_vanilla" products are linear; everything else is inverse
            linear = (market['type'].find('_vanilla') >= 0)
            inverse = not linear
            # no expiry date means the contract is a perpetual swap
            settleTime = self.safe_string(market, 'lastTradingTime')
            type = 'swap' if (settleTime is None) else 'future'
        else:
            type = 'index'
        swap = (type == 'swap')
        future = (type == 'future')
        symbol = id
        # ids look like "fi_ethusd_180928": prefix, joined pair, expiry
        split = id.split('_')
        parsed = self.parse_symbol_id_joined(split[1])
        baseId = parsed['baseId']
        quoteId = parsed['quoteId']
        base = parsed['base']
        quote = parsed['quote']
        # swap == perpetual: only swaps get a unified "BASE/QUOTE" symbol
        if swap:
            symbol = base + '/' + quote
        lotSize = self.safe_float(market, 'contractSize')
        precision = {
            'amount': None,
            'price': self.safe_float(market, 'tickSize'),
        }
        if not index:
            precision['amount'] = 1.0  # this seems to be the case for all markets
        limits = {
            'amount': {
                'min': precision['amount'],
                'max': None,
            },
            'price': {
                'min': precision['price'],
                'max': None,
            },
            'cost': {
                'min': None,
                'max': None,
            },
        }
        result.append({
            'id': id,
            'symbol': symbol,
            'base': base,
            'quote': quote,
            'baseId': baseId,
            'quoteId': quoteId,
            'active': active,
            'precision': precision,
            'limits': limits,
            'type': type,
            'spot': False,
            'swap': swap,
            'future': future,
            'prediction': False,
            'linear': linear,
            'inverse': inverse,
            'lotSize': lotSize,
            'info': market,
        })
    return result
def fetch_order_book(self, symbol, limit=None, params={}):
    """Fetch the full order book for `symbol` and return it in the unified
    ccxt format. Note: `limit` is accepted for interface compatibility but
    not forwarded to the exchange here."""
    self.load_markets()
    market = self.market(symbol)
    request = {
        'symbol': market['id'],
    }
    response = self.publicGetOrderbook(self.extend(request, params))
    # Example response:
    # {
    #     "result":"success",
    #     "serverTime":"2016-02-25T09:45:53.818Z",
    #     "orderBook":{
    #         "bids":[
    #             [
    #                 4213,
    #                 2000,
    #             ],
    #             [
    #                 4210,
    #                 4000,
    #             ],
    #             ...,
    #         ],
    #         "asks":[
    #             [
    #                 4218,
    #                 4000,
    #             ],
    #             [
    #                 4220,
    #                 5000,
    #             ],
    #             ...,
    #         ],
    #     },
    # }
    # use the server's clock as the book timestamp
    timestamp = self.parse8601(response['serverTime'])
    return self.parse_order_book(response['orderBook'], timestamp)
def fetch_tickers(self, symbols=None, params={}):
    """Fetch tickers for every instrument, keyed by unified symbol.

    Tickers whose market cannot be resolved to a symbol are dropped.
    """
    self.load_markets()
    response = self.publicGetTickers(params)
    result = {}
    for rawTicker in response['tickers']:
        parsed = self.parse_ticker(rawTicker)
        symbol = self.safe_string(parsed, 'symbol')
        if symbol is not None:
            result[symbol] = parsed
    return result
def parse_ticker(self, ticker, market=None):
    """Convert a raw exchange ticker into the unified ccxt ticker structure."""
    # Example raw ticker:
    # {
    #     "tag":"quarter",
    #     "pair":"XRP:USD",
    #     "symbol":"fi_xrpusd_180615",
    #     "markPrice":0.8036,
    #     "bid":0.8154,
    #     "bidSize":15000,
    #     "ask":0.8166,
    #     "askSize":45000,
    #     "vol24h":5314577,
    #     "openInterest":3807948,
    #     "open24h":0.82890000,
    #     "last":0.814,
    #     "lastTime":"2018-05-10T17:14:29.301Z",
    #     "lastSize":1000,
    #     "suspended":false
    # }
    symbol = None
    marketId = self.safe_string(ticker, 'symbol')
    market = self.safe_value(self.markets_by_id, marketId, market)
    if market is not None:
        symbol = market['symbol']
    timestamp = self.parse8601(self.safe_string(ticker, 'lastTime'))
    open = self.safe_float(ticker, 'open24h')
    last = self.safe_float(ticker, 'last')
    change = None
    percentage = None
    average = None
    # derive 24h change / percentage / average from open and last
    if last is not None and open is not None:
        change = last - open
        if open > 0:
            percentage = change / open * 100
        average = (open + last) / 2
    volume = self.safe_float(ticker, 'vol24h')
    baseVolume = None
    quoteVolume = None
    # vol24h is denominated differently per contract flavor
    if (market is not None) and (market['type'] != 'index'):
        if market['linear']:
            baseVolume = volume  # pv_xrpxbt volume given in XRP
        else:
            quoteVolume = volume  # pi_xbtusd volume given in USD
    return {
        'symbol': symbol,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'high': None,
        'low': None,
        'bid': self.safe_float(ticker, 'bid'),
        'bidVolume': self.safe_float(ticker, 'bidSize'),
        'ask': self.safe_float(ticker, 'ask'),
        'askVolume': self.safe_float(ticker, 'askSize'),
        'vwap': None,
        'open': open,
        'close': last,
        'last': last,
        'previousClose': None,
        'change': change,
        'percentage': percentage,
        'average': average,
        'baseVolume': baseVolume,
        'quoteVolume': quoteVolume,
        'info': ticker,
    }
def fetch_trades(self, symbol, since=None, limit=None, params={}):
    """Fetch recent public trades for `symbol`.

    The endpoint returns the last 100 trades from the specified lastTime value.
    """
    self.load_markets()
    market = self.market(symbol)
    request = {'symbol': market['id']}
    response = self.publicGetHistory(self.extend(request, params))
    return self.parse_trades(response['history'], market, since, limit)
def parse_trade(self, trade, market=None):
    """Convert a raw trade payload into the unified ccxt trade structure.

    Handles three payload shapes:

    fetchTrades(public)

        {
            "time":"2019-02-14T09:25:33.920Z",
            "trade_id":100,
            "price":3574,
            "size":100,
            "side":"buy",
            "type":"fill"  # fill, liquidation, assignment, termination
            "uid":"11c3d82c-9e70-4fe9-8115-f643f1b162d4"
        }

    fetchMyTrades(private)

        {
            "fillTime":"2016-02-25T09:47:01.000Z",
            "order_id":"c18f0c17-9971-40e6-8e5b-10df05d422f0",
            "fill_id":"522d4e08-96e7-4b44-9694-bfaea8fe215e",
            "cliOrdId":"d427f920-ec55-4c18-ba95-5fe241513b30",  # OPTIONAL
            "symbol":"fi_xbtusd_180615",
            "side":"buy",
            "size":2000,
            "price":4255,
            "fillType":"maker"  # taker, takerAfterEdit, maker, liquidation, assignee
        }

    execution report(createOrder, editOrder)

        {
            "executionId":"e1ec9f63-2338-4c44-b40a-43486c6732d7",
            "price":7244.5,
            "amount":10,
            "orderPriorEdit":null,
            "orderPriorExecution":{
                "orderId":"61ca5732-3478-42fe-8362-abbfd9465294",
                "cliOrdId":null,
                "type":"lmt",
                "symbol":"pi_xbtusd",
                "side":"buy",
                "quantity":10,
                "filled":0,
                "limitPrice":7500,
                "reduceOnly":false,
                "timestamp":"2019-12-11T17:17:33.888Z",
                "lastUpdateTimestamp":"2019-12-11T17:17:33.888Z"
            },
            "takerReducedQuantity":null,
            "type":"EXECUTION"
        }
    """
    timestamp = self.parse8601(self.safe_string_2(trade, 'time', 'fillTime'))
    price = self.safe_float(trade, 'price')
    amount = self.safe_float_2(trade, 'size', 'amount', 0.0)
    id = self.safe_string_2(trade, 'uid', 'fill_id')
    if id is None:
        id = self.safe_string(trade, 'executionId')
    order = self.safe_string(trade, 'order_id')
    symbolId = self.safe_string(trade, 'symbol')
    side = self.safe_string(trade, 'side')
    type = None
    # execution reports carry order details in a nested prior-state object
    priorEdit = self.safe_value(trade, 'orderPriorEdit')
    priorExecution = self.safe_value(trade, 'orderPriorExecution')
    if priorExecution is not None:
        order = self.safe_string(priorExecution, 'orderId')
        symbolId = self.safe_string(priorExecution, 'symbol')
        side = self.safe_string(priorExecution, 'side')
        type = self.safe_string(priorExecution, 'type')
    elif priorEdit is not None:
        order = self.safe_string(priorEdit, 'orderId')
        symbolId = self.safe_string(priorEdit, 'symbol')
        # BUGFIX: side was previously read from the 'type' field(a copy-paste
        # of the line below), which mislabeled the trade side for edited orders
        side = self.safe_string(priorEdit, 'side')
        type = self.safe_string(priorEdit, 'type')
    if type is not None:
        type = self.parse_order_type(type)
    symbol = None
    if symbolId is not None:
        if symbolId in self.markets_by_id:
            market = self.markets_by_id[symbolId]
        else:
            # unknown market: fall back to the raw id as the symbol
            market = None
            symbol = symbolId
    if market is not None:
        symbol = market['symbol']
    cost = None
    if (amount is not None) and (price is not None) and (market is not None):
        if market['linear']:
            cost = amount * price  # linear contracts: cost in quote currency
        else:
            cost = amount / price  # inverse contracts: cost in base currency
        cost *= market['lotSize']
    fee = None
    takerOrMaker = None
    # fillType values like "takerAfterEdit" still contain "taker"/"maker"
    fillType = self.safe_string(trade, 'fillType')
    if fillType is not None:
        if fillType.find('taker') >= 0:
            takerOrMaker = 'taker'
        elif fillType.find('maker') >= 0:
            takerOrMaker = 'maker'
    return {
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'symbol': symbol,
        'id': id,
        'order': order,
        'type': type,
        'takerOrMaker': takerOrMaker,
        'side': side,
        'price': price,
        'cost': cost,
        'amount': amount,
        'fee': fee,
        'info': trade,
    }
def create_order(self, symbol, type, side, amount, price=None, params={}):
    """Place an order via POST /sendorder and return the parsed order.

    Raises a mapped exception when the exchange reports a rejection status
    (see verify_order_action_success); a "filled" status is accepted.
    """
    # type          string  'lmt'/'limit' for a limit order
    #                       'post' for a post-only limit order
    #                       'stp'/'stop' for a stop order
    #                       'take_profit' for a take profit order
    #                       'ioc' for an immediate-or-cancel order
    # stopPrice     float   The stop price associated with a stop or take profit order.
    #                       Required if orderType is stp or take_profit. Must not have
    #                       more than 2 decimal places. Note that for stop orders, limitPrice denotes
    #                       the worst price at which the stop or take_profit order can get filled at.
    #                       If no limitPrice is provided the stop or take_profit order will trigger a market order.
    # triggerSignal string  If placing a stp or take_profit, the signal used for trigger. One of:
    #                       mark - the mark price
    #                       index - the index price
    #                       last - the last executed trade
    # cliOrdId      UUID    The order identity that is specified from the user. It must be globally unique.
    # reduceOnly    string  Set as True if you wish the order to only reduce an existing position.
    #                       Any order which increases an existing position will be rejected. Default False.
    self.load_markets()
    # translate the unified order type into the exchange's code(lmt/stp/ioc)
    typeId = self.safe_string(self.options['orderTypes'], type, type)
    request = {
        'orderType': typeId,
        'symbol': self.market_id(symbol),
        'side': side,
        'size': amount,
    }
    if price is not None:
        request['limitPrice'] = price
    response = self.privatePostSendorder(self.extend(request, params))
    status = self.safe_string(response['sendStatus'], 'status')
    self.verify_order_action_success(status, 'placed', ['filled'])
    order = self.parse_order(response['sendStatus'])
    id = self.safe_string(order, 'id')
    # cache the parsed order locally by id
    self.orders[id] = order
    return self.extend({'info': response}, order)
def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
    """Amend an existing order's size and/or limit price via POST /editorder.

    `symbol`, `type` and `side` are accepted for interface compatibility but
    are not sent — the exchange identifies the order by id alone.
    """
    self.load_markets()
    request = {
        'orderId': id,
    }
    if amount is not None:
        request['size'] = amount
    if price is not None:
        request['limitPrice'] = price
    response = self.privatePostEditorder(self.extend(request, params))
    status = self.safe_string(response['editStatus'], 'status')
    # "filled" is tolerated: the order executed before the edit landed
    self.verify_order_action_success(status, 'edited', ['filled'])
    order = self.parse_order(response['editStatus'])
    self.orders[order['id']] = order
    return self.extend({'info': response}, order)
def cancel_order(self, id, symbol=None, params={}):
    """Cancel a single order by id via POST /cancelorder.

    `symbol` is unused by this exchange; kept for interface compatibility.
    """
    self.load_markets()
    response = self.privatePostCancelorder(self.extend({'order_id': id}, params))
    status = self.safe_string(self.safe_value(response, 'cancelStatus', {}), 'status')
    self.verify_order_action_success(status, 'canceled')
    order = {}
    # cancelStatus may be absent; only then return a bare info wrapper
    if 'cancelStatus' in response:
        order = self.parse_order(response['cancelStatus'])
        self.orders[order['id']] = order
    return self.extend({'info': response}, order)
def cancel_all_orders(self, symbol=None, params={}):
    """Cancel all open orders, optionally restricted to one market, and
    mark the affected orders as canceled in the local cache."""
    request = {}
    if symbol is not None:
        request['symbol'] = self.market_id(symbol)
    response = self.privatePostCancelallorders(self.extend(request, params))
    cancelStatus = self.safe_value(response, 'cancelStatus', {})
    cancelledOrders = self.safe_value(cancelStatus, 'cancelledOrders', [])
    # reflect the cancellations in the locally cached orders
    for i in range(0, len(cancelledOrders)):
        id = self.safe_string(cancelledOrders[i], 'order_id')
        if id in self.orders:
            self.orders[id]['status'] = 'canceled'
            self.orders[id]['remaining'] = 0.0
    return response
    def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
        """Not implemented: the recentorders endpoint reports order events in
        yet another payload shape that parse_order() does not understand, so
        this raises NotSupported unconditionally.  The commented-out sketch
        below is kept as a starting point for future support.
        """
        # The returned orderEvents are yet again in entirely different format, what a mess
        raise NotSupported(self.id + ' fetchOrders not supprted yet')
        # self.load_markets()
        # market = None
        # request = {}
        # if symbol is not None:
        #     market = self.market(symbol)
        #     request['symbol'] = market['id']
        # }
        # response = self.privateGetRecentorders(self.extend(request, params))
        # return self.parse_orders([response], market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
response = self.privateGetOpenorders(params)
return self.parse_orders(response['openOrders'], market, since, limit)
def parse_order_type(self, orderType):
map = {
'lmt': 'limit',
'stp': 'stop',
}
return self.safe_string(map, orderType, orderType)
    def verify_order_action_success(self, status, action='placed/edited/canceled', omit=[]):
        """Raise a typed ccxt exception when 'status' reports a failed order
        action; statuses listed in 'omit' are tolerated and do not raise.
        The mutable default for 'omit' is safe here because it is never
        modified, only read via in_array().
        """
        # maps exchange failure statuses to ccxt exception classes
        errors = {
            'invalidOrderType': InvalidOrder,
            'invalidSide': InvalidOrder,
            'invalidSize': InvalidOrder,
            'invalidPrice': InvalidOrder,
            'insufficientAvailableFunds': InsufficientFunds,
            'selfFill': ExchangeError,
            'tooManySmallOrders': ExchangeError,
            'maxPositionViolation': BadRequest,
            'marketSuspended': ExchangeNotAvailable,
            'marketInactive': ExchangeNotAvailable,
            'clientOrderIdAlreadyExist': DuplicateOrderId,
            'clientOrderIdTooLong': BadRequest,
            'outsidePriceCollar': InvalidOrder,
            'postWouldExecute': OrderImmediatelyFillable,  # the unplaced order could actually be parsed(with status = "rejected"), but there is self specific error for self
            'iocWouldNotExecute': OrderNotFillable,  # -||-
            'wouldNotReducePosition': ExchangeError,
            'orderForEditNotFound': OrderNotFound,
            'orderForEditNotAStop': InvalidOrder,
            'filled': OrderNotFound,
            'notFound': OrderNotFound,
        }
        if (status in errors) and not self.in_array(status, omit):
            raise errors[status](self.id + ' order cannot be ' + action + ': ' + status)
    def parse_order_status(self, status):
        """Map an exchange order status string to the unified ccxt status
        ('open' / 'closed' / 'canceled' / 'rejected'); unknown statuses are
        returned unchanged."""
        statuses = {
            'placed': 'open',  # the order was placed successfully
            'cancelled': 'canceled',  # the order was cancelled successfully
            'invalidOrderType': 'rejected',  # the order was not placed because orderType is invalid
            'invalidSide': 'rejected',  # the order was not placed because side is invalid
            'invalidSize': 'rejected',  # the order was not placed because size is invalid
            'invalidPrice': 'rejected',  # the order was not placed because limitPrice and/or stopPrice are invalid
            'insufficientAvailableFunds': 'rejected',  # the order was not placed because available funds are insufficient
            'selfFill': 'rejected',  # the order was not placed because it would be filled against an existing order belonging to the same account
            'tooManySmallOrders': 'rejected',  # the order was not placed because the number of small open orders would exceed the permissible limit
            'maxPositionViolation': 'rejected',  # Order would cause you to exceed your maximum position in self contract.
            'marketSuspended': 'rejected',  # the order was not placed because the market is suspended
            'marketInactive': 'rejected',  # the order was not placed because the market is inactive
            'clientOrderIdAlreadyExist': 'rejected',  # the specified client id already exist
            'clientOrderIdTooLong': 'rejected',  # the client id is longer than the permissible limit
            'outsidePriceCollar': 'rejected',  # the limit order crosses the spread but is an order of magnitude away from the mark price - fat finger control
            # Should the next two be 'expired' ?
            'postWouldExecute': 'rejected',  # the post-only order would be filled upon placement, thus is cancelled
            'iocWouldNotExecute': 'rejected',  # the immediate-or-cancel order would not execute.
            'wouldNotReducePosition': 'rejected',  # the reduce only order would not reduce position.
            'edited': 'open',  # the order was edited successfully
            'orderForEditNotFound': 'rejected',  # the requested order for edit has not been found
            'orderForEditNotAStop': 'rejected',  # the supplied stopPrice cannot be applied because order is not a stop order
            'filled': 'closed',  # the order was found completely filled and could not be cancelled
            'notFound': 'rejected',  # the order was not found, either because it had already been cancelled or it never existed
            'untouched': 'open',  # the entire size of the order is unfilled
            'partiallyFilled': 'open',  # the size of the order is partially but not entirely filled
        }
        return self.safe_string(statuses, status, status)
    def parse_order(self, order, market=None):
        """Normalize the many kraken-futures order payload shapes(place,
        edit, cancel, execution events, open-order listings -- see the raw
        examples below) into the unified ccxt order structure.

        When an orderEvents list is present it is scanned for the most recent
        authoritative order snapshot; 'prior' snapshots(orderPriorEdit /
        orderPriorExecution) describe the order BEFORE the attached
        executions, so fill amounts from EXECUTION events are added on top.
        """
        # "PLACE ORDER"
        #
        # LIMIT
        # {
        #     "order_id":"179f9af8-e45e-469d-b3e9-2fd4675cb7d0",
        #     "status":"placed",
        #     "receivedTime":"2019-09-05T16:33:50.734Z",
        #     "orderEvents":[
        #         {
        #             "order":{
        #                 "orderId":"179f9af8-e45e-469d-b3e9-2fd4675cb7d0",
        #                 "cliOrdId":null,
        #                 "type":"lmt",
        #                 "symbol":"pi_xbtusd",
        #                 "side":"buy",
        #                 "quantity":10000,
        #                 "filled":0,
        #                 "limitPrice":9400,
        #                 "reduceOnly":false,
        #                 "timestamp":"2019-09-05T16:33:50.734Z",
        #                 "lastUpdateTimestamp":"2019-09-05T16:33:50.734Z"
        #             },
        #             "reducedQuantity":null,
        #             "type":"PLACE"
        #         }
        #     ]
        # }
        #
        # LIMIT REJECTED
        # {
        #     "order_id":"614a5298-0071-450f-83c6-0617ce8c6bc4",
        #     "status":"wouldNotReducePosition",
        #     "receivedTime":"2019-09-05T16:32:54.076Z",
        #     "orderEvents":[
        #         {
        #             "uid":"614a5298-0071-450f-83c6-0617ce8c6bc4",
        #             "order":{
        #                 "orderId":"614a5298-0071-450f-83c6-0617ce8c6bc4",
        #                 "cliOrdId":null,
        #                 "type":"lmt",
        #                 "symbol":"pi_xbtusd",
        #                 "side":"buy",
        #                 "quantity":10000,
        #                 "filled":0,
        #                 "limitPrice":9400,
        #                 "reduceOnly":true,
        #                 "timestamp":"2019-09-05T16:32:54.076Z",
        #                 "lastUpdateTimestamp":"2019-09-05T16:32:54.076Z"
        #             },
        #             "reason":"WOULD_NOT_REDUCE_POSITION",
        #             "type":"REJECT"
        #         }
        #     ]
        # }
        #
        # CONDITIONAL
        # {
        #     "order_id":"1abfd3c6-af93-4b30-91cc-e4a93797f3f5",
        #     "status":"placed",
        #     "receivedTime":"2019-12-05T10:20:50.701Z",
        #     "orderEvents":[
        #         {
        #             "orderTrigger":{
        #                 "uid":"1abfd3c6-af93-4b30-91cc-e4a93797f3f5",
        #                 "clientId":null,
        #                 "type":"lmt",                                # "ioc" if stop market
        #                 "symbol":"pi_xbtusd",
        #                 "side":"buy",
        #                 "quantity":10,
        #                 "limitPrice":15000,
        #                 "triggerPrice":9500,
        #                 "triggerSide":"trigger_below",
        #                 "triggerSignal":"mark_price",
        #                 "reduceOnly":false,
        #                 "timestamp":"2019-12-05T10:20:50.701Z",
        #                 "lastUpdateTimestamp":"2019-12-05T10:20:50.701Z"
        #             },
        #             "type":"PLACE"
        #         }
        #     ]
        # }
        #
        # EXECUTION
        # {
        #     "order_id":"61ca5732-3478-42fe-8362-abbfd9465294",
        #     "status":"placed",
        #     "receivedTime":"2019-12-11T17:17:33.888Z",
        #     "orderEvents":[
        #         {
        #             "executionId":"e1ec9f63-2338-4c44-b40a-43486c6732d7",
        #             "price":7244.5,
        #             "amount":10,
        #             "orderPriorEdit":null,
        #             "orderPriorExecution":{
        #                 "orderId":"61ca5732-3478-42fe-8362-abbfd9465294",
        #                 "cliOrdId":null,
        #                 "type":"lmt",
        #                 "symbol":"pi_xbtusd",
        #                 "side":"buy",
        #                 "quantity":10,
        #                 "filled":0,
        #                 "limitPrice":7500,
        #                 "reduceOnly":false,
        #                 "timestamp":"2019-12-11T17:17:33.888Z",
        #                 "lastUpdateTimestamp":"2019-12-11T17:17:33.888Z"
        #             },
        #             "takerReducedQuantity":null,
        #             "type":"EXECUTION"
        #         }
        #     ]
        # }
        #
        # "EDIT ORDER"
        # {
        #     "status":"edited",
        #     "orderId":"022774bc-2c4a-4f26-9317-436c8d85746d",
        #     "receivedTime":"2019-09-05T16:47:47.521Z",
        #     "orderEvents":[
        #         {
        #             "old":{
        #                 "orderId":"022774bc-2c4a-4f26-9317-436c8d85746d",
        #                 "cliOrdId":null,
        #                 "type":"lmt",
        #                 "symbol":"pi_xbtusd",
        #                 "side":"buy",
        #                 "quantity":1000,
        #                 "filled":0,
        #                 "limitPrice":9400.0,
        #                 "reduceOnly":false,
        #                 "timestamp":"2019-09-05T16:41:35.173Z",
        #                 "lastUpdateTimestamp":"2019-09-05T16:41:35.173Z"
        #             },
        #             "new":{
        #                 "orderId":"022774bc-2c4a-4f26-9317-436c8d85746d",
        #                 "cliOrdId":null,
        #                 "type":"lmt",
        #                 "symbol":"pi_xbtusd",
        #                 "side":"buy",
        #                 "quantity":1501,
        #                 "filled":0,
        #                 "limitPrice":7200,
        #                 "reduceOnly":false,
        #                 "timestamp":"2019-09-05T16:41:35.173Z",
        #                 "lastUpdateTimestamp":"2019-09-05T16:47:47.519Z"
        #             },
        #             "reducedQuantity":null,
        #             "type":"EDIT"
        #         }
        #     ]
        # }
        #
        # "CANCEL ORDER"
        # {
        #     "status":"cancelled",
        #     "orderEvents":[
        #         {
        #             "uid":"85c40002-3f20-4e87-9302-262626c3531b",
        #             "order":{
        #                 "orderId":"85c40002-3f20-4e87-9302-262626c3531b",
        #                 "cliOrdId":null,
        #                 "type":"lmt",
        #                 "symbol":"pi_xbtusd",
        #                 "side":"buy",
        #                 "quantity":1000,
        #                 "filled":0,
        #                 "limitPrice":10144,
        #                 "stopPrice":null,
        #                 "reduceOnly":false,
        #                 "timestamp":"2019-08-01T15:26:27.790Z"
        #             },
        #             "type":"CANCEL"
        #         }
        #     ]
        # }
        #
        # "FETCH OPEN ORDERS"
        # {
        #     "order_id":"59302619-41d2-4f0b-941f-7e7914760ad3",
        #     "symbol":"pi_xbtusd",
        #     "side":"sell",
        #     "orderType":"lmt",
        #     "limitPrice":10640,
        #     "unfilledSize":304,
        #     "receivedTime":"2019-09-05T17:01:17.410Z",
        #     "status":"untouched",
        #     "filledSize":0,
        #     "reduceOnly":true,
        #     "lastUpdateTime":"2019-09-05T17:01:17.410Z"
        # }
        #
        orderEvents = self.safe_value(order, 'orderEvents', [])
        details = None  # the order snapshot the unified fields are read from
        isPrior = False  # True when 'details' predates the executions collected below
        fixed = False  # True once an authoritative post-action snapshot is found
        statusId = None
        price = None
        trades = []
        if len(orderEvents) > 0:
            executions = []
            for i in range(0, len(orderEvents)):
                item = orderEvents[i]
                if self.safe_string(item, 'type') == 'EXECUTION':
                    executions.append(item)
                # Final order(after placement / editing / execution / canceling)
                if ('new' in item) or ('order' in item) or ('orderTrigger' in item):
                    details = self.safe_value_2(item, 'new', 'order')
                    if details is None:
                        details = item['orderTrigger']
                    isPrior = False
                    fixed = True
                elif (('orderPriorEdit' in item) or ('orderPriorExecution' in item)) and (not fixed) and (details is None):
                    # fall back to the pre-execution/pre-edit snapshot
                    details = self.safe_value_2(item, 'orderPriorExecution', 'orderPriorEdit')
                    if 'orderPriorExecution' in item:
                        price = self.safe_float(item['orderPriorExecution'], 'limitPrice')
                    isPrior = True
            trades = self.parse_trades(executions)
        statusId = self.safe_string(order, 'status')
        if details is None:
            details = order
        if statusId is None:
            statusId = self.safe_string(details, 'status')
        # This may be incorrectly marked as "open" if only execution report is given,
        # but will be fixed below
        status = self.parse_order_status(statusId)
        isClosed = self.in_array(status, ['canceled', 'rejected', 'closed'])
        symbol = None
        if market is not None:
            symbol = market['symbol']
        else:
            marketId = self.safe_string(details, 'symbol')
            if marketId in self.markets_by_id:
                market = self.markets_by_id[marketId]
                symbol = market['symbol']
        timestamp = self.parse8601(self.safe_string_2(details, 'timestamp', 'receivedTime'))
        lastTradeTimestamp = None
        if price is None:
            price = self.safe_float(details, 'limitPrice')
        amount = self.safe_float(details, 'quantity')
        filled = self.safe_float_2(details, 'filledSize', 'filled', 0.0)
        remaining = self.safe_float(details, 'unfilledSize')
        average = None
        # filled2 / vwapSum: fill amount and volume-weighted average price
        # recomputed from the EXECUTION events
        filled2 = 0.0
        if len(trades) > 0:
            vwapSum = 0.0
            for i in range(0, len(trades)):
                trade = trades[i]
                filled2 += trade['amount']
                vwapSum += trade['amount'] * trade['price']
            average = vwapSum / filled2
            if (amount is not None) and (not isClosed) and isPrior and (filled2 >= amount):
                status = 'closed'
                isClosed = True
            if isPrior:
                # prior snapshot: executions happened after it, so add them
                filled = filled + filled2
            else:
                filled = max(filled, filled2)
        if remaining is None:
            if isPrior:
                if amount is not None:
                    # remaining amount before execution minus executed amount
                    remaining = amount - filled2
            else:
                remaining = amount
        # if fetchOpenOrders are parsed
        if (amount is None) and (not isPrior) and (remaining is not None):
            amount = filled + remaining
        cost = None
        if (filled is not None) and (market is not None):
            whichPrice = average if (average is not None) else price
            if whichPrice is not None:
                if market['linear']:
                    cost = filled * whichPrice  # in quote
                else:
                    cost = filled / whichPrice  # in base
                cost *= market['lotSize']
        id = self.safe_string_2(order, 'order_id', 'orderId')
        if id is None:
            id = self.safe_string_2(details, 'orderId', 'uid')
        type = self.parse_order_type(self.safe_string_lower_2(details, 'type', 'orderType'))
        side = self.safe_string(details, 'side')
        return {
            'id': id,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': lastTradeTimestamp,
            'symbol': symbol,
            'type': type,
            'side': side,
            'price': price,
            'amount': amount,
            'cost': cost,
            'average': average,
            'filled': filled,
            'remaining': remaining,
            'status': status,
            'fee': None,
            'trades': trades,
            'info': order,
        }
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
response = self.privateGetFills(params)
# {
# "result":"success",
# "serverTime":"2016-02-25T09:45:53.818Z",
# "fills":[
# {
# "fillTime":"2016-02-25T09:47:01.000Z",
# "order_id":"c18f0c17-9971-40e6-8e5b-10df05d422f0",
# "fill_id":"522d4e08-96e7-4b44-9694-bfaea8fe215e",
# "cliOrdId":"d427f920-ec55-4c18-ba95-5fe241513b30", # EXTRA
# "symbol":"fi_xbtusd_180615",
# "side":"buy",
# "size":2000,
# "price":4255,
# "fillType":"maker"
# },
# ...
# ]
# }
return self.parse_trades(response['fills'], market, since, limit)
def fetch_balance(self, params={}):
self.load_markets()
response = self.privateGetAccounts(params)
# {
# "result":"success",
# "serverTime":"2016-02-25T09:45:53.818Z",
# "accounts":{
# "cash":{
# "type":"cashAccount",
# "balances":{
# "xbt":141.31756797,
# "xrp":52465.1254,
# },
# },
# "fi_xbtusd":{
# "type":"marginAccount",
# "currency":"xbt",
# "balances":{
# "fi_xbtusd_171215":50000,
# "fi_xbtusd_180615":-15000,
# ...,
# "xbt":141.31756797,
# "xrp":0,
# },
# "auxiliary":{
# "af":100.73891563,
# "pnl":12.42134766,
# "pv":153.73891563,
# },
# "marginRequirements":{
# "im":52.8,
# "mm":23.76,
# "lt":39.6,
# "tt":15.84,
# },
# "triggerEstimates":{
# "im":3110,
# "mm":3000,
# "lt":2890,
# "tt":2830,
# },
# },
# ...
# },
# }
result = {'info': response}
accounts = self.safe_value(response, 'accounts', {})
cash = self.safe_value(accounts, 'cash', {})
cashBalances = self.safe_value(cash, 'balances', {})
# This contains the actually usable margin by each market,
# but ccxt does not support such format
# bySymbol = self.omit(accounts, 'cash')
currencyIds = list(cashBalances.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_float(cashBalances, currencyId)
result[code] = account
return self.parse_balance(result)
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
if code == 429:
raise DDoSProtection(self.id + ' ' + body)
message = self.safe_string(response, 'error')
if message is None:
return
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
if code == 400:
raise BadRequest(feedback)
raise ExchangeError(feedback) # unknown message
def parse_symbol_id_joined(self, symbolId):
# Convert by detecting and converting currencies in symbol
symbolIdLower = symbolId.lower()
quoteIds = self.options['symbol']['quoteIds']
reversed = self.options['symbol']['reversed']
method = 'startsWith' if reversed else 'endsWith'
quoteId = None
baseId = None
for i in range(0, len(quoteIds)):
if getattr(self, method)(symbolIdLower, quoteIds[i].lower()):
quoteId = quoteIds[i]
break
if quoteId is None:
raise BadSymbol(self.id + ' symbolId could not be parsed: ' + symbolId)
if not reversed:
baseIdLength = len(symbolId) - len(quoteId)
baseId = self.slice_string(symbolId, 0, baseIdLength)
quoteId = self.slice_string(symbolId, baseIdLength)
else:
quoteId = self.slice_string(symbolId, 0, len(quoteId))
baseId = self.slice_string(symbolId, len(quoteId))
return {
'baseId': baseId,
'quoteId': quoteId,
'base': self.safe_currency_code(baseId),
'quote': self.safe_currency_code(quoteId),
}
def starts_with(self, string, x):
return self.slice_string(string, 0, len(x)) == x
def ends_with(self, string, x):
start = max(0, len(string) - len(x))
return self.slice_string(string, start) == x
def slice_string(self, string, start=None, end=None):
if start is None:
start = 0
if end is None:
end = len(string)
return string[start:end]
    def nonce(self):
        # the exchange expects the nonce as a string of the current
        # millisecond timestamp
        return str(self.milliseconds())
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build url/headers for a request.

        Private calls use the exchange's HMAC scheme: the Authent header is
        base64(hmac-sha512(sha256(postData + nonce + endpoint),
        base64decode(secret))).  The nonce is deliberately left empty here
        (see the commented-out lines) -- presumably optional for this API;
        confirm before re-enabling.
        """
        endpoint = '/api/' + self.version + '/' + path
        query = endpoint
        postData = ''
        if params:
            postData = self.urlencode(params)
            query += '?' + postData
        url = self.urls['api'][api] + query
        if api == 'private':
            nonce = ''  # self.nonce()
            auth = postData + nonce + endpoint  # step 1: message = postData + nonce + endpoint
            hash = self.hash(self.encode(auth), 'sha256', 'binary')  # step 2: sha256 digest of the message
            secret = self.base64_to_binary(self.secret)  # step 3: base64-decode the api secret
            signature = self.hmac(hash, secret, hashlib.sha512, 'base64')  # steps 4-5: hmac-sha512, base64-encoded
            headers = {
                'Content-Type': 'application/json',
                'APIKey': self.apiKey,
                'Authent': signature,
            }
            # headers['Nonce'] = nonce
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
| [
"[email protected]"
] | |
89e7da5910d0897f44e727aec7f25b1c0b510972 | 1511782b2cc3dcf1f7e058e5046ec67a5561ba51 | /2020/0418/we_like_agc.py | cf32c1fd0e83497a7048d6cb9fac03b9d62bda85 | [] | no_license | keiouok/atcoder | 7d8a053b0cf5b42e71e265450121d1ad686fee6d | 9af301c6d63b0c2db60ac8af5bbe1431e14bb289 | refs/heads/master | 2021-09-07T11:48:55.953252 | 2021-07-31T15:29:50 | 2021-07-31T15:29:50 | 186,214,079 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,565 | py | import sys, re, os
from collections import deque, defaultdict, Counter
from math import ceil, sqrt, hypot, factorial, pi, sin, cos, radians
from itertools import permutations, combinations, product, accumulate
from operator import itemgetter, mul
from copy import deepcopy
from string import ascii_lowercase, ascii_uppercase, digits
from heapq import heapify, heappop, heappush
# Fast-input helpers(competitive-programming boilerplate); note that input()
# deliberately shadows the builtin with a faster readline-based variant.
def input(): return sys.stdin.readline().strip()
def INT(): return int(input())
def MAP(): return map(int, input().split())
def S_MAP(): return map(str, input().split())
def LIST(): return list(map(int, input().split()))
def S_LIST(): return list(map(str, input().split()))
sys.setrecursionlimit(10 ** 9)  # the memoized DFS below can recurse up to N levels
INF = float('inf')
mod = 10 ** 9 + 7  # answers are reported modulo 1e9+7
N = INT()  # target string length, read from stdin
def no_agc(last4):
    """Return True when neither last4 nor any single adjacent swap of its
    characters contains the forbidden substring 'AGC'."""
    candidates = [last4 if isinstance(last4, str) else "".join(last4)]
    for i in range(1, 4):
        swapped = list(last4)
        swapped[i - 1], swapped[i] = swapped[i], swapped[i - 1]
        candidates.append("".join(swapped))
    return all("AGC" not in s for s in candidates)
# memo[idx][last3] caches the number of valid completions from position idx
# given the current 3-character suffix
memo = [{} for i in range(N + 1)]
def dfs(idx, last3):
    """Count the length-(N - idx) extensions over 'AGCT' that never create
    the substring 'AGC', even after one adjacent swap, starting from the
    suffix 'last3'.  Memoized; result is taken modulo 10**9 + 7.
    """
    if last3 in memo[idx]:
        return memo[idx][last3]
    if idx == N:
        return 1
    ret = 0
    for c in "AGCT":
        if no_agc(last3 + c):
            ret = (ret + dfs(idx + 1, (last3 + c)[1:])) % mod
    memo[idx][last3] = ret
    return ret
# 'TTT' is a harmless sentinel prefix that can never form 'AGC'
# (removed the unused 'ans' variable and the leftover commented-out BFS queue)
print(dfs(0, "TTT"))
| [
"[email protected]"
] | |
b1ca43ca364d54f0cadbe79241cefb980e2b7c7c | f08e50d55bbbb90e4c8f9a8811eaede98ede2694 | /erpbee/patches/v11_0/inter_state_field_for_gst.py | 2a8ab6e86b41209abb6664006436ad67db52618b | [] | no_license | mohrezbak/erpbee | bc48472a99a7f4357aa7b82ff3a9c1a4c98ba017 | 1134156ad337fd472e14cf347479c17bd8db7b33 | refs/heads/main | 2023-02-12T01:32:07.858555 | 2021-01-08T17:25:23 | 2021-01-08T17:25:23 | 327,872,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,205 | py | from __future__ import unicode_literals
import frappe
from erpbee.regional.india.setup import make_custom_fields
def execute():
	"""Patch: populate 'is_inter_state' on Sales/Purchase Taxes and Charges
	Templates for Indian companies.

	A template is marked inter-state(1) when its tax rows reference at least
	one IGST account and none of the CGST accounts from GST Settings.
	"""
	company = frappe.get_all('Company', filters = {'country': 'India'})
	if not company:
		# nothing to do on non-Indian installations
		return
	# reload every doctype touched by make_custom_fields() before running it
	frappe.reload_doc("Payroll", "doctype", "Employee Tax Exemption Declaration")
	frappe.reload_doc("Payroll", "doctype", "Employee Tax Exemption Proof Submission")
	frappe.reload_doc("hr", "doctype", "Employee Grade")
	frappe.reload_doc("hr", "doctype", "Leave Policy")
	frappe.reload_doc("accounts", "doctype", "Bank Account")
	frappe.reload_doc("accounts", "doctype", "Tax Withholding Category")
	frappe.reload_doc("accounts", "doctype", "Allowed To Transact With")
	frappe.reload_doc("accounts", "doctype", "Finance Book")
	frappe.reload_doc("accounts", "doctype", "Loyalty Program")
	frappe.reload_doc("stock", "doctype", "Item Barcode")
	make_custom_fields()
	frappe.reload_doc("accounts", "doctype", "sales_taxes_and_charges")
	frappe.reload_doc("accounts", "doctype", "purchase_taxes_and_charges")
	frappe.reload_doc("accounts", "doctype", "sales_taxes_and_charges_template")
	frappe.reload_doc("accounts", "doctype", "purchase_taxes_and_charges_template")
	# set is_inter_state in Taxes And Charges Templates
	if frappe.db.has_column("Sales Taxes and Charges Template", "is_inter_state") and\
		frappe.db.has_column("Purchase Taxes and Charges Template", "is_inter_state"):
		igst_accounts = set(frappe.db.sql_list('''SELECT igst_account from `tabGST Account` WHERE parent = "GST Settings"'''))
		cgst_accounts = set(frappe.db.sql_list('''SELECT cgst_account FROM `tabGST Account` WHERE parenttype = "GST Settings"'''))
		when_then_sales = get_formatted_data("Sales Taxes and Charges", igst_accounts, cgst_accounts)
		when_then_purchase = get_formatted_data("Purchase Taxes and Charges", igst_accounts, cgst_accounts)
		# the CASE fragments come from get_formatted_data(); rows not matched
		# by any WHEN default to 0
		if when_then_sales:
			frappe.db.sql('''update `tabSales Taxes and Charges Template`
				set is_inter_state = Case {when_then} Else 0 End
				'''.format(when_then=" ".join(when_then_sales)))
		if when_then_purchase:
			frappe.db.sql('''update `tabPurchase Taxes and Charges Template`
				set is_inter_state = Case {when_then} Else 0 End
				'''.format(when_then=" ".join(when_then_purchase)))
def get_formatted_data(doctype, igst_accounts, cgst_accounts):
	"""Return SQL "When name='...' Then 1" fragments for every template of
	'doctype' whose tax rows use at least one IGST account and no CGST
	account."""
	# fetch all the rows data from child table
	all_details = frappe.db.sql('''
		select parent, account_head from `tab{doctype}`
		where parenttype="{doctype} Template"'''.format(doctype=doctype), as_dict=True)
	# group account heads by parent template in a single pass
	# (the previous nested-loop version was O(n^2) and appended duplicates,
	# which only worked because the values were later thrown into a set)
	accounts_by_parent = {}
	for row in all_details:
		accounts_by_parent.setdefault(row['parent'], set()).add(row['account_head'])
	# keep templates that touch an IGST account but no CGST account
	# NOTE(review): parent names are interpolated into SQL verbatim; they come
	# from the database, but escaping via frappe.db.escape would be safer
	when_then = []
	for parent, accounts in accounts_by_parent.items():
		if not accounts.isdisjoint(igst_accounts) and accounts.isdisjoint(cgst_accounts):
			when_then.append('''When name='{name}' Then 1'''.format(name=parent))
	return when_then
| [
"[email protected]"
] | |
1c97e19f27526310fc8e6f703b6657e7e3961dc6 | cbcdf195338307b0c9756549a9bffebf3890a657 | /mypy_django_plugin/transformers/models.py | 5db4dbb2c09d863b1f38034dda14dc60a3395ca9 | [
"MIT"
] | permissive | mattbasta/django-stubs | bc482edf5c6cdf33b85005c2638484049c52851b | 8978ad471f2cec0aa74256fe491e2e07887f1006 | refs/heads/master | 2020-04-27T08:38:22.694104 | 2019-03-06T09:05:08 | 2019-03-06T09:05:24 | 174,178,933 | 1 | 0 | MIT | 2019-03-06T16:18:01 | 2019-03-06T16:18:00 | null | UTF-8 | Python | false | false | 10,969 | py | from abc import ABCMeta, abstractmethod
from typing import Dict, Iterator, List, Optional, Tuple, cast
import dataclasses
from mypy.nodes import (
ARG_STAR, ARG_STAR2, MDEF, Argument, CallExpr, ClassDef, Expression, IndexExpr, Lvalue, MemberExpr, MypyFile,
NameExpr, StrExpr, SymbolTableNode, TypeInfo, Var,
)
from mypy.plugin import ClassDefContext
from mypy.plugins.common import add_method
from mypy.semanal import SemanticAnalyzerPass2
from mypy.types import AnyType, Instance, NoneTyp, TypeOfAny
from mypy_django_plugin import helpers
@dataclasses.dataclass
class ModelClassInitializer(metaclass=ABCMeta):
    """Base for the per-model transformations this plugin applies: each
    subclass's run() mutates the TypeInfo of the Django model class
    currently being semantically analyzed."""
    api: SemanticAnalyzerPass2
    model_classdef: ClassDef
    @classmethod
    def from_ctx(cls, ctx: ClassDefContext):
        """Construct an initializer from mypy's ClassDefContext hook argument."""
        return cls(api=cast(SemanticAnalyzerPass2, ctx.api), model_classdef=ctx.cls)
    def get_meta_attribute(self, name: str) -> Optional[Expression]:
        """Return the expression assigned to 'name' inside the model's nested
        Meta class, or None when there is no Meta or no such assignment."""
        meta_node = helpers.get_nested_meta_node_for_current_class(self.model_classdef.info)
        if meta_node is None:
            return None
        return helpers.get_assigned_value_for_class(meta_node, name)
    def is_abstract_model(self) -> bool:
        """True when Meta.abstract is present and parses as a truthy bool."""
        is_abstract_expr = self.get_meta_attribute('abstract')
        if is_abstract_expr is None:
            return False
        return self.api.parse_bool(is_abstract_expr)
    def add_new_node_to_model_class(self, name: str, typ: Instance) -> None:
        """Inject a plugin-generated attribute 'name' of type 'typ' into the
        model class's symbol table."""
        # the Var is set up the same way mypy does for real class attributes
        var = Var(name=name, type=typ)
        var.info = typ.type
        var._fullname = self.model_classdef.info.fullname() + '.' + name
        var.is_inferred = True
        var.is_initialized_in_class = True
        self.model_classdef.info.names[name] = SymbolTableNode(MDEF, var, plugin_generated=True)
    @abstractmethod
    def run(self) -> None:
        raise NotImplementedError()
def iter_call_assignments(klass: ClassDef) -> Iterator[Tuple[Lvalue, CallExpr]]:
    """Yield(lvalue, rvalue) pairs for class-body assignments whose rvalue is a call."""
    yield from ((lvalue, rvalue)
                for lvalue, rvalue in helpers.iter_over_assignments(klass)
                if isinstance(rvalue, CallExpr))
def iter_over_one_to_n_related_fields(klass: ClassDef) -> Iterator[Tuple[NameExpr, CallExpr]]:
    """Yield(name, call) pairs for ForeignKey / OneToOneField assignments in klass."""
    one_to_n_fullnames = {helpers.FOREIGN_KEY_FULLNAME,
                          helpers.ONETOONE_FIELD_FULLNAME}
    for lvalue, rvalue in iter_call_assignments(klass):
        if not isinstance(lvalue, NameExpr):
            continue
        callee = rvalue.callee
        if isinstance(callee, MemberExpr) and callee.fullname in one_to_n_fullnames:
            yield lvalue, rvalue
class SetIdAttrsForRelatedFields(ModelClassInitializer):
    """Add the implicit '<field>_id' int attribute Django creates for every
    ForeignKey / OneToOneField declared on the model."""
    def run(self) -> None:
        for lvalue, rvalue in iter_over_one_to_n_related_fields(self.model_classdef):
            node_name = lvalue.name + '_id'
            self.add_new_node_to_model_class(name=node_name,
                                             typ=self.api.builtin_type('builtins.int'))
class InjectAnyAsBaseForNestedMeta(ModelClassInitializer):
    """Make the nested 'class Meta' fall back to Any so that arbitrary Meta
    attributes do not produce attribute errors."""
    def run(self) -> None:
        meta_node = helpers.get_nested_meta_node_for_current_class(self.model_classdef.info)
        if meta_node is None:
            return None
        meta_node.fallback_to_any = True
def get_model_argument(manager_info: TypeInfo) -> Optional[Instance]:
    """Return the Model subclass a manager type is parameterized over, if any."""
    first_args = (base.args[0] for base in manager_info.bases if base.args)
    for candidate in first_args:
        if isinstance(candidate, Instance) and candidate.type.has_base(helpers.MODEL_CLASS_FULLNAME):
            return candidate
    return None
class AddDefaultObjectsManager(ModelClassInitializer):
    """Add the 'objects' manager(and the private '_default_manager') to the
    model, reusing any managers already declared on the class or its bases."""
    def add_new_manager(self, name: str, manager_type: Optional[Instance]) -> None:
        """Attach a manager attribute named 'name'; no-op when the type is unknown."""
        if manager_type is None:
            return None
        self.add_new_node_to_model_class(name, manager_type)
    def add_private_default_manager(self, manager_type: Optional[Instance]) -> None:
        """Attach Django's '_default_manager' alias; no-op when the type is unknown."""
        if manager_type is None:
            return None
        self.add_new_node_to_model_class('_default_manager', manager_type)
    def get_existing_managers(self) -> List[Tuple[str, TypeInfo]]:
        """Collect(name, TypeInfo) for every manager assignment found in the
        model class or any of its MRO bases."""
        managers = []
        for base in self.model_classdef.info.mro:
            for name_expr, member_expr in iter_call_assignments(base.defn):
                manager_name = name_expr.name
                callee_expr = member_expr.callee
                # unwrap subscripted managers, e.g. MyManager[MyModel]()
                if isinstance(callee_expr, IndexExpr):
                    callee_expr = callee_expr.analyzed.expr
                if isinstance(callee_expr, (MemberExpr, NameExpr)) \
                        and isinstance(callee_expr.node, TypeInfo) \
                        and callee_expr.node.has_base(helpers.BASE_MANAGER_CLASS_FULLNAME):
                    managers.append((manager_name, callee_expr.node))
        return managers
    def run(self) -> None:
        existing_managers = self.get_existing_managers()
        if existing_managers:
            first_manager_type = None
            for manager_name, manager_type_info in existing_managers:
                manager_type = Instance(manager_type_info, args=[Instance(self.model_classdef.info, [])])
                self.add_new_manager(name=manager_name, manager_type=manager_type)
                if first_manager_type is None:
                    first_manager_type = manager_type
        else:
            if self.is_abstract_model():
                # abstract models do not need 'objects' queryset
                return None
            first_manager_type = self.api.named_type_or_none(helpers.MANAGER_CLASS_FULLNAME,
                                                             args=[Instance(self.model_classdef.info, [])])
        self.add_new_manager('objects', manager_type=first_manager_type)
        if self.is_abstract_model():
            return None
        # honor Meta.default_manager_name when it is a string literal,
        # otherwise fall back to the first declared manager
        default_manager_name_expr = self.get_meta_attribute('default_manager_name')
        if isinstance(default_manager_name_expr, StrExpr):
            self.add_private_default_manager(self.model_classdef.info.get(default_manager_name_expr.value).type)
        else:
            self.add_private_default_manager(first_manager_type)
class AddIdAttributeIfPrimaryKeyTrueIsNotSet(ModelClassInitializer):
    """Add the implicit 'id' attribute unless some field declares
    primary_key=True(Django only auto-creates 'id' in that case)."""
    def run(self) -> None:
        if self.is_abstract_model():
            # no need for .id attr
            return None
        for _, rvalue in iter_call_assignments(self.model_classdef):
            if ('primary_key' in rvalue.arg_names
                    and self.api.parse_bool(rvalue.args[rvalue.arg_names.index('primary_key')])):
                break
        else:
            # for-else: no field declared primary_key=True, so add 'id'
            self.add_new_node_to_model_class('id', self.api.builtin_type('builtins.object'))
class AddRelatedManagers(ModelClassInitializer):
    """Add reverse-relation attributes(the default '<model>_set' name or the
    field's related_name) to this model for every related field in any
    analyzed module that points back at it."""
    def run(self) -> None:
        for module_name, module_file in self.api.modules.items():
            for defn in iter_over_classdefs(module_file):
                for lvalue, rvalue in iter_call_assignments(defn):
                    if is_related_field(rvalue, module_file):
                        try:
                            ref_to_fullname = extract_ref_to_fullname(rvalue,
                                                                      module_file=module_file,
                                                                      all_modules=self.api.modules)
                        except helpers.SelfReference:
                            # 'self' string reference: target is the defining class
                            ref_to_fullname = defn.fullname
                        except helpers.SameFileModel as exc:
                            # bare class-name string: resolve within this module
                            ref_to_fullname = module_name + '.' + exc.model_cls_name
                        if self.model_classdef.fullname == ref_to_fullname:
                            related_manager_name = defn.name.lower() + '_set'
                            if 'related_name' in rvalue.arg_names:
                                related_name_expr = rvalue.args[rvalue.arg_names.index('related_name')]
                                if not isinstance(related_name_expr, StrExpr):
                                    # NOTE(review): returning here aborts the whole
                                    # scan on a non-literal related_name, skipping any
                                    # remaining candidates -- confirm this is intended
                                    return None
                                related_manager_name = related_name_expr.value
                            typ = get_related_field_type(rvalue, self.api, defn.info)
                            if typ is None:
                                return None
                            self.add_new_node_to_model_class(related_manager_name, typ)
def iter_over_classdefs(module_file: MypyFile) -> Iterator[ClassDef]:
    """Yield every top-level class definition in the given module."""
    yield from (defn for defn in module_file.defs if isinstance(defn, ClassDef))
def get_related_field_type(rvalue: CallExpr, api: SemanticAnalyzerPass2,
                           related_model_typ: TypeInfo) -> Optional[Instance]:
    """Type of the reverse accessor: a RelatedManager for to-many fields
    (ForeignKey / ManyToManyField), a plain model instance otherwise."""
    model_instance = Instance(related_model_typ, [])
    if rvalue.callee.name not in {'ForeignKey', 'ManyToManyField'}:
        return model_instance
    return api.named_type_or_none(helpers.RELATED_MANAGER_CLASS_FULLNAME,
                                  args=[model_instance])
def is_related_field(expr: CallExpr, module_file: MypyFile) -> bool:
    """True when expr is a call to models.ForeignKey / OneToOneField /
    ManyToManyField, where 'models' resolves to django.db.models."""
    callee = expr.callee
    if not (isinstance(callee, MemberExpr) and isinstance(callee.expr, NameExpr)):
        return False
    module = module_file.names.get(callee.expr.name)
    return bool(module
                and module.fullname == 'django.db.models'
                and callee.name in {'ForeignKey',
                                    'OneToOneField',
                                    'ManyToManyField'})
def extract_ref_to_fullname(rvalue_expr: CallExpr,
                            module_file: MypyFile, all_modules: Dict[str, MypyFile]) -> Optional[str]:
    """Resolve the target model of a related-field call to its fullname.

    The target is the 'to=' keyword argument or, failing that, the first
    positional argument; it may be a direct name reference or a string
    label.  Returns None when a string label cannot be resolved.
    NOTE(review): helpers.get_model_fullname_from_string presumably raises
    SelfReference / SameFileModel for those special label forms -- callers
    handle both.
    """
    if 'to' in rvalue_expr.arg_names:
        to_expr = rvalue_expr.args[rvalue_expr.arg_names.index('to')]
    else:
        to_expr = rvalue_expr.args[0]
    if isinstance(to_expr, NameExpr):
        return module_file.names[to_expr.name].fullname
    elif isinstance(to_expr, StrExpr):
        typ_fullname = helpers.get_model_fullname_from_string(to_expr.value, all_modules)
        if typ_fullname is None:
            return None
        return typ_fullname
    return None
def add_dummy_init_method(ctx: ClassDefContext) -> None:
    """Add a permissive '__init__(*args, **kwargs) -> None' so model
    constructors type-check regardless of which field keywords are passed."""
    # NOTE: 'any' here shadows the builtin; kept as-is to avoid churn
    any = AnyType(TypeOfAny.special_form)
    pos_arg = Argument(variable=Var('args', any),
                       type_annotation=any, initializer=None, kind=ARG_STAR)
    kw_arg = Argument(variable=Var('kwargs', any),
                      type_annotation=any, initializer=None, kind=ARG_STAR2)
    add_method(ctx, '__init__', [pos_arg, kw_arg], NoneTyp())
    # mark as model class
    ctx.cls.info.metadata.setdefault('django', {})['generated_init'] = True
def process_model_class(ctx: ClassDefContext) -> None:
    """Plugin entry point: apply every model transformer to the Django model
    class currently being analyzed."""
    for initializer_cls in (InjectAnyAsBaseForNestedMeta,
                            AddDefaultObjectsManager,
                            AddIdAttributeIfPrimaryKeyTrueIsNotSet,
                            SetIdAttrsForRelatedFields,
                            AddRelatedManagers):
        initializer_cls.from_ctx(ctx).run()
    add_dummy_init_method(ctx)
    # allow unspecified attributes for now
    ctx.cls.info.fallback_to_any = True
| [
"[email protected]"
] | |
6cad1809e4dcda1f27e0304167dbbb5f9ead2b47 | 6d233ad2059a941e4ce4c5b5ee3857b8a4a0d212 | /Everyday_alg/2021/01/2021_01_13/binary-tree-level-order-traversal.py | 2966d1ca6c9df4779c61bf9ec99a75013caf3771 | [] | no_license | Alexanderklau/Algorithm | 7c38af7debbe850dfc7b99cdadbf0f8f89141fc6 | eac05f637a55bfcc342fa9fc4af4e2dd4156ea43 | refs/heads/master | 2022-06-12T21:07:23.635224 | 2022-06-12T08:12:07 | 2022-06-12T08:12:07 | 83,501,915 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 825 | py | # coding: utf-8
__author__ = 'Yemilice_lau'
"""
给你一个二叉树,请你返回其按 层序遍历 得到的节点值。 (即逐层地,从左到右访问所有节点)。
示例:
二叉树:[3,9,20,null,null,15,7],
3
/ \
9 20
/ \
15 7
返回其层序遍历结果:
[
[3],
[9,20],
[15,7]
]
"""
class Solution(object):
    def levelOrder(self, root):
        """Breadth-first level-order traversal: one list of node values per level.
        :type root: TreeNode
        :rtype: List[List[int]]
        """
        if not root:
            return []
        levels = []
        current = [root]
        while current:
            levels.append([node.val for node in current])
            children = []
            for node in current:
                for child in (node.left, node.right):
                    if child:
                        children.append(child)
            current = children
        return levels
return tree_lists | [
"[email protected]"
] | |
e38fd14ba552b2ae26182d22a08def171dd47456 | fdca7da4bd6a7ce2e6659014da3b11df486ea686 | /names.py | a6eb3a885c30d28098b0058184d5f8f67574a955 | [] | no_license | MovinTarg/Names | a34bafe2f827078873f13f9034299f036b1a20b8 | 2ba00a051560ea7a9cfd12cf95bc6617962b9b0b | refs/heads/master | 2021-05-02T07:20:11.304723 | 2018-02-08T22:32:52 | 2018-02-08T22:32:52 | 120,826,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,070 | py | students = [
{'first_name': 'Michael', 'last_name' : 'Jordan'},
{'first_name' : 'John', 'last_name' : 'Rosales'},
{'first_name' : 'Mark', 'last_name' : 'Guillen'},
{'first_name' : 'KB', 'last_name' : 'Tonel'}
]
def names(arr):
    """Print "first last" on one line for each person dict in *arr*.

    Each element must be a dict with 'first_name' and 'last_name' keys.
    """
    for person in arr:
        # Ported from the Python 2 print statement to the Python 3
        # print() function; output is identical ("First Last\n").
        print(person['first_name'], person['last_name'])
names(students)
# Sample data keyed by role ('Students'/'Instructors'); each value is a
# list of person dicts with 'first_name'/'last_name'.
users = {
    'Students': [
        {'first_name': 'Michael', 'last_name' : 'Jordan'},
        {'first_name' : 'John', 'last_name' : 'Rosales'},
        {'first_name' : 'Mark', 'last_name' : 'Guillen'},
        {'first_name' : 'KB', 'last_name' : 'Tonel'}
    ],
    'Instructors': [
        {'first_name' : 'Michael', 'last_name' : 'Choi'},
        {'first_name' : 'Martin', 'last_name' : 'Puryear'}
    ]
}
def studentsInstructors(dictionary):
    """Print each role name followed by a numbered line per person.

    For every (role, people) pair in *dictionary*, prints the role, then
    one line per person of the form:
    "<n> - <first> <last> - <combined name length>".
    """
    for role, people in dictionary.items():
        print(role)
        # enumerate(..., 1) replaces the original hand-maintained counter;
        # print() calls replace the Python 2 print statements (same output).
        for number, person in enumerate(people, 1):
            name_length = len(person['first_name'] + person['last_name'])
            print(number, '-', person['first_name'], person['last_name'],
                  '-', name_length)
studentsInstructors(users) | [
"[email protected]"
] | |
68f6bb61324cd78cba0386369bec19d3a4ec44a3 | abf0ea1656a00932d99578a566b8b546daa8c569 | /env/bin/cftp | df699cd0cc2f0d301a851ae0138146f3272a4b40 | [] | no_license | Lewington-pitsos/soundcloud | cf3b7a22b6e93b32e1f2a3f50b5ca5aec790bf23 | 5232ddc6d8f745ee91624411f3eecbfea2758c51 | refs/heads/master | 2020-05-16T02:09:24.846277 | 2019-04-22T05:37:54 | 2019-04-22T05:37:54 | 182,618,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | #!/home/lewington/code/python/learn/soundcloud/env/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from twisted.conch.scripts.cftp import run
if __name__ == '__main__':
    # Setuptools console-script wrapper: strip a trailing "-script.py(w)" or
    # ".exe" from argv[0] so the program sees its canonical name, then run
    # the cftp entry point and exit with its return code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run())
| [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.