Dataset schema (one row per source file; ⌀ marks a nullable column):

| column | dtype | range / classes |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 3–281 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–57 |
| license_type | string | 2 classes |
| repo_name | string | length 6–116 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 313 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 18.2k–668M, ⌀ |
| star_events_count | int64 | 0–102k |
| fork_events_count | int64 | 0–38.2k |
| gha_license_id | string | 17 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 107 classes |
| src_encoding | string | 20 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4–6.02M |
| extension | string | 78 classes |
| content | string | length 2–6.02M |
| authors | list | length 1–1 |
| author | string | length 0–175 |
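The rows below follow this schema. As a minimal, illustrative sketch of filtering such a dump with pandas (the Parquet filename and the size threshold are hypothetical, not part of this dataset):

import pandas as pd

# Hypothetical local export of the rows below; adjust the path to your copy.
df = pd.read_parquet("python_files.parquet")
# Keep small, permissively licensed files and show a few identifying columns.
subset = df[(df["license_type"] == "permissive") & (df["length_bytes"] < 100_000)]
print(subset[["repo_name", "path", "star_events_count"]].head())
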
f4e6efba2acb098ba59cc3d1248893b426802823 | 1ae1bc24fa902e1c293af85eac37e777e4805eb9 | /server/tests/test_get_secret.py | b6d50871b1e24317e0854e960c6b8b9a10402a2d | [
"MIT"
]
| permissive | Excloudx6/simple-one-time-secret | 1745abde5c694707f6136483b5773cc04554e999 | c61c242ce41a7ef0c74e915b312e94d4ee37158c | refs/heads/main | 2023-08-03T07:45:14.826847 | 2021-09-15T17:33:35 | 2021-09-15T17:33:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 868 | py | from datetime import datetime
from uuid import UUID
from fastapi import Response
from server.endpoints import get_secret
def test__get_secret__valid_data__secret_returned():
response = Response()
ret = get_secret(UUID("11111111-1111-4111-a111-111111111111"), response)
assert ret["_id"] == "11111111-1111-4111-a111-111111111111"
assert isinstance(ret["expiration"], datetime)
assert ret["secret"] == "some_encrypted_secret"
def test__get_secret__expired_secret__404():
response = Response()
ret: Response = get_secret(UUID("22222222-2222-4222-a222-222222222222"), response)
assert ret == {"message": "Not Found"}
def test__get_secret__non_existent_secret__404():
response = Response()
ret: Response = get_secret(UUID("33333333-3333-4333-a333-333333333333"), response)
assert ret == {"message": "Not Found"}
| [
"[email protected]"
]
| |
f5dda19da49217fca423a9a350dac6e89131454f | b21571488740a3fb54e4779d6e73c5acff80749c | /analysis/karp_rabin.py | ec57877972c5e3eedd84050c25cf357d253d372b | []
| no_license | nnkennard/review-discourse | 428b1c68cd716c8530c5713db864fdb1f8a99e29 | a7e8012cbd3a229b21cfcbfee55962b2ab140fca | refs/heads/master | 2022-12-12T14:40:16.197293 | 2021-05-24T16:28:00 | 2021-05-24T16:28:00 | 236,221,931 | 0 | 0 | null | 2022-12-08T10:55:54 | 2020-01-25T19:55:34 | Python | UTF-8 | Python | false | false | 3,557 | py | import json
import math
import sys
import openreview_lib as orl
from tqdm import tqdm
WINDOW = 7
Q = 2124749677  # hash modulus; Python ints are arbitrary precision, so this value is not too big
def myhash(tokens):
    tok_str = "".join(tokens)
    hash_acc = 0
    for i, ch in enumerate(reversed(tok_str)):
        # Integer modular pow keeps this exact; math.pow returns a float and
        # loses precision once 2**i grows large.
        hash_acc = (hash_acc + pow(2, i, Q) * ord(ch)) % Q
    return hash_acc
def get_hashes(tokens):
return {i:myhash(tokens[i:i+WINDOW]) for i in range(len(tokens) - WINDOW)}
def karp_rabin(tokens_1, tokens_2):
hashes_1 = get_hashes(tokens_1)
hashes_2 = get_hashes(tokens_2)
results = []
for k1, v1 in hashes_1.items():
for k2, v2 in hashes_2.items():
if v1 == v2:
results.append((k1, k2))
return sorted(results)
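# Note: get_hashes recomputes each window hash from scratch and karp_rabin
# compares every pair of windows, so the matching step is quadratic in the
# number of windows; a classic Karp-Rabin rolling hash would instead update
# each hash in O(1) as the window slides.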
def find_parent(child_node, forum_map):
for forum_head, forum in forum_map.items():
if child_node in forum.keys():
return forum_head, forum[child_node]
return None
def get_examples_from_nodes_and_map(nodes, forum_map):
chunk_map = {}
pairs = []
for node in nodes:
top_node = node["included_nodes"][0]
ancestor_id, parent_id = find_parent(top_node, forum_map)
for maybe_parent in nodes:
if parent_id in maybe_parent["included_nodes"]:
chunk_map[parent_id] = chunk_tokens(maybe_parent["tokens"])
chunk_map[top_node] = chunk_tokens(node["tokens"])
pairs.append((ancestor_id, top_node, parent_id,))
break
return pairs, chunk_map
def chunk_tokens(tokens):
chunks = []
current_chunk = []
for token in tokens:
if token == "__NEWLINE":
if current_chunk:
chunks.append(current_chunk)
current_chunk = []
else:
current_chunk.append(token)
return chunks
def get_lcs(chunk1, chunk2, start_indices):
    current_lcs = []
    for start1, start2 in start_indices:
        j = 0
        # Extend the common run while the aligned tokens agree.
        while (start1 + j < len(chunk1) and start2 + j < len(chunk2)
               and chunk1[start1 + j] == chunk2[start2 + j]):
            j += 1
        if j > len(current_lcs):
            current_lcs = chunk2[start2:start2 + j]
    return current_lcs
class CommentPair(object):
def __init__(self, ancestor, child_node, parent_node, chunk_map):
child_chunks = chunk_map[child_node]
parent_chunks = chunk_map[parent_node]
child_chunks_mapped = {i:None for i in range(len(child_chunks))}
parent_chunks_mapped = {i:None for i in range(len(parent_chunks))}
lcs_map = {}
for i, child_chunk in enumerate(child_chunks):
for j, parent_chunk in enumerate(parent_chunks):
x = karp_rabin(child_chunk, parent_chunk)
if x:
child_chunks_mapped[i], parent_chunks_mapped[j] = j, i
lcs_map[(i,j)] = child_chunk[x[0][0]:x[0][0]+WINDOW]
assert len(parent_chunks) == len(parent_chunks_mapped)
assert len(child_chunks) == len(child_chunks_mapped)
self.data = {
"child": child_node,
"parent": parent_node,
"ancestor": ancestor,
"child_chunks": child_chunks_mapped,
"parent_chunks": parent_chunks_mapped,
"lcs" : list(lcs_map.values())
}
def main():
forum_info_file, input_file = sys.argv[1:]
with open(input_file, 'r') as f:
nodes = json.loads(f.read())["nodes"]
dataset = orl.get_datasets(forum_info_file, debug=False)["train"]
pairs, chunk_map = get_examples_from_nodes_and_map(nodes, dataset.forum_map)
matches = []
for ancestor, x, y in tqdm(pairs):
cp = CommentPair(ancestor, x, y, chunk_map)
matches.append(cp.data)
with open('kr_output.json', 'w') as f:
f.write(json.dumps(matches))
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
ad754b71e5640b1150ff4d3331cbc5ee7159f940 | 423b80913a2d7109336cd60ccf6d50947883447d | /euler2.py | 54bb3979f789375378f2dcdc551c57a0f3262a27 | []
| no_license | kaborso/Project-Euler | 3545eef46ce50fb0db2cbd65a1552bbf1ee503ae | 292f8c2d616eb0e14d6c0e2b9279708299e5f43c | refs/heads/master | 2016-09-16T14:08:39.576198 | 2011-06-20T22:05:03 | 2011-06-20T22:05:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | import sys
def sumfib(n):
    # Sum the even Fibonacci numbers that do not exceed n (Project Euler 2).
    total = 0
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            total += b
        a, b = b, a + b
    return total

limit = int(sys.argv[1])
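# Example (illustrative): `python euler2.py 4000000` prints 4613732.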
print(sumfib(limit)) | [
"[email protected]"
]
| |
f8bfb04de2d5971b0475709c0e401cf21b704a3a | e531a27eaad5a6bde184a84205a9da06b4e8e12a | /text/korean.py | fb639e5c55050a925743162b8a7c902387017967 | [
"MIT"
]
| permissive | zldzmfoq12/voice-synthesizer | 5751693cc35ca457eb29e0fe719a2832b861a127 | 2c347de9d64fa42035b21c317112f6b625aa0c25 | refs/heads/master | 2022-12-13T00:01:22.360153 | 2021-04-28T12:14:50 | 2021-04-28T12:14:50 | 200,749,138 | 8 | 1 | NOASSERTION | 2022-12-08T00:48:02 | 2019-08-06T00:45:36 | Python | UTF-8 | Python | false | false | 8,631 | py | # Code based on
import re
import os
import ast
import json
from jamo import hangul_to_jamo, h2j, j2h
from .ko_dictionary import english_dictionary, etc_dictionary
PAD = '_'
EOS = '~'
PUNC = '!\'(),-.:;?'
SPACE = ' '
JAMO_LEADS = "".join([chr(_) for _ in range(0x1100, 0x1113)])
JAMO_VOWELS = "".join([chr(_) for _ in range(0x1161, 0x1176)])
JAMO_TAILS = "".join([chr(_) for _ in range(0x11A8, 0x11C3)])
VALID_CHARS = JAMO_LEADS + JAMO_VOWELS + JAMO_TAILS + PUNC + SPACE
ALL_SYMBOLS = PAD + EOS + VALID_CHARS
char_to_id = {c: i for i, c in enumerate(ALL_SYMBOLS)}
id_to_char = {i: c for i, c in enumerate(ALL_SYMBOLS)}
quote_checker = """([`"'"“‘])(.+?)([`"'"”’])"""
def is_lead(char):
return char in JAMO_LEADS
def is_vowel(char):
return char in JAMO_VOWELS
def is_tail(char):
return char in JAMO_TAILS
def get_mode(char):
if is_lead(char):
return 0
elif is_vowel(char):
return 1
elif is_tail(char):
return 2
else:
return -1
def _get_text_from_candidates(candidates):
if len(candidates) == 0:
return ""
elif len(candidates) == 1:
return _jamo_char_to_hcj(candidates[0])
else:
return j2h(**dict(zip(["lead", "vowel", "tail"], candidates)))
def jamo_to_korean(text):
text = h2j(text)
idx = 0
new_text = ""
candidates = []
while True:
if idx >= len(text):
new_text += _get_text_from_candidates(candidates)
break
char = text[idx]
mode = get_mode(char)
if mode == 0:
new_text += _get_text_from_candidates(candidates)
candidates = [char]
elif mode == -1:
new_text += _get_text_from_candidates(candidates)
new_text += char
candidates = []
else:
candidates.append(char)
idx += 1
return new_text
num_to_kor = {
'0': '영',
'1': '일',
'2': '이',
'3': '삼',
'4': '사',
'5': '오',
'6': '육',
'7': '칠',
'8': '팔',
'9': '구',
}
unit_to_kor1 = {
'%': '퍼센트',
'cm': '센치미터',
'mm': '밀리미터',
'km': '킬로미터',
'kg': '킬로그람',
}
unit_to_kor2 = {
'm': '미터',
}
upper_to_kor = {
'A': '에이',
'B': '비',
'C': '씨',
'D': '디',
'E': '이',
'F': '에프',
'G': '지',
'H': '에이치',
'I': '아이',
'J': '제이',
'K': '케이',
'L': '엘',
'M': '엠',
'N': '엔',
'O': '오',
'P': '피',
'Q': '큐',
'R': '알',
'S': '에스',
'T': '티',
'U': '유',
'V': '브이',
'W': '더블유',
'X': '엑스',
'Y': '와이',
'Z': '지',
}
def compare_sentence_with_jamo(text1, text2):
    return h2j(text1) != h2j(text2)
def tokenize(text, as_id=False):
text = normalize(text)
tokens = list(hangul_to_jamo(text))
if as_id:
return [char_to_id[token] for token in tokens] + [char_to_id[EOS]]
else:
return [token for token in tokens] + [EOS]
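# e.g. tokenize("안녕") returns the decomposed jamo of the normalized text
# followed by the EOS symbol '~' (or the corresponding ids when as_id=True).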
def tokenizer_fn(iterator):
return (token for x in iterator for token in tokenize(x, as_id=False))
def normalize(text):
text = text.strip()
text = re.sub('\(\d+일\)', '', text)
text = re.sub('\([⺀-⺙⺛-⻳⼀-⿕々〇〡-〩〸-〺〻㐀-䶵一-鿃豈-鶴侮-頻並-龎]+\)', '', text)
text = normalize_with_dictionary(text, etc_dictionary)
text = normalize_english(text)
text = re.sub('[a-zA-Z]+', normalize_upper, text)
text = normalize_quote(text)
text = normalize_number(text)
return text
def normalize_with_dictionary(text, dic):
if any(key in text for key in dic.keys()):
pattern = re.compile('|'.join(re.escape(key) for key in dic.keys()))
return pattern.sub(lambda x: dic[x.group()], text)
else:
return text
def normalize_english(text):
def fn(m):
word = m.group()
if word in english_dictionary:
return english_dictionary.get(word)
else:
return word
text = re.sub("([A-Za-z]+)", fn, text)
return text
def normalize_upper(text):
text = text.group(0)
if all([char.isupper() for char in text]):
return "".join(upper_to_kor[char] for char in text)
else:
return text
def normalize_quote(text):
def fn(found_text):
        from nltk import sent_tokenize  # imported lazily: NLTK doesn't play well with multiprocessing
found_text = found_text.group()
unquoted_text = found_text[1:-1]
sentences = sent_tokenize(unquoted_text)
return " ".join(["'{}'".format(sent) for sent in sentences])
return re.sub(quote_checker, fn, text)
number_checker = "([+-]?\d[\d,]*)[\.]?\d*"
count_checker = "(시|명|가지|살|마리|포기|송이|수|톨|통|점|개|벌|척|채|다발|그루|자루|줄|켤레|그릇|잔|마디|상자|사람|곡|병|판)"
def normalize_number(text):
text = normalize_with_dictionary(text, unit_to_kor1)
text = normalize_with_dictionary(text, unit_to_kor2)
text = re.sub(number_checker + count_checker,
lambda x: number_to_korean(x, True), text)
text = re.sub(number_checker,
lambda x: number_to_korean(x, False), text)
return text
num_to_kor1 = [""] + list("일이삼사오육칠팔구")
num_to_kor2 = [""] + list("만억조경해")
num_to_kor3 = [""] + list("십백천")
#count_to_kor1 = [""] + ["하나","둘","셋","넷","다섯","여섯","일곱","여덟","아홉"]
count_to_kor1 = [""] + ["한","두","세","네","다섯","여섯","일곱","여덟","아홉"]
count_tenth_dict = {
"십": "열",
"두십": "스물",
"세십": "서른",
"네십": "마흔",
"다섯십": "쉰",
"여섯십": "예순",
"일곱십": "일흔",
"여덟십": "여든",
"아홉십": "아흔",
}
def number_to_korean(num_str, is_count=False):
if is_count:
num_str, unit_str = num_str.group(1), num_str.group(2)
else:
num_str, unit_str = num_str.group(), ""
num_str = num_str.replace(',', '')
num = ast.literal_eval(num_str)
if num == 0:
return "영"
check_float = num_str.split('.')
if len(check_float) == 2:
digit_str, float_str = check_float
elif len(check_float) >= 3:
raise Exception(" [!] Wrong number format")
else:
digit_str, float_str = check_float[0], None
if is_count and float_str is not None:
raise Exception(" [!] `is_count` and float number does not fit each other")
digit = int(digit_str)
if digit_str.startswith("-"):
digit, digit_str = abs(digit), str(abs(digit))
kor = ""
size = len(str(digit))
tmp = []
for i, v in enumerate(digit_str, start=1):
v = int(v)
if v != 0:
if is_count:
tmp += count_to_kor1[v]
else:
tmp += num_to_kor1[v]
tmp += num_to_kor3[(size - i) % 4]
if (size - i) % 4 == 0 and len(tmp) != 0:
kor += "".join(tmp)
tmp = []
kor += num_to_kor2[int((size - i) / 4)]
if is_count:
if kor.startswith("한") and len(kor) > 1:
kor = kor[1:]
if any(word in kor for word in count_tenth_dict):
kor = re.sub(
'|'.join(count_tenth_dict.keys()),
lambda x: count_tenth_dict[x.group()], kor)
if not is_count and kor.startswith("일") and len(kor) > 1:
kor = kor[1:]
if float_str is not None:
kor += "쩜 "
kor += re.sub('\d', lambda x: num_to_kor[x.group()], float_str)
if num_str.startswith("+"):
kor = "플러스 " + kor
elif num_str.startswith("-"):
kor = "마이너스 " + kor
return kor + unit_str
if __name__ == "__main__":
def test_normalize(text):
print(text)
print(normalize(text))
print("="*30)
test_normalize("JTBC는 JTBCs를 DY는 A가 Absolute")
test_normalize("오늘(13일) 101마리 강아지가")
test_normalize('"저돌"(猪突) 입니다.')
test_normalize('비대위원장이 지난 1월 이런 말을 했습니다. “난 그냥 산돼지처럼 돌파하는 스타일이다”')
test_normalize("지금은 -12.35%였고 종류는 5가지와 19가지, 그리고 55가지였다")
test_normalize("JTBC는 TH와 K 양이 2017년 9월 12일 오후 12시에 24살이 된다")
| [
"[email protected]"
]
| |
7accaa8ad9e3c45b158dd9537e55e683338dea29 | 70e1159856750f04e58c0ffc3f54d094a4602c07 | /booktest/views.py | 84958fd19d5631e83ebfd2b20bac16190adc186f | []
| no_license | wxp19940506/django_test | 032e78a4eb45eb0c54dbafd43dfd0e463d455bb5 | c586cb62d1bb1a21f3430155b3d82ab7b2a65da6 | refs/heads/master | 2021-05-10T11:52:54.186422 | 2018-01-22T07:55:11 | 2018-01-22T07:55:11 | 118,424,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | from django.shortcuts import render
from django.http import *
from django.template import RequestContext,loader
from .models import *
# Create your views here.
def index(request):
# temp = loader.get_template("booktest/index.html")
#
# return HttpResponse(temp.render())
booklist = BookInfo.objects.all()
context = {'lists':booklist}
return render(request,'booktest/index.html',context)
def show(request,id):
book = BookInfo.objects.get(pk=id)
herolist = book.heroinfo_set.all()
context = {'list':herolist}
return render(request,'booktest/show.html',context)
| [
"[email protected]"
]
| |
98a0e20e32aeea3d66d4be4ae64280c5eb0c2367 | 4963ee575127f4f966db61fbb6900db54538ee07 | /shuttle/migrations/0008_shuttle_route.py | b4ac83cb9d1359ae0e443b22802dfeec7397d945 | []
| no_license | spahan/shuttle | f85dcb954cfe3c90032c32ef1d2d554139a1120e | 6b7e5619410e8d4af41807450028f389dda560d9 | refs/heads/master | 2020-08-04T07:19:28.539156 | 2019-10-02T14:18:20 | 2019-10-02T14:18:20 | 212,053,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | # Generated by Django 2.2.5 on 2019-09-29 18:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shuttle', '0007_auto_20190929_2034'),
]
operations = [
migrations.AddField(
model_name='shuttle',
name='route',
field=models.URLField(blank=True, null=True),
),
]
| [
"[email protected]"
]
| |
1f4e873eab2fcd41f293bcb80c7e8ae1e5eb4377 | 0019ea5621577ab9a9a694e3ef91d913e981a28e | /missing_data_complementor/__init__.py | 47ea50802fb334e4e0fffda9b4c61d9c2aa1527b | []
| no_license | sungc1/fake-news-framework_Py3 | 676710b3bf7b8feb4c237ffed7d1d280f4967890 | e3552b5bc2a30dbd52ad893ce8dd29aa2242f864 | refs/heads/main | 2023-01-19T23:42:13.294446 | 2020-12-01T18:38:31 | 2020-12-01T18:38:31 | 428,178,049 | 1 | 0 | null | 2021-11-15T08:18:23 | 2021-11-15T08:18:23 | null | UTF-8 | Python | false | false | 47 | py | #
# Created by Aviad on 03-Jun-16 11:40 AM.
# | [
"[email protected]"
]
| |
ae27520913674390e809620c54463d13c4e88d63 | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /GIT-USERS/TOM-Lambda/CS35_IntroPython_GP/day3/intro/11_args.py | 2ec2eca832f454921138650bfb137e422a0c4711 | [
"MIT"
]
| permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 2,852 | py | # Experiment with positional arguments, arbitrary arguments, and keyword
# arguments.
# Write a function f1 that takes two integer positional arguments and returns
# the sum. This is what you'd consider to be a regular, normal function.
<<<<<<< HEAD
def f1(a, b):
return a + b
=======
def f1(a, b):
return a + b
>>>>>>> 23fb4d348bb9c7b7b370cb2afcd785793e3816ea
print(f1(1, 2))
# Write a function f2 that takes any number of integer arguments and prints the
# sum. Google for "python arbitrary arguments" and look for "*args"
<<<<<<< HEAD
def f2(*args):
sum = 0
for i in args:
sum += i
return sum
print(f2(1)) # Should print 1
print(f2(1, 3)) # Should print 4
print(f2(1, 4, -12)) # Should print -7
=======
def f2(*args):
sum = 0
for i in args:
sum += i
return sum
print(f2(1)) # Should print 1
print(f2(1, 3)) # Should print 4
print(f2(1, 4, -12)) # Should print -7
>>>>>>> 23fb4d348bb9c7b7b370cb2afcd785793e3816ea
print(f2(7, 9, 1, 3, 4, 9, 0)) # Should print 33
a = [7, 6, 5, 4]
# What thing do you have to add to make this work?
<<<<<<< HEAD
print(f2(*a)) # Should print 22
=======
print(f2(*a)) # Should print 22
>>>>>>> 23fb4d348bb9c7b7b370cb2afcd785793e3816ea
# Write a function f3 that accepts either one or two arguments. If one argument,
# it returns that value plus 1. If two arguments, it returns the sum of the
# arguments. Google "python default arguments" for a hint.
<<<<<<< HEAD
def f3(a, b=1):
return a + b
print(f3(1, 2)) # Should print 3
print(f3(8)) # Should print 9
=======
def f3(a, b=1):
return a + b
print(f3(1, 2)) # Should print 3
print(f3(8)) # Should print 9
>>>>>>> 23fb4d348bb9c7b7b370cb2afcd785793e3816ea
# Write a function f4 that accepts an arbitrary number of keyword arguments and
# prints out the keys and values like so:
#
# key: foo, value: bar
# key: baz, value: 12
#
# Google "python keyword arguments".
<<<<<<< HEAD
def f4(**kwargs):
for k, v in kwargs.items():
print(f'key: {k}, value: {v}')
# Alternate:
# for k in kwargs:
# print(f'key: {k}, value: {kwargs[k]}')
=======
def f4(**kwargs):
for k, v in kwargs.items():
print(f"key: {k}, value: {v}")
# Alternate:
# for k in kwargs:
# print(f'key: {k}, value: {kwargs[k]}')
>>>>>>> 23fb4d348bb9c7b7b370cb2afcd785793e3816ea
# Should print
# key: a, value: 12
# key: b, value: 30
f4(a=12, b=30)
# Should print
# key: city, value: Berkeley
# key: population, value: 121240
# key: founded, value: "March 23, 1868"
f4(city="Berkeley", population=121240, founded="March 23, 1868")
<<<<<<< HEAD
d = {
"monster": "goblin",
"hp": 3
}
=======
d = {"monster": "goblin", "hp": 3}
>>>>>>> 23fb4d348bb9c7b7b370cb2afcd785793e3816ea
# What thing do you have to add to make this work?
f4(**d)
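# The call above should print
# key: monster, value: goblin
# key: hp, value: 3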
| [
"[email protected]"
]
| |
3c6dc99ca36a539efb2e696f6b57cbd205a83f8b | ae7ba9c83692cfcb39e95483d84610715930fe9e | /baidu/Paddle/paddle/trainer/tests/config_parser_test.py | 5ca874cec7914a20f79c2c7b1873c5bd04f60dca | [
"Apache-2.0"
]
| permissive | xenron/sandbox-github-clone | 364721769ea0784fb82827b07196eaa32190126b | 5eccdd8631f8bad78eb88bb89144972dbabc109c | refs/heads/master | 2022-05-01T21:18:43.101664 | 2016-09-12T12:38:32 | 2016-09-12T12:38:32 | 65,951,766 | 5 | 7 | null | null | null | null | UTF-8 | Python | false | false | 1,002 | py | # Copyright (c) 2016 Baidu, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer.config_parser import parse_config_and_serialize
if __name__ == '__main__':
parse_config_and_serialize('trainer/tests/test_config.conf', '')
parse_config_and_serialize(
'trainer/tests/sample_trainer_config.conf',
'extension_module_name=paddle.trainer.config_parser_extension')
parse_config_and_serialize('gserver/tests/pyDataProvider/trainer.conf', '')
| [
"[email protected]"
]
| |
ff91934bcf4db590a09adef32a78ed3f8b3aab2e | 12a653d0add7e975419c3aa05966f593d31b99c4 | /syplaylist_downloader.py | e537a7faf2272acc73ee0b925a1f844f141d6ea3 | []
| no_license | avinashpai94/playlist_downloader | dd9438a8a5313a9bbb6b24d7ad71cb478e447d57 | 90f42edff6f113e971710481d4d73fb7fea8b352 | refs/heads/master | 2023-02-25T05:58:15.993194 | 2021-02-12T20:32:45 | 2021-02-12T20:32:45 | 297,176,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,092 | py | from __future__ import unicode_literals
import getopt
import re
import sys
import urllib.request
from urllib.parse import urlparse
import requests
import youtube_dl
from bs4 import BeautifulSoup
ydl_opts = {
'format': 'bestaudio/best',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '256',
}],
}
def youtube_download(video_link):
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
ydl.download([video_link])
def youtube_playlist_downloader(url):
html = urllib.request.urlopen(url)
video_ids = re.findall(r"watch\?v=(\S{11})", html.read().decode())
for video_id in video_ids:
link = "https://www.youtube.com/watch?v=" + video_id
youtube_download(link)
def spotify_playlist_downloader(url):
playlist_url = url
page = requests.get(playlist_url)
soup = BeautifulSoup(page.content, 'html.parser')
track_selection = 'track-name'
artist_selection = 'artists-albums'
tracks = soup.find_all("span", class_=track_selection)
artists = soup.find_all("span", class_=artist_selection)
contents = []
for track, artist in zip(tracks, artists):
contents.append([track.text, artist.text])
for line in contents:
track, artist = line
video_search = re.sub(r"\s+", " ", track)
search_url = ('https://www.youtube.com/results?search_query={}&page=&utm_source=opensearch'.format(
video_search.replace(' ', '+')))
html = urllib.request.urlopen(search_url)
video_ids = re.findall(r"watch\?v=(\S{11})", html.read().decode())
link = "https://www.youtube.com/watch?v=" + video_ids[0]
youtube_download(link)
def main():
try:
url = sys.argv[1]
    except IndexError:
        print("Run as 'python playlist_downloader.py <playlist_url>'")
sys.exit()
parsed_uri = urlparse(url)
if parsed_uri.netloc == "www.youtube.com":
youtube_playlist_downloader(url)
else:
spotify_playlist_downloader(url)
if __name__ == '__main__':
main()
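# Example invocation (the URL is illustrative):
#   python syplaylist_downloader.py "https://www.youtube.com/playlist?list=PLxxxx"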
| [
"[email protected]"
]
| |
7d8115df6fa61bc6f721bc8db8bd47858dc75982 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/primu.py | 74ed5f7f4b48b1b61044808885c34bd9dce48229 | []
| no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 255 | py | ii = [('WilbRLW.py', 1), ('WilkJMC3.py', 3), ('ClarGE2.py', 2), ('GellWPT2.py', 1), ('WilkJMC2.py', 1), ('LyelCPG.py', 1), ('SoutRD.py', 3), ('WilkJMC.py', 3), ('WestJIT.py', 1), ('FitzRNS.py', 1), ('DibdTRL.py', 1), ('EvarJSP.py', 1), ('SadlMLP2.py', 1)] | [
"[email protected]"
]
| |
29a3f1ee6f24d5c055b6e71d19a8a65a470c9846 | 31999cc128d2ee577d349e6d1f1f0306aeab4298 | /script/01test_01.py | 67db4101ad1adbfdc4e4fa3e2e487528b27fe34e | []
 | no_license | jimozonguo/P_Jenkins20210111 | 10c38cf6053e959dfa33162751fb36fe42d88d7e | 8a72079e63ac473e98075667bfcd82d916ed68b4 | refs/heads/master | 2023-04-08T20:07:54.034449 | 2021-04-01T08:50:13 | 2021-04-01T08:50:13 | 353,633,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | # Case study: the setup_class and teardown_class hooks of the pytest framework!
# 1. A test class may define each of these hooks only once!
# 2. No matter how many test functions the class contains, each hook runs exactly once!
# 3. Execution order: setup_class / the ordinary test functions / teardown_class!
class TestLogin:
def setup_class(self):
print("setup_class")
def teardown_class(self):
print("teardown_class")
def test_01(self):
print("test01")
def test_02(self):
print("test02")
| [
"[email protected]"
]
| |
774fb2e2fbb6aeaf5a787bf6a574383945b0fea9 | 4407d9ce4604d6a5cbbf5cd502146d24ea7c52c2 | /New folder/task1.py | d99e6a976c4bf596d876dabf98072cb55663ae0b | []
| no_license | mstalokdwivedi/task1 | b371c536b0761262a0e7d8ca46265afdf47f57f6 | 82cb08806ffbaf6d5cf35f1a66a32a3ad0575fd9 | refs/heads/main | 2023-02-24T20:35:37.315642 | 2021-01-21T16:30:10 | 2021-01-21T16:30:10 | 327,593,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,603 | py | import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns
url= 'https://raw.githubusercontent.com/AdiPersonalWorks/Random/master/student_scores%20-%20student_scores.csv'
s_data = pd.read_csv(url)
s_data.head(10)
s_data.shape
s_data.info()
print("\nMissing values :",s_data.isnull().sum().values.sum())
s_data.describe()
s_data.info()
s_data.plot(kind='scatter',x='Hours',y='Scores');
plt.show()
s_data.corr(method='pearson')
s_data.corr(method='spearman')
hours=s_data['Hours']
Scores=s_data['Scores']
sns.distplot(hours)
sns.distplot(Scores)
x=s_data.iloc[:, :-1].values
y=s_data.iloc[:, 1].values
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=50)
reg=LinearRegression()
reg.fit(x_train,y_train)
m=reg.coef_
c=reg.intercept_
line=m*x+c
plt.scatter(x, y)
plt.plot(x, line);
plt.show()
y_pred=reg.predict(x_test)
actual_predicted=pd.DataFrame({'Target':y_test,'Predicted':y_pred})
actual_predicted
sns.set_style('whitegrid')
sns.distplot(np.array(y_test-y_pred))
plt.show()
hours=np.array(9.25)
hours=hours.reshape(-1,1)
pred=reg.predict(hours)
print("no of hours={}",format(hours))
print("No of Hours={}",format(hours))
print("predicted score ={}",format(pred[0]))
from sklearn import metrics
print("mean absolute error:",metrics.mean_absolute_error(y_test,y_pred))
print('R square:', metrics.r2_score(y_test, y_pred)) | [
"[email protected]"
]
| |
58c7561bb30c4afd5ffb7dd7f6899788528703c2 | 8f5240f3d27778632aa340fa535e4c8ce48cd756 | /example.py | ada4fc221ea66f2f81b0d679ffc02b82863d0a11 | []
| no_license | jy00295005/ML | 590ac15f9556141269238186289c56b48f80db7b | 210c9ed77d634803e0503cdcbc3c5af10913e579 | refs/heads/master | 2020-05-18T11:05:25.256160 | 2014-10-31T08:23:05 | 2014-10-31T08:23:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,826 | py | #!/usr/bin/env python
# Copyright 2013 AlchemyAPI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from __future__ import print_function
from alchemyapi import AlchemyAPI
import json
demo_text = 'Yesterday dumb Bob destroyed my fancy iPhone in beautiful Denver, Colorado. I guess I will have to head over to the Apple Store and buy a new one.'
# Create the AlchemyAPI Object
alchemyapi = AlchemyAPI()
print('')
print('')
print('')
print('############################################')
print('# Keyword Extraction Example #')
print('############################################')
print('')
print('')
print('Processing text: ', demo_text)
print('')
response = alchemyapi.keywords('text', demo_text, {'sentiment': 1})
if response['status'] == 'OK':
print('## Response Object ##')
# print(json.dumps(response, indent=4))
print('')
print('## Keywords ##')
for keyword in response['keywords']:
print('text: ', keyword['text'].encode('utf-8'))
print('relevance: ', keyword['relevance'])
print('sentiment: ', keyword['sentiment']['type'])
if 'score' in keyword['sentiment']:
print('sentiment score: ' + keyword['sentiment']['score'])
print('')
else:
print('Error in keyword extaction call: ', response['statusInfo']) | [
"[email protected]"
]
| |
9d62bfba10a1058c0896956b027b84b51b6a9859 | 0e11f5c196d0536778c466066e0992f844d575c3 | /apps/cate/views.py | 380bed3e49642d96e879205492334cc39f54ef80 | []
| no_license | tonnychina/sjdpro | ab06d51ca2a4286b2df1b3d98d4131b57b2ed212 | 739ec23e1d39548b896277c88d7e4d9f460557e5 | refs/heads/master | 2020-05-21T05:54:49.272264 | 2019-03-16T02:23:20 | 2019-03-16T02:23:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | from django.shortcuts import render
from rest_framework import viewsets
from rest_framework import mixins
from rest_framework.response import Response
from rest_framework import status
from .models import *
from .serializers import *
# Create your views here.
class CateViewsets(viewsets.GenericViewSet,mixins.ListModelMixin,mixins.RetrieveModelMixin):
"""
list:
    Section categories
retrieve:
    Get all child categories under a given id
"""
queryset = Cates.objects.all()
serializer_class = CatesSerializers
class CateCaseViewsets(viewsets.GenericViewSet,mixins.RetrieveModelMixin):
"""
retrieve:
    Get all cases under a given id
"""
queryset = Cates.objects.all()
serializer_class = CateCaseSerializers
| [
"[email protected]"
]
| |
e4eeae8029b83e01c7c094792e3fa684b7ee6199 | e8e9eb353965e9c4e5a215e3c0dd4361c6f3b83f | /Python/movie-crud/curd/settings.py | 3b4cb69e27ce285175200c1854ed9a68b1f2ebc6 | []
| no_license | Jin-Woong/TIL | 74135c937b7daf3f06af0297601d9596ca90ea8e | a3db43a996458818a8efd1610e62e839c4672d8f | refs/heads/master | 2022-04-27T16:15:08.167151 | 2020-01-20T00:15:17 | 2020-01-20T00:15:17 | 188,746,786 | 0 | 0 | null | 2022-04-22T21:32:05 | 2019-05-27T00:45:14 | Python | UTF-8 | Python | false | false | 3,135 | py | """
Django settings for curd project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'n3o72rk@v-8)c%frnzjdvv-61+ivrk%6(=mo!he=7u1a*(sl(@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
# Local app
'movies',
# django apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'curd.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'curd.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
]
| |
812652e5617f9a47aa6e3f5493cd2f65111596b4 | c3351062731885d604a39856b2f933144cac70bc | /tools/Sikuli/OpenDialogClickFolderSelect.sikuli/OpenDialogClickFolderSelect.py | 2786d858099aaf9188c804348c69087d2b935727 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-3-Clause"
]
| permissive | Pr-Mex/vanessa-automation | 4efb7924242b70d86f1ef34b48de638602a6d8f3 | e9fcb486384a3c96db8ee7218294550c78bf096c | refs/heads/develop | 2023-09-04T00:48:40.953195 | 2023-08-30T19:28:16 | 2023-08-30T19:28:16 | 133,170,708 | 395 | 356 | BSD-3-Clause | 2023-09-11T17:17:02 | 2018-05-12T17:55:53 | 1C Enterprise | UTF-8 | Python | false | false | 31 | py | click("Bb60pnanxm.png")
exit(0) | [
"[email protected]"
]
| |
ad49890521b48fb274e7238b8242e87c6d3fac1b | 36a7958d7df92be80a35806bf18f5345b0dd5c2b | /pre.py | 23166158b596c4da9175976fa6187ac88d9b6b3e | []
| no_license | natsumeshow/fujitsu_hackathon_2021 | eb9e06be9e6585f51eb73300f1ef6cc0a8b14334 | 412b79c87547371a9654b6049a3db08c2b3984ec | refs/heads/main | 2023-02-27T15:47:22.553212 | 2021-02-06T15:58:16 | 2021-02-06T15:58:16 | 333,308,211 | 0 | 1 | null | 2021-02-04T02:24:12 | 2021-01-27T04:56:58 | CSS | UTF-8 | Python | false | false | 1,419 | py | import cv2
from pathlib import Path
import numpy as np
from dammy import model
def path2id(videoPath):
videoInfo = videoPath.name.split('.')
if len(videoInfo)<1:
return False, False
elif not videoInfo[1] in ['mp4']:
return False, False
else:
return True, videoInfo[0]
def make_landmark(videoPath, landmarkPath):
cap = cv2.VideoCapture(videoPath)
# fps = cap.get(cv2.CAP_PROP_FPS)
# frames = []
    ret, frame = cap.read()
    lms = []
    while ret:
        lm = np.zeros([18, 2])
        y = model(frame, 0, (True, True), 4)
        for key in y:
            lm[key, :] = y[key]
        lms.append(lm)
        # Read the next frame only after the current one is processed, so a
        # failed read at the end of the video never reaches model().
        ret, frame = cap.read()
print(np.array(lms).shape)
# print(len(frames))
# print(frames[0].shape)
np.save(landmarkPath, np.array(lms))
dataDir = Path('data')
landmarkDir = Path('landmark')
dataDir.mkdir(exist_ok=True)
landmarkDir.mkdir(exist_ok=True)
for videoPath in dataDir.iterdir():
isVideo, videoId = path2id(videoPath)
if not isVideo:
print('{} is not video ?'.format(videoPath))
continue
landmarkPath = landmarkDir / (videoId + '.npy')
if not landmarkPath.is_file():
make_landmark(str(videoPath), str(landmarkPath))
print('landmark -> {}'.format(landmarkPath))
else:
print('{} is already made'.format(landmarkPath)) | [
"[email protected]"
]
| |
0b2d61e829acf57861e9f91e5531cbffee674fd6 | ba8295dc0d69dc3a70b38141a939f007c8238864 | /python-practice-problems/integersums.py | c3778faae25e821d2780a4ae74f7dd32a2ab0dbe | [
"MIT"
]
| permissive | michael-huber2772/realpython-material | c830954acd813cc2cd15e16516e9ffbf9032b83a | 29b2cc21dcb328e679f3e0ddb8a92a658842181d | refs/heads/main | 2023-05-13T23:48:24.026074 | 2021-06-09T04:39:29 | 2021-06-09T04:39:29 | 375,227,494 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | #!/usr/bin/env python3
""" Sum of Integers Up To n
Write a function, add_it_up, which returns the sum of the integers from 0
to the single integer input parameter.
The function should return 0 if a non-integer is passed in.
"""
import unittest
def add_it_up(n):
try:
result = sum(range(n + 1))
except TypeError:
result = 0
return result
class IntegerSumTestCase(unittest.TestCase):
def test_to_ten(self):
results = [0, 1, 3, 6, 10, 15, 21, 28, 36, 45]
for n in range(10):
self.assertEqual(add_it_up(n), results[n])
def test_string(self):
self.assertEqual(add_it_up("testing"), 0)
def test_float(self):
self.assertEqual(add_it_up(0.124), 0)
def test_negative(self):
self.assertEqual(add_it_up(-19), 0)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
]
| |
8aa6022860474550f15b53fd56bd38684e14aaea | 1e59f50e3abab92ba9ed8156a62aec2731248e78 | /photos/views.py | 2d98a9191c03e70b4d1e4935b4a31a8bad680d18 | [
"MIT"
]
| permissive | kiptoo-rotich/Gallery | 56290325aa0747e0ef9b4e6efe99ced9d961bd28 | 1124701ce3b24bdd9848f3a8e8196fb8188bfee7 | refs/heads/master | 2023-06-16T20:15:41.343778 | 2021-07-05T14:05:57 | 2021-07-05T14:05:57 | 382,277,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,176 | py | from django.shortcuts import render
from .models import Image,User,Category,Location
import datetime as dt
from django.http import Http404
def photos(request):
photos=Image.objects.all()
date=dt.date.today()
users=User.objects.all()
category=Category.objects.all()
location=Location.objects.all()
return render(request,'main/photos.html',{'photos':photos,'date':date,'category':category,'location':location})
def search(request):
if 'Photo' in request.GET and request.GET['Photo']:
search_term=request.GET.get('Photo')
searched_photo=Category.search(search_term)
message=f"{search_term}"
return render(request,'main/search.html',{"message":message,"photos":searched_photo})
else:
message="You haven't searched for any term"
return render(request,"main/search.html",{"message":message})
def single_photo(request,photo_id):
try:
photo=Image.objects.get(id=photo_id)
image_description=Image.objects.get(id=photo_id)
    except Image.DoesNotExist:
        raise Http404()
    return render(request,'main/photo.html',{'photo':photo,"image_description":image_description}) | [
"[email protected]"
]
| |
a75a10fc3aaed639fdc8957c9020250fded2d7a3 | 44edd98c9d69e79378bd2b25b1d7c7f6fa95b87f | /new.py | 76023d073af4cfcc8a1a6f316049a2ed3cf6b70d | []
| no_license | UvinduIndumewan/git-test | f2d4b53269e3626948835e5381e80250a1c463cf | 199bae4f6c9b4888a5d810681e14120a8d8fc91a | refs/heads/master | 2022-11-07T17:09:44.805801 | 2020-06-13T12:09:48 | 2020-06-13T12:09:48 | 271,993,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | """hello world"""
"""This is a test subject"""
"""This is test"""
| [
"[email protected]"
]
| |
9aaf6ea0955df89b95dda0aeccfac6b4787de1fc | 5396712984717c2f8b6f1ac90cbe8d8f92a1e88d | /WebProject/route/redis.py | 65ae89ebbd291b1e83c232a6afc0ec5644d2cf52 | []
| no_license | xzhsy/flaskcm | ead7f4b5ea636bfb31b95f399d9c1b71f8a75630 | c43630fb3ec020b57cdd9fcfc05b79827016441f | refs/heads/master | 2020-03-24T22:37:37.809110 | 2019-10-31T11:39:16 | 2019-10-31T11:39:16 | 143,094,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,062 | py | """
Routes and views for the Flask application.

Redis data management section.
"""
from datetime import datetime
from flask import (render_template, request)
from WebProject import app
from WebProject.common import web_helper
from WebProject.models.models import Rediscfg
from WebProject.route import repository
@app.route('/redis')
def redisIndex():
"""Renders the about page."""
    # Create the session object:
session = repository.GetSession()
items = session.query(Rediscfg).all()
return render_template('/redis/redis_index.html',
data=items,
year=datetime.now().year)
@app.route('/redis/append')
def redisAppend():
    # Create the data object
data = Rediscfg()
return render_template('/redis/redis_edit.html',
        title='Redis Info',
year=datetime.now().year,
model=data
)
@app.route('/redis/update')
def redisUpdate():
    # Create the session object
session = repository.GetSession()
data = Rediscfg()
    data.Redis_Id = request.form.get('Redis_Id')
    data.DomainName = request.form.get('DomainName')
    data.IPAddress = request.form.get('IPAddress')
    data.Username = request.form.get('Username')
    data.Note = request.form.get('Note')
if(data.Redis_Id == 'None' or data.Redis_Id == None):
data.Redis_Id = None
session.add(data)
session.commit()
else:
session.query(Rediscfg).filter(Rediscfg.Redis_Id == data.Redis_Id).update(
{"DomainName": data.DomainName, "IPAddress": data.IPAddress, "Username": data.Username, "Note": data.Note, }, synchronize_session=False)
session.commit()
    return web_helper.return_msg(0, "Saved successfully!")
@app.route('/redis/edit/<redisId>')
def redisEdit(redisId):
    # Create the session object:
session = repository.GetSession()
redis = session.query(Rediscfg).get(redisId)
return render_template('/redis/redis_edit.html', year=datetime.now().year, model=redis)
| [
"[email protected]"
]
| |
b58032f56c0d13ae8dea75ef3b8fab87a713489d | 19e3072705182e24a6387cdbece4ce6d4797695b | /AndroidAppUploader/venv/bin/tkconch | ca9721301be9eeb68e834b4d60a2e281d125d0ff | []
| no_license | pramodgobburi3/Android-CDN | b83fe019415a1dd4681db4f5c37a8bfebf12b7ec | d01aea8ea3c106fd3a9f4cb670932b7704b34b2c | refs/heads/master | 2020-04-24T06:44:16.043704 | 2019-02-21T01:10:40 | 2019-02-21T01:10:40 | 171,775,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | #!/Users/pramodgobburi/PycharmProjects/AndroidAppUploader/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'Twisted==18.9.0','console_scripts','tkconch'
__requires__ = 'Twisted==18.9.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('Twisted==18.9.0', 'console_scripts', 'tkconch')()
)
| [
"[email protected]"
]
| ||
e7fc2c8eede38ab1d057f2930410a29a6191871a | f14946892dcc62732cffd0dba364d2098e6de607 | /converter.py | 40fb58d5f55a79226720f093675f16897083b36c | []
| no_license | DollaR84/notes | 556368c12b0ead9901b05b95a5691138b588eb86 | a74ec7cf41b842501d1c24ec3b180d76be1fbef1 | refs/heads/master | 2023-03-26T21:04:37.415037 | 2020-06-18T14:11:15 | 2020-06-18T14:11:15 | 223,773,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,300 | py | """
Converter database from old versions to new.
Created on 19.04.2020
@author: Ruslan Dolovanyuk
"""
from copy import deepcopy
from datetime import datetime
import os
from database import Database
import tables
import updates
class DBConverter:
"""Converter database on new update versions."""
def __init__(self, db_name):
"""initialization converter database."""
self.__db_name = db_name
self.__db = Database()
self.__update_functions = [
'update_db2',
'update_db3',
'update_db4',
'update_db5',
]
def __get_old_data(self, tables_list):
"""Get all data from old database."""
self.__old_data = {table: self.__db.get("SELECT * FROM %s" % table) for table in tables_list}
def checker(self, db, tables_dict):
"""Check and return version input database."""
tables_db = db.get_tables_names()
tables_cr = tables.get_tables_names(tables_dict)
diff_tables = list(set(tables_cr) - set(tables_db))
if not diff_tables:
for table in tables_cr:
columns_db = db.get_columns_names(table)
diff_columns = list(set(tables.get_columns_names(tables_dict[table])) - set(columns_db))
if 'order_sort' in diff_columns:
return 1
elif 'readonly' in diff_columns:
return 2
elif ('date_create' in diff_columns) and ('date_update' in diff_columns):
return 3
elif ('state_check' in diff_columns) and ('state' in diff_columns):
return 4
else:
pass
elif 'states' in diff_tables:
return 4
else:
pass
return tables.VERSION
def __save_old_db(self, db_name, version):
"""Saving old databases before updates."""
date = datetime.strftime(datetime.now(), "%d.%m.%Y")
time = datetime.strftime(datetime.now(), "%H.%M.%S")
try:
os.rename(''.join([db_name, '.db']), ''.join([db_name, '.v{}.'.format(version), date, '.db']))
except:
os.rename(''.join([db_name, '.db']), ''.join([db_name, '.v{}.'.format(version), date, '.', time, '.db']))
def update_db(self, db_ver, tables_dict_default, update_func):
"""Run update database tables."""
self.__db.connect(self.__db_name + '.db')
self.__get_old_data(self.__db.get_tables_names())
self.__db.disconnect()
self.__save_old_db(self.__db_name, db_ver)
self.__db.connect(self.__db_name + '.db')
tables_dict = deepcopy(tables_dict_default)
for table in tables_dict.keys():
tables_dict[table].extend(updates.columns_all(table, db_ver+1))
script = 'CREATE TABLE {} ({}) WITHOUT ROWID'.format(table,
', '.join([' '.join(row) for row in tables_dict[table]]))
self.__db.put(script)
columns = tables.get_columns_names(tables_dict[table])
rows = self.__old_data.get(table, [])
update_func(table, columns, rows)
self.__db.commit()
self.__db.disconnect()
def update_db2(self, table, columns, rows):
"""Update database tables from version database 1 to version 2."""
counter = {}
for row in rows:
if table == 'notes':
parent = row[-1]
if parent not in counter:
counter[parent] = 0
counter[parent] += 1
script = 'INSERT INTO {} ({}) VALUES ({}, {})'.format(table,
', '.join(columns),
', '.join(['?' for _ in range(len(row))]),
counter[parent])
else:
script = 'INSERT INTO {} ({}) VALUES ({})'.format(table,
', '.join(columns),
', '.join(['?' for _ in range(len(row))]))
self.__db.put(script, *row)
def update_db3(self, table, columns, rows):
"""Update database tables from version database 2 to version 3."""
for row in rows:
if table == 'notes':
script = 'INSERT INTO {} ({}) VALUES ({}, 0)'.format(table,
', '.join(columns),
', '.join(['?' for _ in range(len(row))]))
else:
script = 'INSERT INTO {} ({}) VALUES ({})'.format(table,
', '.join(columns),
', '.join(['?' for _ in range(len(row))]))
self.__db.put(script, *row)
def update_db4(self, table, columns, rows):
"""Update database tables from version database 3 to version 4."""
for row in rows:
if table == 'notes':
script = 'INSERT INTO {} ({}) VALUES ({}, "", "")'.format(table,
', '.join(columns),
', '.join(['?' for _ in range(len(row))]))
else:
script = 'INSERT INTO {} ({}) VALUES ({})'.format(table,
', '.join(columns),
', '.join(['?' for _ in range(len(row))]))
self.__db.put(script, *row)
def update_db5(self, table, columns, rows):
"""Update database tables from version database 4 to version 5."""
for row in rows:
if table == 'notes':
script = 'INSERT INTO {} ({}) VALUES ({}, 0, "")'.format(table,
', '.join(columns),
', '.join(['?' for _ in range(len(row))]))
else:
script = 'INSERT INTO {} ({}) VALUES ({})'.format(table,
', '.join(columns),
', '.join(['?' for _ in range(len(row))]))
self.__db.put(script, *row)
def check_rows(self, db, tables_dict):
"""Add rows in updates databases."""
for table in list(tables_dict.keys()):
update_dict = updates.ROWS.get(table, {})
for version, rows in update_dict.items():
if version <= tables.VERSION:
if db.get_last_id(table) < int(rows[-1].split(', ')[0]):
columns = tables.get_columns_names(tables_dict[table])
for row in rows:
script = 'INSERT INTO {} ({}) VALUES ({})'.format(table, ', '.join(columns), row)
db.put(script)
db.commit()
def run(self, tables_dict_default, tables_dict):
"""Run convert data from old database to new."""
try:
self.__db.connect(self.__db_name + '.db')
db_ver = self.checker(self.__db, tables_dict)
self.__db.disconnect()
for index in range(db_ver-1, tables.VERSION-1):
self.update_db(index+1, tables_dict_default, getattr(self, self.__update_functions[index]))
except Exception as e:
print(e)
return False
return True
def main():
"""Main running this script."""
dbconv = DBConverter('notes')
    # run() requires the default and current table definitions; the attribute
    # names below are assumed placeholders for whatever the project's `tables`
    # module actually exposes.
    dbconv.run(tables.TABLES_DEFAULT, tables.TABLES)
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
f3af49040dcee4f510c7bed61a3e84e70e7f951b | 3d1b309bedcdf5fbd8b93c9e615b632ac7c96956 | /day23/solution1.py | 91db7b3aa712855e8487fe3eccccba6dbcc767d4 | []
| no_license | sebschneid/adventofcode2019 | 61877364fece04fe5aed3e5e99e7179da4945d31 | 601e01427c185b910bb94ce2782e4e55593c960a | refs/heads/master | 2020-09-22T22:03:22.201455 | 2019-12-23T22:22:39 | 2019-12-23T22:22:39 | 225,330,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,400 | py | from typing import Dict, List, NamedTuple, NewType, Optional
import intcode
import network
if __name__ == "__main__":
with open("input.txt") as file:
intcode_string = file.read()
intcode_start = intcode.get_intcode_from_input(intcode_string)
computers = {}
for adress in range(50):
program = intcode.Program(
intcode_start.copy(), inputs=[adress], outputs=[]
)
output = program.run_to_next_input()
program.clear_output()
computers[network.Adress(adress)] = network.NetworkController(
program=program, adress=network.Adress(adress), queue=[]
)
program = intcode.Program(intcode_start.copy(), inputs=[adress], outputs=[])
computers[network.Adress(255)] = network.NatMonitor(program)
test_network = network.Network(computers)
solved = False
while not solved:
for adress, computer in test_network.computers.items():
computer.receive()
if adress != network.Adress(255):
messages = test_network.send(adress, listen=True)
if messages:
for message in messages:
if message.adress == 255:
print(
f"Y value of the first message sent to adreess 255 is {message.packet.y}"
)
solved = True
| [
"[email protected]"
]
| |
4ca9dc4520f70c25ecf171ea6f53ba448000faba | 33f5e287fdc6d8f4798142237255db79d25b084d | /FirstPythonProject/UsingStrings.py | dca6f210ca22c0f3c680aa9645d068cd210149b3 | []
| no_license | comingback2life/LearningPython | 27859f140aee2dde9fbe990f7c53861d6b857d87 | 354318a0ce49096e345ac47dcca543eeb7956f54 | refs/heads/master | 2021-06-23T23:21:06.994686 | 2017-09-10T16:59:12 | 2017-09-10T16:59:12 | 100,623,839 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | my_name= 'Samip Pokharel'
my_age=18 # This age is not a lie and is true
my_height= 75 # The height is in inches
my_weight=175 # The weight is in lbs
my_eyes= 'Brown'
my_teeth='White'
my_hair='Black'
print "Let's talk about %s" % my_name
print "I am %d inches tall" % my_height
print "I am %d LBS heavy" %my_weight
print "I have %s eyes , %s teeth and %s hair"% (my_eyes,my_teeth,my_hair)
print "My First name is ", my_name[0:5]
my_name= my_name[:6]+ "Dada"
print my_name
| [
"[email protected]"
]
| |
4060c4756bedc230b3f811d7db77fa12fd939ba3 | b02a6441c68303c4e171ca1355d4c4c583a548ec | /aedb-mls/epsilon-compare.py | e815fdf1c6748268aa4ca1d30fbd887f26666b6e | []
| no_license | santiago-iturriaga/itu-maestria | 672da91a1f1fb7abfa0ba1aa24385bc23fdc7b5a | f79e9e6706a8a6bbd7eccf425ee61766e9b4de00 | refs/heads/master | 2020-06-01T07:44:01.550679 | 2015-03-05T02:00:33 | 2015-03-05T02:00:33 | 32,400,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,189 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# epsilon.py
#
# Copyright 2013 Unknown <santiago@marga>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import sys
import math
import re
from scipy.stats import mstats
from scipy import stats
obj = (0,1,0) # min, max, min
def avg_stdev(values):
sum_v = 0.0
for v in values:
sum_v = sum_v + v
avg = sum_v / len(values)
diff_sq = 0.0
for v in values:
diff_sq = diff_sq + pow(v - avg, 2)
stdev = math.sqrt(diff_sq/(len(values)-1))
return (avg, stdev)
def euclidean_distance(point_a, point_b):
distance = 0.0
assert(len(point_a) == len(point_b))
dimension = len(point_a)
for coord in range(dimension):
distance = distance + pow(point_a[coord]-point_b[coord],2)
return math.sqrt(distance)
def epsilon_metric(ref_pf, comp_pf):
assert(len(ref_pf) > 0)
assert(len(comp_pf) > 0)
min_distances = []
for comp_sol in comp_pf:
min_sol = None
min_sol_dist = None
for ref_sol in ref_pf:
if min_sol is None:
min_sol = ref_sol
min_sol_dist = euclidean_distance(ref_sol, comp_sol)
else:
aux_distance = euclidean_distance(ref_sol, comp_sol)
if aux_distance < min_sol_dist:
                    min_sol = ref_sol
min_sol_dist = aux_distance
min_distances.append(min_sol_dist)
return max(min_distances)
def epsilon_jmetal_metric(ref_pf, comp_pf):
assert(len(ref_pf) > 0)
assert(len(comp_pf) > 0)
eps = None
for comp_sol in comp_pf:
eps_j = None
for ref_sol in ref_pf:
assert(len(comp_sol)==len(ref_sol))
for k in range(len(comp_sol)):
if obj[k]==0:
eps_temp = ref_sol[k]-comp_sol[k]
else:
eps_temp = comp_sol[k]-ref_sol[k]
if k==0:
eps_k=eps_temp
elif eps_k < eps_temp:
eps_k=eps_temp
if eps_j is None:
eps_j = eps_k
elif eps_j > eps_k:
eps_j = eps_k
if eps is None:
eps = eps_j
elif eps < eps_j:
eps = eps_j
return eps
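# Mechanically, epsilon_jmetal_metric computes an additive-epsilon-style
# indicator: for each point of comp_pf it takes the minimum over ref_pf of the
# worst signed per-objective gap (orientation given by `obj`), and returns the
# maximum of those minima.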
def main():
if len(sys.argv) != 7:
print("[ERROR] Usage: {0} <best PF> <moea PF> <num. exec.> <computed PF> <num. exec.> <min. coverage>".format(sys.argv[0]))
exit(-1)
best_pf_file = sys.argv[1]
moea_pf_file = sys.argv[2]
moea_num_exec = int(sys.argv[3])
comp_pf_file = sys.argv[4]
comp_num_exec = int(sys.argv[5])
min_cover = float(sys.argv[6])
print("Best PF file : {0}".format(best_pf_file))
print("MOEA PF file : {0} ({1})".format(moea_pf_file, moea_num_exec))
print("Computed PF file: {0} ({1})".format(comp_pf_file, comp_num_exec))
print("Min. coverage : {0}".format(min_cover))
print()
best_pf = []
with open(best_pf_file) as f:
for line in f:
if len(line.strip()) > 0:
data = line.strip().split(" ")
assert(len(data)==3)
if float(data[1]) >= min_cover:
best_pf.append((float(data[0]),float(data[1]),float(data[2])))
moea_pf = []
moea_pf_value = []
for e in range(moea_num_exec):
moea_pf_exec = []
with open(moea_pf_file + "." + str(e)) as f:
for line in f:
if len(line.strip()) > 0:
data = line.strip().split("\t")
if len(data) == 3:
energy = float(data[0])
coverage = -1*float(data[1])
nforwardings = float(data[2])
if coverage > min_cover and energy > 0:
moea_pf_exec.append((energy,coverage,nforwardings))
moea_pf.append(moea_pf_exec)
moea_pf_value.append(epsilon_metric(best_pf, moea_pf_exec))
comp_pf = []
comp_pf_value = []
for e in range(comp_num_exec):
comp_pf_exec = []
with open(comp_pf_file + "." + str(e) + ".out") as f:
for line in f:
if len(line.strip()) > 0:
data = line.strip().split(",")
if len(data) == 10:
if (data[0] != "id"):
energy = float(data[-4])
coverage = float(data[-3])
nforwardings = float(data[-2])
if coverage > min_cover:
comp_pf_exec.append((energy,coverage,nforwardings))
comp_pf.append(comp_pf_exec)
comp_pf_value.append(epsilon_metric(best_pf, comp_pf_exec))
#print ("===================================")
#for i in best_pf:
# print("{0},{1},{2}".format(i[0],i[1],i[2]))
#print ("===================================")
#for i in comp_pf[0]:
# print("{0},{1},{2}".format(i[0],i[1],i[2]))
#print (comp_pf_value[0])
#print ("===================================")
#for i in moea_pf[0]:
# print("{0},{1},{2}".format(i[0],i[1],i[2]))
#print (moea_pf_value[0])
#print ("===================================")
#print(comp_pf_value)
#print(moea_pf_value)
(comp_avg, comp_stdev) = avg_stdev(comp_pf_value)
(moea_avg, moea_stdev) = avg_stdev(moea_pf_value)
(hstatic, pvalue) = mstats.kruskalwallis(moea_pf_value, comp_pf_value)
#(u, prob) = stats.mannwhitneyu(moea_pf_value, comp_pf_value)
print("alg|avg|stdev")
print("comp|{0}|{1}".format(comp_avg, comp_stdev))
print("moea|{0}|{1}".format(moea_avg, moea_stdev))
print("h-static {0}|p-value {1}".format(hstatic, pvalue))
#print("u {0}|prob {1}".format(u, prob))
#0.05 0.01 0.001
if pvalue <= 0.001: print("1x10^-3")
elif pvalue <= 0.01: print("1x10^-2")
elif pvalue <= 0.05: print("5x10^-2")
moea_total = 0
for e in moea_pf:
moea_total = moea_total + len(e)
comp_total = 0
for e in comp_pf:
comp_total = comp_total + len(e)
print("count moea={0} comp={1}".format(moea_total/len(moea_pf), comp_total/len(comp_pf)))
return 0
if __name__ == '__main__':
main()
| [
"santiago.iturriaga@04019538-fc05-46f8-052f-b547bbfd49f0"
]
| santiago.iturriaga@04019538-fc05-46f8-052f-b547bbfd49f0 |
46b48c04fb8bbadb23ad70789b4a6ccc79e2387b | 11074677f25db134ac13345f17ec300524ff442a | /django-ecom/account/models.py | 6687658c6b85fa17cb2396512805fd64d35f18e3 | []
| no_license | leonidBoiko/ecommerce | 508e60bce7b5f849546250b8656dbffc0abe6678 | 13ad80622793b543d8180b081513a257d3418a2c | refs/heads/main | 2023-05-15T10:49:07.934662 | 2021-06-04T11:48:00 | 2021-06-04T11:48:00 | 373,624,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,657 | py | from django.contrib.auth.models import (AbstractBaseUser, BaseUserManager,
PermissionsMixin)
from django.core.mail import send_mail
from django.db import models
from django.utils.translation import gettext_lazy as _
from django_countries.fields import CountryField
class CustomAccountManager(BaseUserManager):
def create_superuser(self, email, user_name, password, **other_fields):
other_fields.setdefault("is_staff", True)
other_fields.setdefault("is_superuser", True)
other_fields.setdefault("is_active", True)
if other_fields.get("is_staff") is not True:
raise ValueError("Superuser must be assigned to is_staff=True.")
if other_fields.get("is_superuser") is not True:
raise ValueError(
"Superuser must be assigned to is_superuser=True.")
return self.create_user(email, user_name, password, **other_fields)
def create_user(self, email, user_name, password, **other_fields):
if not email:
raise ValueError(_("You must provide an email address"))
email = self.normalize_email(email)
user = self.model(email=email, user_name=user_name, **other_fields)
user.set_password(password)
user.save()
return user
class UserBase(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(_("email address"), unique=True)
user_name = models.CharField(max_length=150, unique=True)
first_name = models.CharField(max_length=150, blank=True)
about = models.TextField(_("about"), max_length=500, blank=True)
# Delivery details
country = CountryField()
phone_number = models.CharField(max_length=15, blank=True)
postcode = models.CharField(max_length=12, blank=True)
address_line_1 = models.CharField(max_length=150, blank=True)
address_line_2 = models.CharField(max_length=150, blank=True)
town_city = models.CharField(max_length=150, blank=True)
# User Status
is_active = models.BooleanField(default=False)
is_staff = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
objects = CustomAccountManager()
USERNAME_FIELD = "email"
REQUIRED_FIELDS = ["user_name"]
    class Meta:
        verbose_name = "Account"
        verbose_name_plural = "Accounts"
def email_user(self, subject, message):
send_mail(
subject,
message,
"[email protected]",
[self.email],
fail_silently=False,
)
def __str__(self):
return self.user_name
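# Illustrative note (assumption, not part of this file): activating this
# custom user model requires AUTH_USER_MODEL = "account.UserBase" in the
# project settings, after which the manager above is exercised like:
#   UserBase.objects.create_user(
#       email="[email protected]", user_name="shopper", password="s3cret")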
| [
"[email protected]"
]
| |
a3aa43118751541e2a307ba24d34db091940fc53 | 85ed87b30b2ee23141898671714daf03910f8635 | /getRandomSamAllHits.py | ca78ad49a5dbf664693e27d64be036dd2240beac | []
| no_license | boruch/RNA-seq-0.1.3 | 92fd6d89985f16ff202f20049cce4ea71f1763a2 | c460810f766db2aa3583739cb6281dcd9fdeab50 | refs/heads/master | 2020-04-29T09:45:27.372852 | 2014-03-27T23:20:54 | 2014-03-27T23:20:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 790 | py | import sys
def getRandomSamAllHits(sam, numReads):
import random
f=open(sam,"r")
g=open(str("sample_any_"+str(numReads)+"_"+sam),"w")
line=f.readline()
    # header lines start with "@"; compare with ==, not identity `is`
    while line[0] == "@":
g.write(line)
line=f.readline()
f.seek(-1,2)
end=f.tell()
ids={}
nb=1000
f.seek(nb,0)
    window = end // numReads
    while len(ids) < numReads and nb < end:
        rbt = random.randrange(1, window)
        f.seek(rbt, 1)
        # the first readline() discards the partial line we landed on,
        # the second reads a whole record
        line = f.readline()
        line = f.readline()
        nb = f.tell()
        id = line
g.write(id)
ids[id]=1
f.close()
g.close()
return
sys.path.append("/u/home/eeskin/charlesb/PIPELINE/RNA-seq-0.1.3/bin")
from getRandomSamAllHits import *
sam=sys.argv[1]
numReads=sys.argv[2]
getRandomSamAllHits(sam,int(numReads))
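# Example invocation (hypothetical file name):
#   python getRandomSamAllHits.py alignments.sam 1000
# which writes the sampled read IDs to sample_any_1000_alignments.sam.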
| [
"[email protected]"
]
| |
44caae0369ae3b0650a6e50733c989c424ea1f2d | 06ee29e29e090ab81c3d9905a8a0bcc639ec387a | /2.Monk and Inversions.py | 583408f5188a4aeef6cc467e44238b9aea79853f | []
| no_license | thisissagnik/HackerEarth_Code_Practice | 9606365e9f819d92838f7bb7422450d8976a6a03 | 9cfaf3dc903d656225e3d68b97a05a7ae12f02a3 | refs/heads/master | 2023-05-27T13:43:53.703762 | 2021-05-29T14:17:57 | 2021-05-29T14:17:57 | 306,936,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | def cnt_inversion(n, arr):
count = 0
index_com = []
for i in range(n):
for j in range(n):
index_com.append((i, j))
for x, y in index_com:
for p, q in index_com:
if x <= p and y <= q:
if arr[x][y] > arr[p][q]:
count += 1
return count
# Driver Program
def main():
t = input()
for _ in range(int(t)):
n = int(input())
arr = []
for _ in range(n):
arr.append(list(map(int, input().strip().split())))
print(cnt_inversion(n, arr))
main()
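# Worked example (hand-checked, not part of the submission): for the matrix
#   4 3
#   2 1
# the pairs with x <= p, y <= q and arr[x][y] > arr[p][q] are
# (0,0)-(0,1), (0,0)-(1,0), (0,0)-(1,1), (0,1)-(1,1) and (1,0)-(1,1),
# so cnt_inversion(2, [[4, 3], [2, 1]]) returns 5.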
| [
"[email protected]"
]
| |
d71a9c81d2b1ec632e59baa49eb16b03c6266e56 | f2b150b6ea3da084f5f1e48c817d7f45434cdbdb | /eva_benton/planning_pseudocode_challenge.py | 74ff1e4f25322eb52bdfb2d24577710aaf1794ae | []
| no_license | jaimienakayama/wolfpack_pod_repo | a3ccd2651f271719bad73c24cdc5c7760befbb75 | a3325cba163a4d333c123d74afc548308f834d9c | refs/heads/main | 2023-04-19T05:49:34.207213 | 2021-05-05T23:52:26 | 2021-05-05T23:52:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,222 | py | '''
Planning & pseudocode challenge!
For each piece here, write out the pseudocode as comments FIRST, then write your code!
At many points, you'll have to choose between using a list vs. dictionary. Justify your choices!
'''
'''
1. Shipping
You are building a system to handle shipping orders. Each order should have 3 pieces of information:
-destination (string) (for the purposes of this challenge this can be any place, no need to make a full address)
-date_shipped (string)
-weight (float) (how many pounds the package is)
Will you choose to make each shipping order as a dictionary or list? Why?
Assign 3 separate orders to 3 separate variables
'''
print('\nPART 1')
'''
Each order will contain a:
destination which will be a string, a
date_shipped which will also be a string, and a
weight which will be a float.
'''
# I am choosing to create a dictionary instead of a list because dictionaries allow me to use labels.
# I am also assigning 3 separate orders to the 3 separate variables created.
order_1 = {'destination' : 'New York','date_shipped' : '03/11/2021','weight' : 5}
order_2 = {'destination' : 'California','date_shipped' : '03/11/2021','weight' : 5}
order_3 = {'destination' : 'Texas','date_shipped' : '03/11/2021','weight' : 5}
print(order_1)
print(order_2)
print(order_3)
'''
2. Building the database
Now, let's put the orders all into a database togther (all the orders are stored in 1 variable).
Your database as a whole can either be a dictionary or a list (hint, you'll want nesting!).
Print out the database to make sure all the info is in there.
'''
print('\nPART 2')
'''I will create a single variable and database to put all of the orders into.
'''
# Here I am creating a database by using 'order_database' as a variable,
# in a list that includes each of the orders to complete the database.
order_database = [order_1, order_2, order_3]
print(order_database)
'''
3. Define a function called add_order() that adds one more order to your database,
and make sure it works!
Any new orders should be in the same format as your existing ones.
'''
print('\nPART 3')
# Defining an "add_order" function to facilitate the process of adding more orders,
# to be included in the database as needed using the append function, which will work
# because this a list and not a dictionary.
def add_order(order_database, order):
order_database.append(order)
# Creating two new orders that include all order information same as previous orders.
order_4 = {'destination' : 'Guam','date_shipped' : '03/12/2021','weight' : 10.5}
order_5 = {'destination' : 'Prague','date_shipped' : '03/12/2021','weight' : 10.5}
# Adding the two new orders to the order_database through add_order(),
# which also confirms the function works.
add_order(order_database, order_4)
add_order(order_database, order_5)
print(order_database)
'''
4. Define a function called complete_order() to mark a specific order in your database complete
This means you'll need a new piece of data inside the order that is True when the order is complete.
Test this out and print out your database to make sure it works!
HINT: Think about how your choice of list/dictionary in part 2 informs how someone would reference an order in the database
'''
print('\nPART 4')
# Defining the complete_order function to designate a specific order
# (referenced by its position in the database list) as completed, using a
# real boolean True rather than the string 'True'.
def complete_order(order_database, order):
    order_database[order]['complete'] = True
# Marking the first order complete and printing the database at this point
# to make sure the coding works before I continue.
complete_order(order_database, 0)
print(order_database)
# I can also create a function that returns a list of all of the completed
# orders, then reference specific orders in that list by item number based
# on where they exist in the new list: ie., [0, 1, 2, 3, etc.].
def complete_order_list(order_database):
    return [order for order in order_database if order.get('complete')]
# Running to make sure the coding works for 'complete_order_list'.
print(complete_order_list(order_database))
| [
"[email protected]"
]
| |
a14be45bebd00cf7e9681b2e864d3db524bca8d5 | d159f0c3774dce7f26db6e5c1035c9358be57ef2 | /SIG2019/function_tests.py | 5cbc30ee8974da6992a0878a5be4ab8a046a27ab | []
| no_license | Susanou/MachineLearning | 77f11d4376052260c85c56989b943c383470e8b5 | 870a4e38777c7ce98965481a70deb7a2026514d4 | refs/heads/master | 2021-07-14T16:46:25.760891 | 2020-07-02T16:12:54 | 2020-07-02T16:12:54 | 176,060,701 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,308 | py | #!/usr/bin/env python3
import mysql.connector
import sys, time
import configparser
import fonctions_reader as fonctions
def test(theme: str):
config = configparser.ConfigParser()
config.read('config.ini')
db = mysql.connector.connect(
host=config['mysqlDB']['host'],
user=config['mysqlDB']['user'],
passwd=config['mysqlDB']['pass'],
db=config['mysqlDB']['db']
)
cursor = db.cursor()
cursor.execute("SELECT * FROM Themes where nom='%s'" % theme)
result = cursor.fetchone()
print(result)
print("\033[0;37;40m Normal text\n")
print("\033[2;37;40m Underlined text\033[0;37;40m \n")
print("\033[1;37;40m Bright Colour\033[0;37;40m \n")
print("\033[3;37;40m Negative Colour\033[0;37;40m \n")
print("\033[5;37;40m Negative Colour\033[0;37;40m\n")
print("\033[1;37;40m \033[2;37:40m TextColour BlackBackground TextColour GreyBackground WhiteText ColouredBackground\033[0;37;40m\n")
print("\033[1;30;40m Dark Gray \033[0m 1;30;40m \033[0;30;47m Black \033[0m 0;30;47m \033[0;37;41m Black \033[0m 0;37;41m")
print("\033[1;31;40m Bright Red \033[0m 1;31;40m \033[0;31;47m Red \033[0m 0;31;47m \033[0;37;42m Black \033[0m 0;37;42m")
print("\033[1;32;40m Bright Green \033[0m 1;32;40m \033[0;32;47m Green \033[0m 0;32;47m \033[0;37;43m Black \033[0m 0;37;43m")
print("\033[1;33;40m Yellow \033[0m 1;33;40m \033[0;33;47m Brown \033[0m 0;33;47m \033[0;37;44m Black \033[0m 0;37;44m")
print("\033[1;34;40m Bright Blue \033[0m 1;34;40m \033[0;34;47m Blue \033[0m 0;34;47m \033[0;37;45m Black \033[0m 0;37;45m")
print("\033[1;35;40m Bright Magenta \033[0m 1;35;40m \033[0;35;47m Magenta \033[0m 0;35;47m \033[0;37;46m Black \033[0m 0;37;46m")
print("\033[1;36;40m Bright Cyan \033[0m 1;36;40m \033[0;36;47m Cyan \033[0m 0;36;47m \033[0;37;47m Black \033[0m 0;37;47m")
print("\033[1;37;40m White \033[0m 1;37;40m \033[0;37;40m Light Grey \033[0m 0;37;40m \033[0;37;48m Black \033[0m 0;37;48m")
def loading_animation(n):
"""Function to animate de waiting time
"""
animation = "|/-\\"
sys.stdout.write("\r[+] Loading " + animation[n % len(animation)])
sys.stdout.flush()
time.sleep(0.5)
return n%len(animation)+1
def connectDB():
"""Fonction utilisee pour se connecter a la base de donnee
Returns
-------
mysql.connector
database object to use for cursor and commits
"""
print("\033[1;32;40m[+] \033[0m Accessing DB")
config = configparser.ConfigParser()
config.read('config.ini')
db = mysql.connector.connect(
host=config['mysqlDB']['host'],
user=config['mysqlDB']['user'],
passwd=config['mysqlDB']['pass'],
db=config['mysqlDB']['db']
)
return db
def query_test(word: str, theme: str):
db = connectDB()
cursor = db.cursor()
occurence_query = ("""
SELECT frequence FROM frequences
where frequences.mot=(select id from word where mot=%s)
and frequences.theme=(select id from themes where nom=%s )
""")
cursor.execute(occurence_query, (word, theme))
freq = cursor.fetchone()[0]
string = ("%s")
print(string % (theme))
total_query = ("""
select n from total
where theme='%s'
""")
cursor.execute(total_query % theme)
total = cursor.fetchone()[0]
print(freq)
print(total)
print("resultat= ", freq/total)
return freq/total, total
def get_all_url():
"""Fonction permettant d'obtenir toutes les URLs sans prendre en compte
le flag
Returns
-------
list
Renvoi la liste des URLs
"""
db = fonctions.connectDB()
cursor = db.cursor()
cursor.execute("SELECT url, cluster FROM url")
urls = cursor.fetchall()
cursor.close()
db.close()
return urls
if __name__ == "__main__":
test = get_all_url()
print(test)
print(test[0])
| [
"[email protected]"
]
| |
c8705454f5b80ca5aca9c2228cd462665605112d | f8e03a0724516b7cc2299f6c7a8cef544fa32484 | /source/pic2card/mystique/group_design_objects.py | e59231d840bf9b34a839f234137c4999867a8772 | [
"MIT"
]
| permissive | isabella232/AdaptiveCards | cc3904f0782bd94087ae0a0df0ee2db954facdde | 766750517196d05f4466941647e07a8a298257b2 | refs/heads/main | 2023-03-07T22:13:55.327587 | 2020-11-17T02:02:15 | 2020-11-17T02:02:15 | 313,699,024 | 0 | 0 | MIT | 2021-02-23T16:14:48 | 2020-11-17T17:51:17 | null | UTF-8 | Python | false | false | 21,437 | py | """Module for grouping deisgn objects into different containers"""
from operator import itemgetter
from typing import List, Dict, Callable, Tuple, Optional
from mystique import config
from mystique.extract_properties import CollectProperties
class GroupObjects:
"""
Handles the grouping of given list of objects for any set conditions that
is passed.
"""
def object_grouping(self, design_objects: List[Dict],
condition: Callable[[Dict, Dict],
bool]) -> List[List[Dict]]:
"""
Groups the given List of design objects for the any given condition.
@param design_objects: objects
@param condition: Grouping condition function
@return: Grouped list of design objects.
"""
groups = []
grouped_positions = []
for ctr1, design_object1 in enumerate(design_objects):
temp_list = []
for ctr2, design_object2 in enumerate(design_objects):
if condition(design_object1, design_object2):
present = False
present_position = -1
append_object = False
append_position = -1
for ctr, gr in enumerate(groups):
if design_object2 in gr:
present = True
present_position = ctr
if design_object1 in gr:
append_object = True
append_position = ctr
if not present and not append_object:
temp_list.append(design_object2)
grouped_positions.append(ctr2)
elif not present and append_object:
groups[append_position].append(design_object2)
grouped_positions.append(ctr2)
elif present and not append_object:
groups[present_position].append(design_object1)
grouped_positions.append(ctr1)
elif (present and append_object and
present_position != append_position):
groups[present_position] += groups[append_position]
del groups[append_position]
if temp_list:
groups.append(temp_list)
for ctr, design_object in enumerate(design_objects):
if ctr not in grouped_positions:
groups.append([design_object])
return groups
class ImageGrouping(GroupObjects):
"""
    Groups the image objects of the adaptive card into imagesets or keeps
    them as individual image objects.
"""
# Image objects within the 10px ymin range and 100px range difference are
# grouped into imagesets.
IMAGE_SET_YMIN_RANGE = 10.0
IMAGE_SET_X_RANGE = 100.0
def __init__(self, card_arrange):
self.card_arrange = card_arrange
def imageset_condition(self, design_object1: Dict,
design_object2: Dict) -> bool:
"""
Returns a condition boolean value for grouping image objects into
imagesets
@param design_object1: image object
@param design_object2: image object
@return: boolean value
"""
if design_object1.get("xmin") < design_object2.get("xmin"):
xmax = design_object1.get("xmax")
xmin = design_object2.get("xmin")
else:
xmax = design_object2.get("xmax")
xmin = design_object1.get("xmin")
ymin_diff = abs(
design_object1.get("ymin") - design_object2.get("ymin")
)
x_diff = abs(xmax - xmin)
return (ymin_diff <= self.IMAGE_SET_YMIN_RANGE
and x_diff <= self.IMAGE_SET_X_RANGE)
def group_image_objects(self, image_objects, body, objects, ymins=None,
is_column=None) -> [List, Optional[Tuple]]:
"""
Groups the image objects into imagesets which are in
closer ymin range.
@param image_objects: list of image objects
        @param body: list of card design elements
        @param ymins: list of ymins of the card design
                      elements
@param objects: list of all design objects
@param is_column: boolean value to check if an object is inside a
columnset or not
@return: List of remaining image objects after the grouping if the
grouping is done outside the columnset container
else returned list of remaining image objects along
with its coordinate values.
"""
# group the image objects based on ymin
groups = self.object_grouping(image_objects, self.imageset_condition)
delete_positions = []
design_object_coords = []
for group in groups:
group = [dict(t) for t in {tuple(d.items()) for d in group}]
# group = self.remove_duplicates(group)
if len(group) > 1:
group = sorted(group, key=lambda i: i["xmin"])
image_set = {
"type": "ImageSet",
"imageSize": "Auto",
"images": []
}
sizes = []
alignment = []
image_xmins = []
for ctr, design_object in enumerate(group):
index = objects.index(design_object)
if index not in delete_positions:
delete_positions.append(index)
sizes.append(design_object.get("size", "Auto"))
alignment.append(design_object.get(
"horizontal_alignment", "Left"))
image_xmins.append(design_object.get("xmin"))
self.card_arrange.append_objects(design_object,
image_set["images"])
image_set["images"] = [x for _, x in sorted(
zip(image_xmins,
image_set["images"]),
key=lambda x: x[0])]
# Assign the imageset's size and alignment property based on
            # each image's alignment and size properties inside the imageset
image_set["imageSize"] = max(set(sizes), key=sizes.count)
preference_order = ["Left", "Center", "Right"]
if len(alignment) == len(list(set(alignment))):
alignment.sort(key=(preference_order + alignment).index)
image_set["horizontalAlignment"] = alignment[0]
else:
image_set["horizontalAlignment"] = max(set(alignment),
key=alignment.count)
image_set["coords"] = str(group[0].get("coords"))
body.append(image_set)
if ymins:
ymins.append(design_object.get("ymin"))
if is_column:
design_object_coords.append(group[0].get("xmin"))
design_object_coords.append(group[0].get("ymin"))
design_object_coords.append(group[0].get("xmax"))
design_object_coords.append(group[0].get("ymax"))
objects = [design_objects for ctr, design_objects in enumerate(objects)
if ctr not in delete_positions]
if is_column:
return objects, design_object_coords
else:
return objects
class ColumnsGrouping(GroupObjects):
"""
Groups the design objects into different columns of a columnset
"""
def __init__(self, card_arrange):
self.card_arrange = card_arrange
def horizontal_inclusive(self, object_one: Dict, object_two: Dict) -> bool:
"""
        Returns the horizontal inclusive condition
@param object_one: design object one
@param object_two: design object two
@return: the boolean value of the inclusive condition
"""
return (((object_one and object_two) and (
(object_one.get("xmin") <= object_two.get(
"xmin") <= object_one.get(
"xmax") and object_one.get(
"xmin") <= object_two.get(
"xmax") <= object_one.get(
"xmax"))
or (object_two.get("xmin") <= object_one.get(
"xmin") <= object_two.get(
"xmax") <= object_one.get("xmax") and
object_two.get(
"xmax") <= object_one.get(
"xmax")
) or (object_one.get(
"xmin") <= object_two.get(
"xmin") <= object_one.get(
"xmax") <= object_two.get(
"xmax") and object_two.get(
"xmax") >= object_one.get("xmin")
))
) or ((object_two and object_one) and
((object_two.get("xmin")
<= object_one.get("xmin")
<= object_two.get("xmax")
and object_two.get("xmin")
<= object_one.get("xmax")
<= object_two.get("xmax"))
or (object_one.get("xmin")
<= object_one.get("xmin")
and object_one.get("xmax")
<= object_two.get("xmax")
and object_two.get("xmin")
<= object_one.get("xmax")
<= object_two.get("xmax"))
or (object_two.get("xmin")
<= object_one.get("xmin")
<= object_two.get("xmax")
<= object_one.get("xmax")
and object_one.get("xmax")
>= object_two.get("xmin"))))
)
def vertical_inclusive(self, object_one: Dict, object_two: Dict) -> bool:
"""
Returns the vertical inclusive condition
@param object_one: design object one
@param object_two: design object two
@return: the boolean value of the inclusive condition
"""
return (
((object_one and object_two) and
((object_one.get("ymin")
<= object_two.get("ymin") <= object_one.get("ymax")
and object_one.get("ymin") <= object_two.get("ymax")
<= object_one.get("ymax"))
or (object_two.get("ymin") <= object_one.get(
"ymin") <= object_two.get(
"ymax") <= object_one.get("ymax")
and object_two.get("ymax") <= object_one.get("ymax"))
or (object_one.get("ymin") <= object_two.get("ymin")
<= object_one.get("ymax") <= object_two.get("ymax")
and object_two.get("ymax") >= object_one.get("ymin"))
))
or ((object_two and object_one)
and ((object_two.get("ymin") <= object_one.get("ymin")
<= object_two.get("ymax") and object_two.get("ymin")
<= object_one.get("ymax") <= object_two.get("ymax"))
or (object_one.get("ymin") <= object_one.get("ymin")
and object_one.get("ymax")
<= object_two.get("ymax")
and object_two.get("ymin")
<= object_one.get("ymax")
<= object_two.get("ymax"))
or (object_two.get("ymin") <= object_one.get("ymin")
<= object_two.get("ymax")
<= object_one.get("ymax")
and object_one.get("ymax")
>= object_two.get("ymin"))
))
)
def max_min_difference(self, design_object1: Dict,
design_object2: Dict, way: str) -> float:
"""
        Returns the max-min gap between the 2 design objects along one axis
        @param design_object1: design object one
        @param design_object2: design object two
        @param way: "x" for the xmax-xmin gap, anything else for ymax-ymin
        @return: rounded max-min gap
"""
max = "ymax"
min = "ymin"
if way == "x":
max = "xmax"
min = "xmin"
if design_object1.get(min) < design_object2.get(min):
return round(abs(design_object2.get(min) - design_object1.get(max)))
else:
return round(abs(design_object1.get(min) - design_object2.get(max)))
def columns_condition(self, design_object1: Dict,
design_object2: Dict) -> bool:
"""
Returns a condition boolean value for grouping objects into
columnsets
@param design_object1: design object
@param design_object2: design object
@return: boolean value
"""
y_diff = self.max_min_difference(design_object1, design_object2,
way="y")
object_one = None
object_two = None
if (design_object1.get("object") == "image"
and design_object2.get("object") != "image"):
object_one = design_object1
object_two = design_object2
elif (design_object2.get("object") == "image"
and design_object1.get("object") != "image"):
object_one = design_object2
object_two = design_object1
elif (design_object2.get("object") == "image"
and design_object1.get("object") == "image"):
object_one = design_object1
object_two = design_object2
return (design_object1 != design_object2 and (
(abs(design_object1.get("ymin", 0)
- design_object2.get("ymin", 0))
<= config.COLUMNSET_GROUPING.get("ymin_difference", ""))
or self.vertical_inclusive(object_one, object_two)
or (y_diff <
config.COLUMNSET_GROUPING.get("ymax-ymin_difference", "")
and self.horizontal_inclusive(object_one, object_two)
)))
def columns_row_condition(self, design_object1: Dict,
design_object2: Dict) -> bool:
"""
Returns a condition boolean value for grouping columnset grouped
objects into different columns.
@param design_object1: design object
@param design_object2: design object
@return: boolean value
"""
extract_properites = CollectProperties()
x_diff = self.max_min_difference(design_object1, design_object2,
way="x")
point1 = (design_object1.get("xmin"), design_object1.get("ymin"),
design_object1.get("xmax"), design_object1.get("ymax"))
point2 = (design_object2.get("xmin"), design_object2.get("ymin"),
design_object2.get("xmax"), design_object2.get("ymax"))
if design_object1.get("ymin") < design_object2.get("ymin"):
object_one = design_object1
object_two = design_object2
else:
object_one = design_object2
object_two = design_object1
condition = (design_object1 != design_object2
and ((design_object1.get("object") == "image"
and design_object2.get("object") == "image"
and abs(design_object1.get("ymin")
- design_object2.get("ymin"))
<= config.COLUMNSET_GROUPING.get("ymin_difference")
and x_diff <= config.COLUMNSET_GROUPING.get(
"xmax-xmin_difference", ""))
or self.horizontal_inclusive(object_one, object_two)
)
)
intersection = extract_properites.find_iou(point1, point2,
columns_group=True)[0]
if intersection and point1 != point2:
condition = condition and (
intersection
and (
(object_one.get("xmin") <=
object_two.get("xmin") <= object_one.get("xmax")
and object_one.get("xmin") <=
object_two.get("xmax") <= object_one.get("xmax")
)
or (object_two.get("xmin") <=
object_one.get("xmin") <= object_two.get("xmax")
and object_two.get("xmin") <=
object_one.get("xmax") <= object_two.get("xmax")
)
)
)
return condition
class ChoicesetGrouping(GroupObjects):
"""
Groups the radiobutton objects of the adaptive card objects into a
choiceset or individual radiobuttion objects.
"""
# The design objects are grouped in choicesets based on 2 conditions:
# If the radiobuttons are within the range of 10px of ymax - ymin
# If the radiobuttons are within the rnage of 30px of ymins.
CHOICESET_Y_RANGE = 10
CHOICESET_YMIN_RANGE = 30
def __init__(self, card_arrange):
self.card_arrange = card_arrange
def choiceset_condition(self, design_object1: Dict,
design_object2: Dict) -> bool:
"""
        Returns a condition boolean value for grouping radio button objects
        into a choiceset
        @param design_object1: radio button object
        @param design_object2: radio button object
@return: boolean value
"""
design_object1_ymin = float(design_object1.get("ymin"))
design_object2_ymin = float(design_object2.get("ymin"))
difference_in_ymin = abs(design_object1_ymin - design_object2_ymin)
if design_object1_ymin > design_object2_ymin:
difference_in_y = float(
design_object2.get("ymax")) - design_object1_ymin
else:
difference_in_y = float(
design_object1.get("ymax")) - design_object2_ymin
return (abs(difference_in_y) <= self.CHOICESET_Y_RANGE
and difference_in_ymin <= self.CHOICESET_YMIN_RANGE)
def group_choicesets(self, radiobutton_objects: Dict, body: List[Dict],
ymins=None) -> None:
"""
Groups the choice elements into choicesets based on
the closer ymin range
@param radiobutton_objects: list of individual choice
elements
        @param body: list of card design elements
        @param ymins: list of ymins of the design elements
"""
groups = []
radio_buttons = []
if isinstance(radiobutton_objects, dict):
for key, values in radiobutton_objects.items():
radio_buttons.append(values)
radiobutton_objects = radio_buttons
if len(radiobutton_objects) == 1:
# radiobutton_objects = [radiobutton_objects]
groups = [radiobutton_objects]
if not groups:
groups = self.object_grouping(radiobutton_objects,
self.choiceset_condition)
for group in groups:
group = sorted(group, key=itemgetter("ymin"))
choice_set = {
"type": "Input.ChoiceSet",
"choices": [],
"style": "expanded"
}
alignment = []
for design_object in group:
self.card_arrange.append_objects(design_object,
choice_set["choices"])
alignment.append(design_object.get("horizontal_alignment",
"Left"))
preference_order = ["Left", "Center", "Right"]
if len(alignment) == len(list(set(alignment))):
alignment.sort(key=(preference_order + alignment).index)
choice_set["horizontalAlignment"] = alignment[0]
else:
choice_set["horizontalAlignment"] = max(set(alignment),
key=alignment.count)
body.append(choice_set)
if ymins is not None and len(group) > 0:
ymins.append(design_object.get("ymin"))
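# Minimal standalone sketch (an assumption for illustration -- run as a
# script, outside the Mystique pipeline) of how GroupObjects.object_grouping
# pairs with a condition callable: objects whose "ymin" values lie within
# 5px of each other land in the same group.
if __name__ == "__main__":
    _objects = [{"ymin": 10.0}, {"ymin": 12.0}, {"ymin": 40.0}]
    _groups = GroupObjects().object_grouping(
        _objects,
        lambda a, b: a is not b and abs(a["ymin"] - b["ymin"]) <= 5.0)
    print(_groups)
    # -> [[{'ymin': 12.0}, {'ymin': 10.0}], [{'ymin': 40.0}]]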
| [
"[email protected]"
]
| |
8ef6b58674a55f6236df4da9f882ab9310c12fb8 | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/GstGL/GLMemoryAllocatorClass.py | 09d94c14d0970b096b8877bf3ada06ef684d53ce | []
| no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 5,176 | py | # encoding: utf-8
# module gi.repository.GstGL
# from /usr/lib64/girepository-1.0/GstGL-1.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.repository.Gst as __gi_repository_Gst
import gi.repository.GstBase as __gi_repository_GstBase
import gobject as __gobject
class GLMemoryAllocatorClass(__gi.Struct):
"""
:Constructors:
::
GLMemoryAllocatorClass()
"""
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self): # real signature unknown; restored from __doc__
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __weakref__(self, *args, **kwargs): # real signature unknown
pass
copy = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
map = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
parent_class = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
unmap = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_padding = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(GLMemoryAllocatorClass), '__module__': 'gi.repository.GstGL', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'GLMemoryAllocatorClass' objects>, '__weakref__': <attribute '__weakref__' of 'GLMemoryAllocatorClass' objects>, '__doc__': None, 'parent_class': <property object at 0x7f56a4000bd0>, 'map': <property object at 0x7f56a4000cc0>, 'copy': <property object at 0x7f56a4000db0>, 'unmap': <property object at 0x7f56a4000ea0>, '_padding': <property object at 0x7f56a4000f90>})"
__gtype__ = None # (!) real value is '<GType void (4)>'
__info__ = StructInfo(GLMemoryAllocatorClass)
| [
"[email protected]"
]
| |
9f3670c4d707a3e54c70d0a55f2059c21cb3d607 | bc39bf7466f06503807bb39366a99ecdd5cab81e | /rdfttl_to_csv.py | 3a92fb8f6c76f113afdd3fd6e763951eeabad5c7 | []
| no_license | SreeSingamsetty/Master-Thesis | cd68e32d243c81865bc2cb4f8c55f1d3f5d43d63 | b6a27acbe1919f07f04194249df22d3d8e5a6f88 | refs/heads/master | 2020-03-18T06:46:24.399820 | 2018-05-22T13:04:22 | 2018-05-22T13:04:22 | 134,414,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | from rdflib import Graph
g = Graph()
g.parse("short_abstracts_copy.ttl", format="ttl")
g.serialize("short_abstracts_copy.csv", format="ttl", base="http://dbpedia.org/resource/")
| [
"[email protected]"
]
| |
e3a69ab66c5f9cb1d085346a0716780128beced1 | 7334d7669807d3bf9fe165fe916ca7b1a06f8b7c | /app.py | 4ed8e62dd07a1e26c268baf57efc01886f061091 | []
| no_license | Sreehari-BGK/Tinkerhub_Practicial_AI_Bootcamp_Project | 3a75a414d33328f31a592a273788d9df89b02b57 | 651c7d5bcf3009603c678e10bef21e98fb4f80aa | refs/heads/main | 2023-07-23T13:02:54.599057 | 2021-09-10T07:54:03 | 2021-09-10T07:54:03 | 404,999,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,197 | py | from __future__ import division, print_function
import sys
import os
import glob
import re
import numpy as np
from keras.applications.imagenet_utils import preprocess_input, decode_predictions
from keras.models import load_model
from keras.preprocessing import image
from keras import backend as K
from flask import Flask, redirect, url_for, request, render_template
from werkzeug.utils import secure_filename
from gevent.pywsgi import WSGIServer
# (scipy.misc.imread/imresize were removed in SciPy >= 1.2 and were unused here)
import tensorflow as tf
import skimage.transform as st
from skimage.transform import resize
app = Flask(__name__)
MODEL_PATH = 'model.h5'
config = tf.ConfigProto(
device_count={'GPU': 1},
intra_op_parallelism_threads=1,
allow_soft_placement=True
)
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.6
session = tf.Session(config=config)
K.set_session(session)
# Load your trained model
model = load_model(MODEL_PATH)
model._make_predict_function() # Necessary
print('Model loaded. Start serving...')
# You can also use pretrained model from Keras
# Check https://keras.io/applications/
# from keras.applications.resnet50 import ResNet50
# model = ResNet50(weights='imagenet')
graph = tf.get_default_graph() # Change
print('Model loaded. Check http://127.0.0.1:5000/')
# def classify(image, model):
# class_names = ['airplane','automobile','bird','cat','deer',
# 'dog','frog','horse','ship','truck']
# preds = model.predict(image)
# classification = np.argmax(preds)
# final = pd.DataFrame({'name' : np.array(class_names),'probability' :preds[0]})
# return final.sort_values(by = 'probability',ascending=False),class_names[classification]
def model_predict(img_path, model):
try:
with session.as_default():
with session.graph.as_default():
img = image.load_img(img_path, target_size=(32, 32,3))
# Preprocessing the image
# x = image.img_to_array(img)
# x = np.true_divide(x, 255)
x = np.expand_dims(img, axis=0)
# x = preprocess_input(x, mode='caffe')
preds = model.predict(np.array(x))
return preds
    except Exception as ex:
        # no logger object was ever configured in this script, so report
        # the failure directly instead of calling an undefined `log`
        print('Seatbelt Prediction Error', ex, ex.__traceback__.tb_lineno)
@app.route('/', methods=['GET'])
def index():
# Main page
return render_template('index.html')
@app.route('/predict', methods=['GET', 'POST'])
def upload():
if request.method == 'POST':
# Get the file from post request
f = request.files['file']
basepath = os.path.dirname(__file__)
file_path = os.path.join(
basepath, 'uploads', secure_filename(f.filename))
f.save(file_path)
# # image_url = request.form['image_url']
# # image = io.imread(image_url)
# image_small = st.resize(file_path, (32,32,3))
# x = np.expand_dims(image_small.transpose(2, 0, 1), axis=0)
# final,pred_class = classify(x, model)
# print(pred_class)
# print(final)
#Store model prediction results to pass to the web page
# message = "Model prediction: {}".format(pred_class)
# Make prediction
preds = model_predict(file_path, model)
print(preds)
number_to_class = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
index = np.argsort(preds[0,:])
# for x in range(len(number_to_class)):
# if number_to_class[x] == 1:
# print(preds[0][i])
# Process your result for human
pred_class = preds.argmax(axis=-1) # Simple argmax
# pred_class = decode_predictions(preds, top=1) # ImageNet Decode
# result = str(pred_class[0][1]) # Convert to string
return str(number_to_class[index[9]])+str(" index : ")+str(pred_class)
return None
if __name__ == '__main__':
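    # Example client call against the dev server below (hypothetical image):
    #   curl -F "file=@sample.png" http://127.0.0.1:5000/predict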
app.run() | [
"[email protected]"
]
| |
aa36fc5578e1ff9d3e2ca3774590d9e2ac4b034b | 353def93fa77384ee3a5e3de98cfed318c480634 | /.history/week01/hoework01/gettop10frommaoyam01_20200626091702.py | d2722f72cb3c318b6baafd5cd7fd7285bc7c6d98 | []
| no_license | ydbB/Python001-class01 | d680abc3ea1ccaeb610751e3488421417d381156 | ad80037ccfc68d39125fa94d2747ab7394ac1be8 | refs/heads/master | 2022-11-25T11:27:45.077139 | 2020-07-19T12:35:12 | 2020-07-19T12:35:12 | 272,783,233 | 0 | 0 | null | 2020-06-16T18:28:15 | 2020-06-16T18:28:15 | null | UTF-8 | Python | false | false | 2,407 | py | # 使用requests,bs4库,爬取猫眼电影top10的电影名称、电影类型、上映时间,并以utf-8的字符集保存到csv文件中
import requests
from bs4 import BeautifulSoup as bs
maoyanUrl = "https://maoyan.com/board/4";
user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'
header = {
'Content-Type': 'text/plain; charset=UTF-8',
'Cookie' : '__mta=251934006.1593072991075.1593100662316.1593100664951.15; uuid_n_v=v1; uuid=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; _csrf=8557626db9b655cf9050ae7e5b2aab69278c8061c21eca95e1c3cf2130b0b64c; _lxsdk_cuid=172ea8cb247c8-0a73066b1c0a8b-4353760-100200-172ea8cb248c8; _lxsdk=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; mojo-uuid=c457eacb7c1eb59d3d2f6c1f8d75b9c9; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1593072989,1593073002; _lx_utm=utm_source%3Dgoogle%26utm_medium%3Dorganic; __mta=251934006.1593072991075.1593075275703.1593078726963.7; mojo-session-id={"id":"435818e6a726415f46defffa27f7abc6","time":1593100221937}; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1593100665; mojo-trace-id=17; _lxsdk_s=172ec2bff67-0c2-e9f-c64%7C%7C24__mta=251934006.1593072991075.1593100690175.1593100868002.17; uuid_n_v=v1; uuid=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; _csrf=8557626db9b655cf9050ae7e5b2aab69278c8061c21eca95e1c3cf2130b0b64c; _lxsdk_cuid=172ea8cb247c8-0a73066b1c0a8b-4353760-100200-172ea8cb248c8; _lxsdk=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; mojo-uuid=c457eacb7c1eb59d3d2f6c1f8d75b9c9; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1593072989,1593073002; _lx_utm=utm_source%3Dgoogle%26utm_medium%3Dorganic; __mta=251934006.1593072991075.1593075275703.1593078726963.7; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1593100868; _lxsdk_s=172ee2f4a3e-1c2-3a1-5a4%7C%7C1',
# 'Host' : 'http://www.baidu.com',
'Origin': 'https://maoyan.com',
'Referer': 'https://maoyan.com/board/4',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
}
response = requests.get(maoyanUrl,headers=header)
response.encoding = 'utf-8'
bs_info = bs(response.text,"html.parser")
# print(response.text)
for tag in bs_info.find_all('div',attrs={'class' : 'movie-item-content'}):
print(tag)
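# Sketch of the CSV step promised in the header comment (treating the first
# three <p> tags of each entry as name/star/release time is an assumption
# about Maoyan's markup, not verified here):
#   import csv
#   with open('maoyan_top10.csv', 'w', encoding='utf-8', newline='') as fh:
#       writer = csv.writer(fh)
#       writer.writerow(['name', 'star', 'release_time'])
#       for tag in bs_info.find_all('div', attrs={'class': 'movie-item-content'}):
#           writer.writerow([p.text.strip() for p in tag.find_all('p')[:3]])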
| [
"[email protected]"
]
| |
291422589918cff1a01a7a361b4c182bc37e09c5 | 65dba58b620e89db5113a60cf184cd6b26129e05 | /terms.py | 4b022f5ca152be0c0bbc7c9cb89af0bf9d827ca0 | []
| no_license | PriyankVIT/laughing-octo-journey | de3b32d69a170b97e71e4124dee0210e88d66a7b | a6849f4b42527ef4ccc67de6225954447e5a653a | refs/heads/master | 2020-08-23T10:59:00.633921 | 2019-10-21T15:09:15 | 2019-10-21T15:09:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,586 | py | import pandas as pd
import numpy as np
from nltk import sent_tokenize,word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem import LancasterStemmer
from sklearn.metrics.pairwise import cosine_similarity
import networkx as nx
stop = set(stopwords.words("english"))
path="terms.txt"
parsedata=[]
count=0
with open(path) as fp:
while True:
messages=[]
line=fp.readline()
line=line.lower()
        if not line:
            # readline() returns an empty string once the file is exhausted
            break
else:
sent=sent_tokenize(line)
for y in sent:
count+=1
print(y)
messages=[count,y]
parsedata.append(messages)
print(messages)
data= pd.DataFrame(parsedata,columns=['index','article'])
data.to_csv("terms.csv")
print(count)
terms=pd.read_csv("terms.csv")
terms=terms[['index','article']]
def stopwords_removal(line):
line=" ".join(x for x in line.split() if x not in stop)
return line
porter = PorterStemmer()
lancaster=LancasterStemmer()
def stemSentence(sentence):
token_words=word_tokenize(sentence)
token_words
stem_sentence=[]
for word in token_words:
stem_sentence.append(lancaster.stem(word))
stem_sentence.append(" ")
return "".join(stem_sentence)
terms['article']=terms['article'].apply(stopwords_removal)
sentences = []
for s in terms['article']:
sentences.append(sent_tokenize(s))
sentences = [y for x in sentences for y in x] # flatten list
word_embeddings = {}
f = open('./glove/glove.6B.100d.txt', encoding='utf-8')
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
word_embeddings[word] = coefs
f.close()
# Each sentence vector is the average of its words' GloVe vectors; the
# +0.001 below guards against division by zero for empty token lists.
sentence_vectors = []
for i in sentences:
if len(i) != 0:
v = sum([word_embeddings.get(w, np.zeros((100,))) for w in i.split()])/(len(i.split())+0.001)
else:
v = np.zeros((100,))
sentence_vectors.append(v)
sim_mat = np.zeros([len(sentences), len(sentences)])
for i in range(len(sentences)):
for j in range(len(sentences)):
if i != j:
sim_mat[i][j] = cosine_similarity(sentence_vectors[i].reshape(1,100), sentence_vectors[j].reshape(1,100))[0,0]
nx_graph = nx.from_numpy_array(sim_mat)
scores = nx.pagerank(nx_graph)
ranked_sentences = sorted(((scores[i],s) for i,s in enumerate(sentences)), reverse=True)
for i in range(10):
print(ranked_sentences[i][1]) | [
"[email protected]"
]
| |
5e44fefed00ea0624611ac5adddfb6765354a441 | 6b48567be365c5a8129511418b87dd1ace781662 | /artshop/apps.py | ecaad5d816515cf3f594cc3d3fcde153e768304d | []
| no_license | rachitptah/ptah | 8c7e42c5adb57412df2c5b07799d49104abbfc5c | 28757e68c7008b13d1c3ad1880fc8b1e0647e35d | refs/heads/master | 2020-05-29T08:54:08.475804 | 2016-10-15T06:22:59 | 2016-10-15T06:22:59 | 69,261,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | from __future__ import unicode_literals
from django.apps import AppConfig
class ArtshopConfig(AppConfig):
name = 'artshop'
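    # Illustrative note (assumption): Django picks this config up once
    # 'artshop' is listed in the project's INSTALLED_APPS.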
| [
"Rachit Agarwal"
]
| Rachit Agarwal |
c4675dce2279ea3da44cfb77ff37f04d65a568de | b40c523eb48e899763cefbc5cbac1a9538b7524c | /test.py | a55de7e64221ef48a92e6455c286761886ce54cd | []
| no_license | kalexrt/Image-Colorization-using-CNN | b5ad355fa286280a61535bf245015d25d3108b16 | f69f4e7b6e550f22c289e44d977af0602b8309d9 | refs/heads/master | 2023-03-16T08:31:15.299794 | 2018-10-11T08:23:17 | 2018-10-11T08:23:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 905 | py | #dependencies
import numpy as np
import cv2
import os
def read_images(path):
images = []
all_paths = os.listdir(path)
mini_set = all_paths[:400]
for i in mini_set:
file = path+"/"+i
image = cv2.imread(file)
image = cv2.resize(image,(128,128))
images.append(image)
return images
x = read_images("C:/Users/Arghyadeep/Desktop/image colorization/new process/val2017")
#cv2.imshow('image',x[1])
def extract_channels(lab_images):
l_channels = []
a_channels = []
b_channels = []
for i in lab_images:
l,a,b = cv2.split(i)
l_channels.append(l)
a_channels.append(a)
b_channels.append(b)
return np.array(l_channels), np.array(a_channels), np.array(b_channels)
# NOTE: cv2.split only yields true L/a/b planes if the image was first
# converted with cv2.cvtColor(..., cv2.COLOR_BGR2LAB); imread returns BGR.
l,a,b = cv2.split(x[1])
l = np.array(l).reshape(128, 128)
print(l)
cv2.imshow('img', l)
cv2.waitKey(0)  # without a waitKey call the window closes immediately
cv2.destroyAllWindows()
| [
"[email protected]"
]
| |
1196e65bd8eaa474fa05264612daeb33c78b55f2 | 61b02c4ce6aee3ff3d04c35f56c15f312c93928c | /2-strings/8-regex-multiline-patterns.py | 6b6ba7195872bba422a36229b34c938fa8b8d36c | []
| no_license | thales-mro/python-cookbook | 1156c1a5d54b746e46dc49e8db73b324a7f8bd94 | c45e5f698aaeab5c1baa018e209d10d7b26f0a04 | refs/heads/master | 2020-11-25T20:33:43.859266 | 2020-01-15T22:30:03 | 2020-01-15T22:30:03 | 228,833,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | import re
def main():
print("Dot operator does not take into consideration line breaks. Adaptation with non-capture group is needed.")
comment = re.compile(r'/\*((?:.|\n)*?)\*/')
text1 = '/* this is a comment in C */'
text2 = '''/* this is a
multiline comment in C */
'''
print(comment.findall(text1))
print(comment.findall(text2))
print("Or, using the DOTALL flag:")
comment = re.compile(r'/\*(.*?)\*/', re.DOTALL)
print(comment.findall(text1))
print(comment.findall(text2))
if __name__ == "__main__":
main() | [
"[email protected]"
]
| |
615479573109830cab79f8034fdf70a64c7497c5 | 56a813086d56a1c5c7c7dd84cf1da70d98f6cca8 | /csv-tools/find_key_word.py | d6c1af658e9fe88fc7ccadcba0a3905ba596f6b9 | [
"MIT"
]
| permissive | kokdemo/nlp-toolkit-for-pm | 1c475d870748171f26a166c6801bd673d2d2d1c1 | dd34307e8ffc1280443a2268b07e27a5d7bf1f3e | refs/heads/master | 2022-02-01T05:21:53.593690 | 2022-01-19T03:27:22 | 2022-01-19T03:27:22 | 148,502,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 943 | py | import csv
def read_csv(file_path_name,origin_row_header,keyword):
res_arr = []
csv_file = open(file_path_name,"r")
print('文件打开完成,地址在'+file_path_name)
dict_reader = csv.DictReader(csv_file)
for row in dict_reader:
if if_find_keyword(row[origin_row_header],keyword) == 1:
res_arr.append(row)
csv_file.close()
return res_arr
def if_find_keyword(text,keyword):
if text.find(keyword) == -1:
return 0
else:
return 1
def write_csv(data,file_path_name,file_header):
csv_file = open(file_path_name, "w")
dict_writer = csv.DictWriter(csv_file, file_header)
dict_writer.writeheader()
for row in data:
dict_writer.writerow(row)
csv_file.close()
print('文件写入完成,地址在'+file_path_name)
return 0
# Driver: the config names (file_path_name, origin_row_header, keyword,
# file_header) are expected to be defined before this point; the original
# also passed an undefined `data` to write_csv where the read result from
# read_csv was meant.
res_data = read_csv(file_path_name, origin_row_header, keyword)
write_csv(res_data, file_path_name, file_header) | [
"[email protected]"
]
| |
8a344aae06dbeb32785b94bf82c33c8f84c20b41 | 55d13d3e41d8651facf7c26d60de5e8b8ace4be5 | /piedpiper/crab/multicrab-0.py | ac77e6ce915307348ba0838f04a1b7373744c932 | []
| no_license | phylsix/Firefighter | e8ab5fdbde2dab341a67740aa62c5710683e9bab | 8f1d8d6e59b443a8216c70ebdd334b48945aeed0 | refs/heads/master | 2020-12-19T18:31:14.312639 | 2020-08-04T00:35:45 | 2020-08-04T00:35:45 | 235,812,142 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,190 | py | #!/usr/bin/env python
from __future__ import print_function
import os
import sys
import time
from os.path import basename, join
import yaml
from CRABAPI.RawCommand import crabCommand
from crabConfig_0 import *
from Firefighter.piedpiper.utils import *
verbose = False
alwaysDoCmd = True
if os.environ["CMSSW_BASE"] not in os.path.abspath(__file__):
print("$CMSSW_BASE: ", os.environ["CMSSW_BASE"])
print("__file__: ", os.path.abspath(__file__))
sys.exit("Inconsistant release environment!")
BASEDIR = join(os.environ["CMSSW_BASE"], "src/Firefighter/piedpiper")
CONFIG_NAME = sys.argv[1]
assert os.path.isfile(CONFIG_NAME)
def main():
multiconf = yaml.load(open(CONFIG_NAME).read())
gridpacks = multiconf["gridpacks"]
njobs = multiconf["njobs"]
year = multiconf["year"]
lxy = multiconf["lxy"]
ctaus = multiconf.get("ctaus", None)
assert len(gridpacks) == len(ctaus)
ctaumap = dict(zip(gridpacks, ctaus))
config.Data.totalUnits = config.Data.unitsPerJob * njobs
config.Data.outLFNDirBase += "/{0}".format(year)
# loop through
donelist = list()
for gridpack in gridpacks:
print("gridpack:", gridpack)
#'SIDM_XXTo2ATo4Mu_mXX-1000_mA-0p25_slc6_amd64_gcc481_CMSSW_7_1_30_tarball.tar.xz'
gridpack_name = basename(gridpack)
## outputPrimaryDataset: SIDM_XXTo2ATo4Mu or SIDM_XXTo2ATo2Mu2e
config.Data.outputPrimaryDataset = gridpack_name.split("_mXX")[0]
## outputDatasetTag: mXX-1000_mA-0p25_lxy-0p3_ctau-0p001875_GENSIM_2018
mxxma = gridpack_name.split("_", 2)[-1].split("_slc")[0]
lxystr = str(lxy).replace(".", "p")
ctaustr = str(ctaumap[gridpack]).replace(".", "p")
config.Data.outputDatasetTag = "{}_lxy-{}_ctau-{}_GENSIM_{}".format(
mxxma, lxystr, ctaustr, year
)
## requestName
config.General.requestName = "_".join(
[
config.Data.outputPrimaryDataset,
config.Data.outputDatasetTag,
time.strftime("%y%m%d-%H%M%S"),
]
)
if gridpack.startswith("root://"):
cpcmd = "xrdcp -f {0} {1}".format(gridpack, join(BASEDIR, "cfg/gridpack.tar.xz"))
elif gridpack.startswith("http"):
cpcmd = "wget -q {} -O {}".format(gridpack, join(BASEDIR, "cfg/gridpack.tar.xz"))
else:
cpcmd = "cp {0} {1}".format(gridpack, join(BASEDIR, "cfg/gridpack.tar.xz"))
if verbose:
print("$", cpcmd)
print(
"$ cat", join(BASEDIR, "python/externalLHEProducer_and_PYTHIA8_Hadronizer_cff.py")
)
print(get_gentemplate(year).format(CTAU=ctaumap[gridpack]))
print("------------------------------------------------------------")
print(config)
print("------------------------------------------------------------")
doCmd = True if alwaysDoCmd else raw_input("OK to go? [y/n]").lower() in ["y", "yes"]
if doCmd:
# 1. copy gridpack
os.system(cpcmd)
# 2. write genfrag_cfi
with open(
join(BASEDIR, "python/externalLHEProducer_and_PYTHIA8_Hadronizer_cff.py"), "w"
) as genfrag_cfi:
genfrag_cfi.write(get_gentemplate(year).format(CTAU=ctaumap[gridpack]))
# 3. write gen_cfg
cfgcmd = get_command("GEN-SIM", year, rand=False)
os.system(cfgcmd)
# 4. crab submit
crabCommand("submit", config=config)
donelist.append(gridpack)
print("submitted: ", len(donelist))
for x in donelist:
print(x)
print("------------------------------------------------------------")
undonelist = [x for x in gridpacks if x not in donelist]
print("unsubmitted: ", len(undonelist))
for x in undonelist:
print(x)
if undonelist:
with open("unsubmitted-0.yml.log", "w") as outf:
yaml.dump(
{"gridpacks": undonelist, "njobs": njobs, "year": year},
outf,
default_flow_style=False,
)
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
5fdc08551c6d8e928a2cdfe94f16b447b3157fe9 | 90d7e077d4b5aac29d9aac2352d7d56da35bdd65 | /spell_checker.py | b6785324e2771cf2b86fc9705823d40153543c5c | []
| no_license | Apoorv7092/Ori | 2d0fb807b50dfb3f4ac64d6a33992ac2cb4db3ee | 46af2ee06d7427d36697bf1f3c1a1d6ad39d0224 | refs/heads/main | 2023-06-24T07:49:59.339665 | 2021-07-23T05:23:01 | 2021-07-23T05:23:01 | 388,685,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,381 | py |
import pandas as pd
import parameters
import re
from collections import Counter
def words(text): return re.findall(r'\w+', text.lower())
fdf = pd.read_excel(parameters.training_data)
#message_col=list(fdf['message'])
tadaa = " ".join(list(fdf["message"]))
#tadaa = open('/home/rajput/Documents/Fasttext_final/testting/fastText-0.9.1/fastText-0.9.1/saddam70M').read()
tadaa1 = open(parameters.spell_checker_file).read()
tadaa+=tadaa1
word_list=tadaa.split()
words_dict={}
for i in range(len(word_list)):
words_dict[word_list[i]]=i
# print(type(tadaa))
# print(tadaa)
WORDS = Counter(words(tadaa))
def P(word, N=sum(WORDS.values())):
"Probability of `word`."
return WORDS[word] / N
def correction(word):
"Most probable spelling correction for word."
return max(candidates(word), key=P)
def correction1(word):
return max(candidates1(word), key=P)
def candidates1(word):
"Generate possible spelling corrections for word."
#return (known([word]) or known(edits1(word)) or known(edits2(word)) or known(edit3(word)) or [word])
return (known([word]) or known(edits1(word)) or [word])
def candidates(word):
"Generate possible spelling corrections for word."
#return (known([word]) or known(edits1(word)) or known(edits2(word)) or known(edit3(word)) or [word])
return (known([word]) or known(edits1(word)) or known(edits2(word)) or [word])
def known(words):
"The subset of `words` that appear in the dictionary of WORDS."
return set(w for w in words if w in WORDS)
def edits1(word):
"All edits that are one edit away from `word`."
letters = 'abcdefghijklmnopqrstuvwxyz'
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R)>1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts)
def edits2(word):
"All edits that are two edits away from `word`."
return (e2 for e1 in edits1(word) for e2 in edits1(e1))
# def edit3(word):
# return (e3 for e2 in edits2(word) for e3 in edits2(e2))
def spell_checker(text):
#print('enter text')
#text1=input()
text=text.split()
modified_text=[]
for word in text:
if len(word)<=3:
modified_text.append(word)
elif len(word)==4:
if word not in words_dict:
modified_text.append(correction1(word))
else:
modified_text.append(word)
elif len(word)>4:
if word not in words_dict:
modified_text.append(correction(word))
else:
modified_text.append(word)
return " ".join(modified_text)
#print(correction('recharg'))
# while True:
# text=input()
# print(spell_checker(text))
# while True:
# print('enter text')
# text1=input()
# text=text1.split()
# modified_text=[]
# for word in text:
# if len(word)<=3:
# modified_text.append(word)
# else:
# modified_text.append(correction(word))
# print(" ".join(modified_text))
# print(text1)
# #print(correction('recharg'))
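# Optional smoke test (assumes parameters.training_data and the corpus file
# referenced above are available); guarded so importing stays side-effect free.
if __name__ == "__main__":
    for _text in ("recharg my phone", "chek balance"):
        print(_text, "->", spell_checker(_text))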
| [
"[email protected]"
]
| |
7b2362395c81e42eeb6741f5b8f5029495d61f19 | 182147503928530968928be0f0dbfd721f4f6bc5 | /test/transforms/test_minmaxscaler.py | 827e5f4f14772038a4afaa5b1dcf6013db683b13 | [
"MIT"
]
| permissive | omarirfa/praudio | dd04378b858f0d88382fbcf398deb318ed1e6cf1 | cc01f904eb22ff4c8a3dc4d69991c52feb843f3b | refs/heads/main | 2023-07-22T18:15:59.056767 | 2021-08-27T21:41:00 | 2021-08-27T21:41:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,580 | py | import numpy as np
import pytest
from praudio.transforms.scaling.minmaxscaler import MinMaxScaler
from praudio.transforms.transform import TransformType
from utils_transforms import sample_signal
@pytest.fixture
def min_max_scaler():
return MinMaxScaler(-1., 1.)
def test_min_max_scaler_is_instantiated_correctly(min_max_scaler):
assert isinstance(min_max_scaler, MinMaxScaler)
assert min_max_scaler.min_val == -1.
assert min_max_scaler.max_val == 1.
assert min_max_scaler.name == TransformType.MINMAXSCALER
def test_signal_is_normalised(min_max_scaler, sample_signal):
"""
GIVEN a Signal object
AND a min_val max_val normaliser object
WHEN the signal is passed to process
THEN the signal is normalised
"""
original_signal = sample_signal.data[:]
signal = min_max_scaler.process(sample_signal)
assert signal.name == "minmaxscaler_dummy"
assert type(signal.data) == np.ndarray
assert len(signal.data) == len(original_signal)
assert signal.data.max() == 1
assert signal.data.min() == -1
def test_1d_array_is_min_max_normalised(min_max_scaler):
array = np.array([-2, 0, 2])
norm_array = min_max_scaler._scale(array)
assert np.array_equal(norm_array, np.array([-1, 0, 1]))
def test_2d_array_is_min_max_normalised(min_max_scaler):
array = np.array([
[-2, 0, 2],
[-4, 0, 4]
])
expected_norm_array = np.array([
[-.5, 0, .5],
[-1, 0, 1]
])
norm_array = min_max_scaler._scale(array)
assert np.array_equal(norm_array, expected_norm_array) | [
"[email protected]"
]
| |
cfaca9464e29f20b8f94525c65c6d37541644890 | 37c7336cc40a133027460574a3afb6d4657ea2dd | /imgur/imgur | d9cfe70042cc57835226a51a17c9e6ad57251c1a | []
| no_license | udion/config | c1dc12e76ee00f379d293099b8b539e38a71a818 | 5c41cc81ad237d992318e9f5a20152b808292a5e | refs/heads/master | 2020-03-23T17:46:20.610632 | 2018-07-20T20:05:59 | 2018-07-20T20:05:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | #! /usr/bin/python
from sh import curl # install `sh` with `pip install sh`
import json
import sys
try:
resp = curl(
"https://api.imgur.com/3/image",
H="Authorization: Client-ID 4dda043bff518d8", # Get your client ID from imgur.com
X="POST",
F='image=@%s' % sys.argv[1]
)
objresp = json.loads(resp.stdout)
if objresp.get('success', False):
print objresp['data']['link']
else:
print 'Error: ', objresp['data']['error']
except Exception as e:
print 'Error: ', e
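# Usage from a shell: ./imgur path/to/image.png
# On success the script prints the uploaded image's imgur link.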
| [
"[email protected]"
]
| ||
d3bd8c51d6239f19186109f0ca17cf57933c4503 | 1d26fcc1673c78a03b2474102dddd63234863657 | /440 Final Project/neuralDigits.py | b692034f3653646f7b23a8fecc18f034a5786ea0 | []
| no_license | taotao-mars/AI-final-project | b47622927f87c83e863d28e59fb7a59d6afdc7f1 | b3e5892afad3dce64843b4c5efaab42917af42ff | refs/heads/master | 2020-03-16T17:38:34.619149 | 2018-05-10T03:18:27 | 2018-05-10T03:18:27 | 132,841,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,293 | py | import samples
import numpy as np
from neuralNetwork import NeuralNetworkClassifier
def testing(num):
trainData = np.load("traindigitbasic.npy")
trainLabels = samples.loadLabelsFile("data/digitdata/traininglabels", num)
testData = np.load("testdigitbasic.npy")
testLabels = samples.loadLabelsFile("data/digitdata/testlabels", 1000)
validData = np.load("validationdigitbasic.npy")
validLabels = samples.loadLabelsFile("data/digitdata/validationlabels", 1000)
neural = NeuralNetworkClassifier(28 * 28, 50, 10, num, 3.5)
neural.train(trainData[:,0:num], trainLabels, 100)
print "*************Test Data*************"
guess = neural.classify(testData)
samples.verify(neural, guess, testLabels)
print "***********************************"
print "************Valid Data*************"
guess = neural.classify(validData)
samples.verify(neural, guess, validLabels)
if __name__ == "__main__":
sampleDigit=[500,1000,1500,2000,2500,3000,3500,4000,4500,5000]
sampleFace=[45,90,135,180,225,270,315,300,405,450]
sample=sampleDigit
for i in range(len(sample)):
print str(10*(i+1))+"%% training data, %d" % sample[i]
testing(sample[i])
print "***********************************"
| [
"[email protected]"
]
| |
b01aad91cb35baf3abbd1c78c8d260ab60c60cd9 | a4e2274db63cc2ab16ef3eda7cfde21d368e9605 | /N_50_59/N_58_2_leftReverse.py | 1970cf81a0388c97f4821fb038a00eee97e37f20 | []
| no_license | Simple-fang/sword-offer2-python | 3bfcae25697afc9fc152bc7ef808c5bf18035af5 | 8e16b3f647d6e7d29ac18352a109b20630feb688 | refs/heads/master | 2020-07-08T22:28:10.129057 | 2019-08-29T15:18:44 | 2019-08-29T15:18:44 | 203,797,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 877 | py | # -*- coding:utf-8 -*-
class Solution:
    # Reverse the two parts separately, then reverse the whole string
def leftReverse(self, s, k):
if not isinstance(s, str) or len(s) == 0 or s == None or k <= 0:
return ''
strList = list(s)
self.reverse(strList, 0, k-1)
self.reverse(strList, k, len(strList)-1)
self.reverse(strList, 0, len(strList)-1)
return ''.join(strList)
def reverse(self, s, i, j):
if i > j:
            return
while i <= j:
c = s[i]
s[i] = s[j]
s[j] = c
i += 1
j -= 1
    # Use Python slicing and concatenation directly
def leftReverse2(self, s, k):
if not isinstance(s,str) or len(s)<=0 or s==None or k<0:
return ''
return s[k:] + s[:k]
test = Solution()
s = "123456789"
print test.leftReverse(s,4)
print test.leftReverse2(s,4) | [
"[email protected]"
]
| |
59b5cc9e5c1652af2d3ffa90116d914e8aaf7578 | 985c66c2fcdf22545dd27a1c9e299a5e862fdf06 | /020.py | 88c4480d684a50fd9e59da83a614b7ee0fbb1aa7 | []
| no_license | arpit0891/project-euler-1 | a9a6016158a15ea45f1eba1698bd914fb1d10a52 | e0dcec507a708265e655baec5a4d7bdb7b8540fa | refs/heads/master | 2021-02-23T01:31:32.843196 | 2013-05-04T10:43:20 | 2013-05-04T10:43:20 | 245,389,882 | 1 | 0 | null | 2020-03-06T10:21:39 | 2020-03-06T10:21:38 | null | UTF-8 | Python | false | false | 392 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#n! means n × (n − 1) × ... × 3 × 2 × 1
#For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800,
#and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
#Find the sum of the digits in the number 100!
#Answer:
#648
from mathplus import factorial
M = 100
print(sum(int(digit) for digit in str(factorial(M))))
| [
"[email protected]"
]
| |
cc6617779ee02a128a2381e1bb8d2819c9b0c84c | 726d8518a8c7a38b0db6ba9d4326cec172a6dde6 | /1282. Group the People Given the Group Size They Belong To/Solution.py | 4944ab7312ab85c95887d096c509715247085dc7 | []
| no_license | faterazer/LeetCode | ed01ef62edbcfba60f5e88aad401bd00a48b4489 | d7ba416d22becfa8f2a2ae4eee04c86617cd9332 | refs/heads/master | 2023-08-25T19:14:03.494255 | 2023-08-25T03:34:44 | 2023-08-25T03:34:44 | 128,856,315 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | from typing import List
class Solution:
def groupThePeople(self, groupSizes: List[int]) -> List[List[int]]:
res = []
groups = {}
for i, e in enumerate(groupSizes):
groups.setdefault(e, []).append(i)
if len(groups[e]) == e:
res.append(groups[e])
groups[e] = []
return res
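# Worked example: groupSizes = [3, 3, 3, 3, 3, 1, 3]
# -> [[0, 1, 2], [5], [3, 4, 6]]: indices 0-2 fill the first size-3 group,
#    index 5 forms its own size-1 group, and 3, 4, 6 fill the last one.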
| [
"[email protected]"
]
| |
5504d599f5231dfb970d783217327010a3757c72 | e5e2b7da41fda915cb849f031a0223e2ac354066 | /sdk/python/pulumi_azure_native/netapp/v20201201/snapshot.py | 5dc6e64c2d7c5f24cdb196fcb956b80495f2cc6e | [
"BSD-3-Clause",
"Apache-2.0"
]
| permissive | johnbirdau/pulumi-azure-native | b7d3bdddeb7c4b319a7e43a892ddc6e25e3bfb25 | d676cc331caa0694d8be99cb90b93fa231e3c705 | refs/heads/master | 2023-05-06T06:48:05.040357 | 2021-06-01T20:42:38 | 2021-06-01T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,959 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['SnapshotArgs', 'Snapshot']
@pulumi.input_type
class SnapshotArgs:
def __init__(__self__, *,
account_name: pulumi.Input[str],
pool_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
volume_name: pulumi.Input[str],
location: Optional[pulumi.Input[str]] = None,
snapshot_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Snapshot resource.
:param pulumi.Input[str] account_name: The name of the NetApp account
:param pulumi.Input[str] pool_name: The name of the capacity pool
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] volume_name: The name of the volume
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[str] snapshot_name: The name of the mount target
"""
pulumi.set(__self__, "account_name", account_name)
pulumi.set(__self__, "pool_name", pool_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "volume_name", volume_name)
if location is not None:
pulumi.set(__self__, "location", location)
if snapshot_name is not None:
pulumi.set(__self__, "snapshot_name", snapshot_name)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> pulumi.Input[str]:
"""
The name of the NetApp account
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter(name="poolName")
def pool_name(self) -> pulumi.Input[str]:
"""
The name of the capacity pool
"""
return pulumi.get(self, "pool_name")
@pool_name.setter
def pool_name(self, value: pulumi.Input[str]):
pulumi.set(self, "pool_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="volumeName")
def volume_name(self) -> pulumi.Input[str]:
"""
The name of the volume
"""
return pulumi.get(self, "volume_name")
@volume_name.setter
def volume_name(self, value: pulumi.Input[str]):
pulumi.set(self, "volume_name", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="snapshotName")
def snapshot_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the mount target
"""
return pulumi.get(self, "snapshot_name")
@snapshot_name.setter
def snapshot_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "snapshot_name", value)
class Snapshot(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
pool_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
snapshot_name: Optional[pulumi.Input[str]] = None,
volume_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Snapshot of a Volume
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: The name of the NetApp account
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[str] pool_name: The name of the capacity pool
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] snapshot_name: The name of the mount target
:param pulumi.Input[str] volume_name: The name of the volume
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SnapshotArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Snapshot of a Volume
:param str resource_name: The name of the resource.
:param SnapshotArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SnapshotArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
pool_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
snapshot_name: Optional[pulumi.Input[str]] = None,
volume_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SnapshotArgs.__new__(SnapshotArgs)
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__.__dict__["account_name"] = account_name
__props__.__dict__["location"] = location
if pool_name is None and not opts.urn:
raise TypeError("Missing required property 'pool_name'")
__props__.__dict__["pool_name"] = pool_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["snapshot_name"] = snapshot_name
if volume_name is None and not opts.urn:
raise TypeError("Missing required property 'volume_name'")
__props__.__dict__["volume_name"] = volume_name
__props__.__dict__["created"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["snapshot_id"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:netapp/v20201201:Snapshot"), pulumi.Alias(type_="azure-native:netapp:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20170815:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20170815:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20190501:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20190501:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20190601:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20190601:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20190701:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20190701:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20190801:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20190801:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20191001:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20191001:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20191101:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20191101:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20200201:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20200201:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20200301:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20200301:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20200501:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20200501:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20200601:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20200601:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20200701:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20200701:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20200801:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20200801:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20200901:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20200901:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20201101:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20201101:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20210201:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20210201:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20210401preview:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20210401preview:Snapshot")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Snapshot, __self__).__init__(
'azure-native:netapp/v20201201:Snapshot',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Snapshot':
"""
Get an existing Snapshot resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = SnapshotArgs.__new__(SnapshotArgs)
__props__.__dict__["created"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["snapshot_id"] = None
__props__.__dict__["type"] = None
return Snapshot(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def created(self) -> pulumi.Output[str]:
"""
The creation date of the snapshot
"""
return pulumi.get(self, "created")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
Azure lifecycle management
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="snapshotId")
def snapshot_id(self) -> pulumi.Output[str]:
"""
UUID v4 used to identify the Snapshot
"""
return pulumi.get(self, "snapshot_id")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
| [
"[email protected]"
]
| |
d21dbb0c57c7a975df7de3bd6cb16ad4802d1866 | e4104ad99575a394157b480dc8724a026cb743b7 | /web_develop/apps.py | 1ca6c73e9d7223367f11a829f9805584c1b696dd | []
| no_license | fourston/UnitCube | f0ca2e40e296db60f0aa3d2769fe360205da2eb0 | c008ef0947e4448c433d4b5a7615ae1ca8f41fc5 | refs/heads/master | 2021-05-06T21:42:45.015419 | 2017-11-29T23:53:12 | 2017-11-29T23:53:12 | 112,540,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | from django.apps import AppConfig
class WebDevelopConfig(AppConfig):
name = 'web_develop'
| [
"[email protected]"
]
| |
e1ca10f66fe6e4a01b92ace526335679d0427751 | 42f4238073a70d1494537f8c8b07835b531e73a9 | /benchmarks/beach/redist_beach_erosion_board_waves_3d_c0p1_n.py | e0d724264d63efa1c4516fe87fb96968f2ac296f | []
| no_license | erdc/proteus-mprans | bd99257af7b3bbe08386533faf072dba22e93a61 | f8f4d20bc870b361c64c8ca2ceb99f045b373323 | refs/heads/master | 2022-09-11T13:18:39.973962 | 2022-08-11T16:27:29 | 2022-08-11T16:27:29 | 2,303,947 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,767 | py | from proteus import *
from proteus.default_n import *
from redist_beach_erosion_board_waves_3d_p import *
from beach_erosion_board_waves_3d import *
if rdtimeIntegration == 'newton':
timeIntegration = NoIntegration
stepController = Newton_controller
elif rdtimeIntegration == 'tte':
timeIntegration = BackwardEuler_cfl
timeIntegration = PsiTCtte
elif rdtimeIntegration == 'osher-fmm':
timeIntegration = BackwardEuler_cfl
stepController = Osher_FMM_controller
runCFL=1.0
else:
timeIntegration = BackwardEuler_cfl
stepController = Osher_PsiTC_controller
#stepController = Osher_controller
runCFL=1.0
# timeIntegration = PsiTCtte
# stepController = PsiTCtte_controller
# rtol_res[0] = 0.0
# atol_res[0] = 0.1*L[0]/(nn-1.0)#10% of he
#runCFL=1.0
#DT=None
if spaceOrder == 1:
femSpaces = {0:C0_AffineLinearOnSimplexWithNodalBasis}
if spaceOrder == 2:
femSpaces = {0:C0_AffineQuadraticOnSimplexWithNodalBasis}
elementQuadrature = SimplexGaussQuadrature(nd,sloshbox_quad_order)
elementBoundaryQuadrature = SimplexGaussQuadrature(nd-1,sloshbox_quad_order)
subgridErrorType = HamiltonJacobi_ASGS
if LevelModelType == RDLS.LevelModel:#RDLSV2.OneLevelRDLSV2 and not RDLSV2.debugRDLS:
subgridErrorType = HamiltonJacobi_ASGS_opt
if rdtimeIntegration == 'newton':
subgridError = subgridErrorType(coefficients,nd,stabFlag='2',lag=False)
else:
subgridError = subgridErrorType(coefficients,nd,stabFlag='2',lag=True)
#subgridError = HamiltonJacobi_ASGS(coefficients,nd,lag=True)
shockCapturing = None
#shockCapturing = ResGrad_SC(coefficients,nd,shockCapturingFactor=0.9,lag=False)
if rdtimeIntegration == 'newton':
shockCapturing = ResGradQuad_SC(coefficients,nd,shockCapturingFactor=rd_shockCapturingFactor,lag=False)
else:
shockCapturing = ResGradQuad_SC(coefficients,nd,shockCapturingFactor=rd_shockCapturingFactor,lag=True)
massLumping = False
#multilevelNonlinearSolver = MultilevelEikonalSolver
#levelNonlinearSolver = UnstructuredFMMandFSWsolvers.FMMEikonalSolver
multilevelNonlinearSolver = NLNI
levelNonlinearSolver = Newton
if rdtimeIntegration != 'newton':
maxLineSearches = 0
nonlinearSmoother = NLGaussSeidel
fullNewtonFlag = True
#this needs to be set appropriately for pseudo-transient
tolFac = 0.0
nl_atol_res = 0.01*L[0]/nn
atol_res[0] = 1.0e-6 #for pseudo transient
rtol_res[0] = 0.0
numericalFluxType = DoNothing
maxNonlinearIts = 50 #1 for PTC
matrix = SparseMatrix
if usePETSc:
numericalFluxType = DoNothing
multilevelLinearSolver = PETSc
levelLinearSolver = PETSc
else:
multilevelLinearSolver = LU
levelLinearSolver = LU
linearSmoother = GaussSeidel
linTolFac = 0.001
conservativeFlux = None
| [
"[email protected]"
]
| |
68f09c614ab5f1231f49f77c4804d2cb9765d55d | 90df1c6ccfd65a51e44c067f5079c2262f0fd767 | /BarChart.py | b397a0ab5bcf7d788b23b72807454aa8c7e45f50 | []
| no_license | sudalvi/First-Week | f8083211721d16e051d7898225cd8c17912fbc14 | 7a6e952229c6af12d69e27b9486e7aee4480779e | refs/heads/master | 2020-06-01T06:59:23.991506 | 2019-06-07T04:52:35 | 2019-06-07T04:52:35 | 190,689,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,812 | py | import matplotlib.pyplot as plt
x = ['Java', 'Python', 'PHP', 'JavaScript', 'C#', 'C++']
popularity = [22.2, 17.6, 8.8, 8, 7.7, 6.7]
pos = [ i for i , _ in enumerate(x)]
plt.bar(pos, popularity, color =(0.4, 0.6, 0.8, 1.0), edgecolor="blue")
plt.xticks(pos, x)
plt.minorticks_on()
plt.xlabel("Language")
plt.ylabel("Popularity")
plt.title("Bar Chart For Programing Language Poularity")
plt.grid(which="major", linestyle='-', linewidth='0.5', color="red")
plt.grid(which="minor", linestyle=":", linewidth='0.5', color="black")
plt.show()
#Horizontal Chart
y = ['Java', 'Python', 'PHP', 'JavaScript', 'C#', 'C++']
popularity = [22.2, 17.6, 8.8, 8, 7.7, 6.7]
pos = [ i for i , _ in enumerate(y)]
plt.barh(pos, popularity, color =['purple', 'red', 'orange', 'yellow', 'pink','cyan'], edgecolor="blue")
plt.yticks(pos, y)
plt.minorticks_on()
plt.ylabel("Language")
plt.xlabel("Popularity")
plt.title("Bar Chart For Programing Language Poularity")
plt.grid(which="major", linestyle='-', linewidth='0.5', color="red")
plt.grid(which="minor", linestyle=":", linewidth='0.5', color="black")
plt.show()
#Text Label On Bar
x = ['Java', 'Python', 'PHP', 'JavaScript', 'C#', 'C++']
popularity = [22.2, 17.6, 8.8, 8, 7.7, 6.7]
x_pos = [i for i, _ in enumerate(x)]
fig, ax = plt.subplots()
rects1 = ax.bar(x_pos, popularity, color='b')
plt.xlabel("X axis")
plt.ylabel("Y axis")
plt.title("Text Lable on Bar Chart")
plt.xticks(x_pos, x)
plt.minorticks_on()
plt.grid(which="major", linestyle="-", linewidth="0.5", color="red")
plt.grid(which="minor", linestyle=":", linewidth="0.5", color="blue")
def autolabel(rects):
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%f' % float(height),
ha="center", va="bottom")
autolabel(rects1)
plt.show()
| [
"[email protected]"
]
| |
a9bb0f690afa0a9da4041ea70f4527cbe0ef2c3a | 761beb2a465800f992c8590767d8cd84e1480a4c | /pySON.py | 1175a5b0a060e8d6f1118ad7b2cf3bb8819ff340 | []
| no_license | willh99/PRESS-RPi | 5109aed872ef1f65249f683a3f68d141d4e995bb | 5b0587158890c42f01538f36db91124cf507abe5 | refs/heads/master | 2021-04-28T08:05:17.638496 | 2018-04-11T17:46:10 | 2018-04-11T17:46:10 | 122,240,347 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,814 | py | import json
import time
import random
import datetime
def read_json(filename):
if '.json' not in filename:
return -1
try:
with open(filename, 'r') as f:
print("File \"", filename, "\" found", sep='')
data = json.load(f)
return data
except FileNotFoundError:
print("File Not Found")
return -1
def append_json(data, filename):
with open(filename, 'w') as f:
json.dump(data, f, indent=2)
# print("wrote to file")
def create_status(buy, sell, isprice):
now = datetime.datetime.now()
now = now.strftime('%d-%m-%Y %X')
data = {"Sell": sell, "Buy": buy, "Timestamp": now}
if isprice:
filename = 'price_status.json'
else:
filename = 'status.json'
with open(filename, 'w', encoding='utf-8') as f:
json.dump(data, f, indent=2)
def profit_file(mode, profit):
try:
if mode == 'Read':
with open('profit.txt', 'r') as f:
return f.readline().split()[0]
except FileNotFoundError:
print("File Not Found")
return 0
with open('profit.txt', 'w') as f:
f.write(str(profit))
if __name__ == "__main__":
json_list = []
for x in range(0, 100):
i = random.random()*12.8
dictionary = {"Timestamp": time.asctime(time.localtime()),
"Voltage": round(i, 6)}
if len(json_list) >= 50:
json_list.pop(0)
json_list.append(dictionary)
# time.sleep(.2)
        # a filename is required by append_json's signature; 'voltages.json'
        # is a placeholder choice (the original call omitted the argument)
        append_json(json_list, 'voltages.json')
something = read_json('status.json')
if something is not -1:
print(json.dumps(something, indent=2))
profit_file('Write', 1129.124)
print(profit_file('Read', 0))
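    # create_status is the counterpart writer defined above (sketch):
    # create_status(buy=True, sell=False, isprice=True)   # -> price_status.json
    # create_status(buy=True, sell=False, isprice=False)  # -> status.json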
| [
"[email protected]"
]
| |
e0887b70f4b7024270a588e59d6a5d81ec0959c3 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/signalrservice/v20210601preview/get_signal_r.py | e126d745e3c8f5f3fc1a5876c117c9fc8754627f | [
"BSD-3-Clause",
"Apache-2.0"
]
| permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,073 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetSignalRResult',
'AwaitableGetSignalRResult',
'get_signal_r',
'get_signal_r_output',
]
@pulumi.output_type
class GetSignalRResult:
"""
A class represent a resource.
"""
def __init__(__self__, cors=None, disable_aad_auth=None, disable_local_auth=None, external_ip=None, features=None, host_name=None, id=None, identity=None, kind=None, location=None, name=None, network_acls=None, private_endpoint_connections=None, provisioning_state=None, public_network_access=None, public_port=None, server_port=None, shared_private_link_resources=None, sku=None, system_data=None, tags=None, tls=None, type=None, upstream=None, version=None):
if cors and not isinstance(cors, dict):
raise TypeError("Expected argument 'cors' to be a dict")
pulumi.set(__self__, "cors", cors)
if disable_aad_auth and not isinstance(disable_aad_auth, bool):
raise TypeError("Expected argument 'disable_aad_auth' to be a bool")
pulumi.set(__self__, "disable_aad_auth", disable_aad_auth)
if disable_local_auth and not isinstance(disable_local_auth, bool):
raise TypeError("Expected argument 'disable_local_auth' to be a bool")
pulumi.set(__self__, "disable_local_auth", disable_local_auth)
if external_ip and not isinstance(external_ip, str):
raise TypeError("Expected argument 'external_ip' to be a str")
pulumi.set(__self__, "external_ip", external_ip)
if features and not isinstance(features, list):
raise TypeError("Expected argument 'features' to be a list")
pulumi.set(__self__, "features", features)
if host_name and not isinstance(host_name, str):
raise TypeError("Expected argument 'host_name' to be a str")
pulumi.set(__self__, "host_name", host_name)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if network_acls and not isinstance(network_acls, dict):
raise TypeError("Expected argument 'network_acls' to be a dict")
pulumi.set(__self__, "network_acls", network_acls)
if private_endpoint_connections and not isinstance(private_endpoint_connections, list):
raise TypeError("Expected argument 'private_endpoint_connections' to be a list")
pulumi.set(__self__, "private_endpoint_connections", private_endpoint_connections)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if public_network_access and not isinstance(public_network_access, str):
raise TypeError("Expected argument 'public_network_access' to be a str")
pulumi.set(__self__, "public_network_access", public_network_access)
if public_port and not isinstance(public_port, int):
raise TypeError("Expected argument 'public_port' to be a int")
pulumi.set(__self__, "public_port", public_port)
if server_port and not isinstance(server_port, int):
raise TypeError("Expected argument 'server_port' to be a int")
pulumi.set(__self__, "server_port", server_port)
if shared_private_link_resources and not isinstance(shared_private_link_resources, list):
raise TypeError("Expected argument 'shared_private_link_resources' to be a list")
pulumi.set(__self__, "shared_private_link_resources", shared_private_link_resources)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if tls and not isinstance(tls, dict):
raise TypeError("Expected argument 'tls' to be a dict")
pulumi.set(__self__, "tls", tls)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if upstream and not isinstance(upstream, dict):
raise TypeError("Expected argument 'upstream' to be a dict")
pulumi.set(__self__, "upstream", upstream)
if version and not isinstance(version, str):
raise TypeError("Expected argument 'version' to be a str")
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def cors(self) -> Optional['outputs.SignalRCorsSettingsResponse']:
"""
Cross-Origin Resource Sharing (CORS) settings.
"""
return pulumi.get(self, "cors")
@property
@pulumi.getter(name="disableAadAuth")
def disable_aad_auth(self) -> Optional[bool]:
"""
DisableLocalAuth
Enable or disable aad auth
When set as true, connection with AuthType=aad won't work.
"""
return pulumi.get(self, "disable_aad_auth")
@property
@pulumi.getter(name="disableLocalAuth")
def disable_local_auth(self) -> Optional[bool]:
"""
DisableLocalAuth
Enable or disable local auth with AccessKey
When set as true, connection with AccessKey=xxx won't work.
"""
return pulumi.get(self, "disable_local_auth")
@property
@pulumi.getter(name="externalIP")
def external_ip(self) -> str:
"""
The publicly accessible IP of the resource.
"""
return pulumi.get(self, "external_ip")
@property
@pulumi.getter
def features(self) -> Optional[Sequence['outputs.SignalRFeatureResponse']]:
"""
List of the featureFlags.
FeatureFlags that are not included in the parameters for the update operation will not be modified.
And the response will only include featureFlags that are explicitly set.
When a featureFlag is not explicitly set, its globally default value will be used
But keep in mind, the default value doesn't mean "false". It varies in terms of different FeatureFlags.
"""
return pulumi.get(self, "features")
@property
@pulumi.getter(name="hostName")
def host_name(self) -> str:
"""
FQDN of the service instance.
"""
return pulumi.get(self, "host_name")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource Id for the resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.ManagedIdentityResponse']:
"""
The managed identity response
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
The kind of the service - e.g. "SignalR" for "Microsoft.SignalRService/SignalR"
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
The GEO location of the resource. e.g. West US | East US | North Central US | South Central US.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkACLs")
def network_acls(self) -> Optional['outputs.SignalRNetworkACLsResponse']:
"""
Network ACLs
"""
return pulumi.get(self, "network_acls")
@property
@pulumi.getter(name="privateEndpointConnections")
def private_endpoint_connections(self) -> Sequence['outputs.PrivateEndpointConnectionResponse']:
"""
Private endpoint connections to the resource.
"""
return pulumi.get(self, "private_endpoint_connections")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
Provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="publicNetworkAccess")
def public_network_access(self) -> Optional[str]:
"""
Enable or disable public network access. Default to "Enabled".
When it's Enabled, network ACLs still apply.
When it's Disabled, public network access is always disabled no matter what you set in network ACLs.
"""
return pulumi.get(self, "public_network_access")
@property
@pulumi.getter(name="publicPort")
def public_port(self) -> int:
"""
The publicly accessible port of the resource which is designed for browser/client side usage.
"""
return pulumi.get(self, "public_port")
@property
@pulumi.getter(name="serverPort")
def server_port(self) -> int:
"""
The publicly accessible port of the resource which is designed for customer server side usage.
"""
return pulumi.get(self, "server_port")
@property
@pulumi.getter(name="sharedPrivateLinkResources")
def shared_private_link_resources(self) -> Sequence['outputs.SharedPrivateLinkResourceResponse']:
"""
The list of shared private link resources.
"""
return pulumi.get(self, "shared_private_link_resources")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.ResourceSkuResponse']:
"""
The billing information of the resource.(e.g. Free, Standard)
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Tags of the service which is a list of key value pairs that describe the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def tls(self) -> Optional['outputs.SignalRTlsSettingsResponse']:
"""
TLS settings.
"""
return pulumi.get(self, "tls")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource - e.g. "Microsoft.SignalRService/SignalR"
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def upstream(self) -> Optional['outputs.ServerlessUpstreamSettingsResponse']:
"""
Upstream settings when the service is in server-less mode.
"""
return pulumi.get(self, "upstream")
@property
@pulumi.getter
def version(self) -> str:
"""
Version of the resource. Probably you need the same or higher version of client SDKs.
"""
return pulumi.get(self, "version")
class AwaitableGetSignalRResult(GetSignalRResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSignalRResult(
cors=self.cors,
disable_aad_auth=self.disable_aad_auth,
disable_local_auth=self.disable_local_auth,
external_ip=self.external_ip,
features=self.features,
host_name=self.host_name,
id=self.id,
identity=self.identity,
kind=self.kind,
location=self.location,
name=self.name,
network_acls=self.network_acls,
private_endpoint_connections=self.private_endpoint_connections,
provisioning_state=self.provisioning_state,
public_network_access=self.public_network_access,
public_port=self.public_port,
server_port=self.server_port,
shared_private_link_resources=self.shared_private_link_resources,
sku=self.sku,
system_data=self.system_data,
tags=self.tags,
tls=self.tls,
type=self.type,
upstream=self.upstream,
version=self.version)
def get_signal_r(resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSignalRResult:
"""
A class represent a resource.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str resource_name: The name of the resource.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:signalrservice/v20210601preview:getSignalR', __args__, opts=opts, typ=GetSignalRResult).value
return AwaitableGetSignalRResult(
cors=__ret__.cors,
disable_aad_auth=__ret__.disable_aad_auth,
disable_local_auth=__ret__.disable_local_auth,
external_ip=__ret__.external_ip,
features=__ret__.features,
host_name=__ret__.host_name,
id=__ret__.id,
identity=__ret__.identity,
kind=__ret__.kind,
location=__ret__.location,
name=__ret__.name,
network_acls=__ret__.network_acls,
private_endpoint_connections=__ret__.private_endpoint_connections,
provisioning_state=__ret__.provisioning_state,
public_network_access=__ret__.public_network_access,
public_port=__ret__.public_port,
server_port=__ret__.server_port,
shared_private_link_resources=__ret__.shared_private_link_resources,
sku=__ret__.sku,
system_data=__ret__.system_data,
tags=__ret__.tags,
tls=__ret__.tls,
type=__ret__.type,
upstream=__ret__.upstream,
version=__ret__.version)
@_utilities.lift_output_func(get_signal_r)
def get_signal_r_output(resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSignalRResult]:
"""
A class represent a resource.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str resource_name: The name of the resource.
"""
...
| [
"[email protected]"
]
| |
fedf9a4adf68e18a4d492e204426b009e4c03540 | 299c07abf832ba8b0a4181c526f95d6f861c3623 | /pages/views.py | 483e3caba3bab343c52bb5dfe15734215146eb40 | []
| no_license | ananyajana/hello-world | 37640880b8df2b170a4d64a7893eced35cf07293 | c498ec70016e22978f2c3f0365d6a38522254b72 | refs/heads/master | 2023-08-14T22:52:25.634216 | 2020-05-25T11:38:28 | 2020-05-25T11:38:28 | 266,763,806 | 0 | 0 | null | 2021-09-22T19:09:50 | 2020-05-25T11:39:56 | Python | UTF-8 | Python | false | false | 166 | py | from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def homePageView(request):
return HttpResponse('Hello, World!')
| [
"[email protected]"
]
| |
89baa3c77a0325571b7687166c88e9b24ca6bc99 | 22e496318558b8ad489fcd1f1a40de96161f29e8 | /source/body2d.py | 3bbf70ca732140f600fb41dc3755aa2c6a80595c | []
| no_license | x00001101/py | a56000c07683521329cec7e13cfbb6ae5cb0a296 | 418b467353edf6d82cde35c140486cb47d875c97 | refs/heads/master | 2023-06-12T09:28:11.541361 | 2021-07-04T12:11:09 | 2021-07-04T12:11:09 | 381,191,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | import pygame
from source.variable import *
class body2d:
def __init__(self):
pass
    def render(self, x, y, width, height):
        # Build the rect for the lower half of the body and return it so the
        # caller can draw or collide with it (the original discarded the rect)
        body = pygame.Rect(x, y + height // 2, width, height // 2)
        return body | [
"[email protected]"
]
| |
2b17091186348ffd159fcb0740f2e1355f414123 | 42407abcb0b5bae8f2becd20995b3272ddf24a8c | /GANs/simple_gan.py | 7f95539411e188a86db54d87fed8d2239dcd0d93 | []
| no_license | xeddmc/POCs | 4313e6a3c8b473db6f8d975bbf27fba37e13dfc6 | b8c8ea1a839c23fc5c35997eb484ce9b0e9031b8 | refs/heads/master | 2023-03-15T11:58:35.438310 | 2020-04-03T05:14:27 | 2020-04-03T05:14:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,012 | py | import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import numpy as np
import matplotlib.pyplot as plt
from keras.layers import Input
from keras.models import Model, Sequential
from keras.layers.core import Reshape, Dense, Dropout, Flatten
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import Convolution2D, UpSampling2D
from keras.layers.normalization import BatchNormalization
from keras.datasets import mnist
from keras.optimizers import Adam
from keras import backend as K
from keras import initializers
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = (X_train.astype(np.float32) - 127.5)/127.5
X_train = X_train.reshape(60000, 784)
def generator():
gen=Sequential()
gen.add(Dense(256,input_dim=100))
gen.add(LeakyReLU(0.2))
gen.add(Dense(512))
gen.add(LeakyReLU(0.2))
gen.add(Dense(1024))
gen.add(LeakyReLU(0.2))
gen.add(Dense(784,activation='tanh'))
gen.compile(loss='binary_crossentropy',optimizer=Adam(lr=0.0002, beta_1=0.5))
return gen
def discriminator():
disc=Sequential()
disc.add(Dense(1024,input_dim=784))
disc.add(LeakyReLU(0.2))
disc.add(Dropout(0.2))
disc.add(Dense(512))
disc.add(LeakyReLU(0.2))
disc.add(Dropout(0.2))
disc.add(Dense(256))
disc.add(LeakyReLU(0.2))
disc.add(Dropout(0.2))
disc.add(Dense(1,activation='sigmoid'))
disc.compile(loss='binary_crossentropy',optimizer=Adam(lr=0.0002, beta_1=0.5))
return disc
def stacked_GAN(gen,disc):
disc.trainable=False
gan_input=Input(shape=(100,))
x=gen(gan_input)
gan_out=disc(x)
gan_stack=Model(inputs=gan_input,outputs=gan_out)
gan_stack.compile(loss='binary_crossentropy',optimizer=Adam(lr=0.0002, beta_1=0.5))
return gan_stack
def test(gen,i):
noise=np.random.normal(0,1,(1,100))
image=np.squeeze(gen.predict(noise),axis=0)
plt.imsave('/home/vaibhav/deep_learning/gan/code/images2/epoch_%d'%i,image.reshape(28,28),format='jpg',cmap='gray')
def train(max_iter,batch_size,gen,disc,gan_stack):
for i in range(0,max_iter):
noise=np.random.normal(0,1,(batch_size,100))
image_batch = X_train[np.random.randint(0, X_train.shape[0], size=batch_size)]
fake_images=gen.predict(noise)
final_images=np.concatenate([image_batch,fake_images])
final_labels=np.concatenate((np.ones((np.int64(batch_size), 1)), np.zeros((np.int64(batch_size), 1))))
disc.trainable=True
disc_loss=disc.train_on_batch(final_images,final_labels)
disc.trainable=False
noise_gen=np.random.normal(0,1,(batch_size,100))
y_mis_labels=np.ones(batch_size)
        gen_loss=gan_stack.train_on_batch(noise_gen,y_mis_labels) # use the fresh noise batch generated above (was: noise)
print('epoch_%d---->gen_loss:[%f]---->disc_loss:[%f]'%(i,gen_loss,disc_loss))
if i%100==0:
#gen.save_weights('/home/vaibhav/deep_learning/gan/code/gen_weights/epoch_%d.h5'%i)
#disc.save_weights('/home/vaibhav/deep_learning/gan/code/disc_weights/epoch_%d.h5'%i)
test(gen,i)
gen=generator()
disc=discriminator()
gan_stack=stacked_GAN(gen,disc)
max_iter=20000
batch_size=32
train(max_iter,batch_size,gen,disc,gan_stack)
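# After training, the generator alone can sample new digits (sketch using
# the shapes defined above):
# sample_noise = np.random.normal(0, 1, (16, 100))
# digits = gen.predict(sample_noise).reshape(-1, 28, 28)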
| [
"[email protected]"
]
| |
03622786a4de2d5c12beee1a16d5fba75dcf2347 | 29ad9caf139fab91580d7be36c9bd07623c4ca4d | /py/edu_freq_min.py | fc3b361beeafe60bea31d57a072936492e1f99f0 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-unknown"
]
| permissive | bcgov/flatfile-tools | f324687389a508aad641131f70bb66c533917bbe | 749071129cab7a598bd4c2edf050dce59324a97f | refs/heads/master | 2021-06-10T15:14:08.266856 | 2021-05-13T14:23:48 | 2021-05-13T14:23:48 | 183,680,156 | 2 | 0 | Apache-2.0 | 2020-04-15T02:21:47 | 2019-04-26T18:58:46 | Python | UTF-8 | Python | false | false | 3,247 | py | # 20190315 take pharmanet dispensations, look for earliest dispense date of drug type, as well as dispense frequency
# output has same data, with freq and min_srv_date added
import os
import sys
import time
from misc import*
def expected(f_name, lookup):
if f_name not in lookup:
err("expected field: " + str(f_name))
def freq_min(fn):
f = open(fn)
if f == None:
err("failed to open file: " + str(fn))
fields = f.readline().strip().split(",")
print fields
lookup = {}
for i in range(0, len(fields)):
lookup[fields[i].lower()] = i
print " ", lookup
for i in ["studyid", "hp.din_pin"]:
expected(i, lookup)
#mindate, freq = f(studyid, hp.din_pin)
dat = {}
ci = 0
f_size = os.stat(fn).st_size
tt = ttt = t_0 = time.time()
while True:
words = None
try:
words = f.readline().strip().split(",")
except:
break
if words == ['']:
continue
for i in range(0, len(words)):
words[i] = words[i].strip().lower()
if len(words) != len(fields):
print words
err("wrong number of fields, check csv file")
key = words[lookup["studyid"]] + "," + words[lookup["hp.gen_drug"]]
if key not in dat:
# freq = 1, min(serv_date) = serve_date
dat[key] = [1, words[lookup["srv_date"]]]
else:
freq, min_serv_date = dat[key]
freq += 1
date = words[lookup["srv_date"]]
min_serv_date = min_serv_date if min_serv_date < date else date
dat[key] = [freq, min_serv_date]
ci += 1
if ci % 100000 == 0:
ttt = tt
tt = time.time()
print "file", " %: ", 100. * (float(f.tell()) / float(f_size)), " MB/s:", (float(f.tell()) / 1000000.) / (tt- t_0)#
f.close()
f = open(fn)
if f is None:
err("failed to open file: " + str(fn))
print " +r " + fn
g_n = fn + "_freq-min.csv"
print " +w " + g_n
g = open(g_n, "wb")
print " +w " + g_n
if g is None:
err("failed to open file: " + str(g_n))
fields.append("freq")
fields.append("min_srv_date")
g.write(",".join(fields))
f.readline() # fields
ci = 0
while True:
line, words = None, None
try:
line = f.readline().strip()
except:
break
if line == "":
continue
words = line.split(",")
for i in range(0, len(words)):
words[i] = words[i].strip().lower()
key = words[lookup["studyid"]] + "," + words[lookup["hp.gen_drug"]]
if key not in dat:
err("key should have been found")
freq, min_serv_date = dat[key]
g.write("\n" + line + "," + str(freq) + "," + str(min_serv_date))
ci += 1
if ci % 100000 == 0:
ttt = tt
tt = time.time()
print "file", " %: ", 100. * (float(f.tell()) / float(f_size)), " MB/s:", (float(f.tell()) / 1000000.) / (tt- t_0)#
f.close()
g.close()
freq_min("dsp_rpt.dat_slice.csv_select-STUDY.csv_lookup.csv")
freq_min("dsp_rpt.dat_slice.csv_select-CONTROL.csv_lookup.csv") | [
"[email protected]"
]
| |
2c9cc86f812212a89a8a7ef7beb3f89062ce51fa | af8c27af80884e5d0d4ccb51ba469802bce9e8f3 | /hari/urls.py | d3b079dd634fa575e3ada9ff0f1f9f73e1458429 | []
| no_license | HariVardhanReddy123/Django | 8b8b1bcb011d5a807b37597803dea392d92661bd | a12555fd2191997469b8032c6696a404560203a5 | refs/heads/master | 2023-07-07T23:15:46.388132 | 2021-08-22T11:12:13 | 2021-08-22T11:12:13 | 398,777,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 788 | py | """hari URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('home.urls'))
]
| [
"[email protected]"
]
| |
b7042ac12c864cf87d88242eccf186629d33bcf7 | 5c2b39d11d201110ec58fe04d96577189f768928 | /WebServerSocketPt2.py | c7148a56e2e995e7279459270e3b017dd79a71c6 | []
| no_license | Thaileaf/WebServer | 4d34b02d16975522c547a76a00985a637b7170f2 | 5be24e60bfec9c5d788405b2432c80ebea7525be | refs/heads/main | 2023-02-24T07:56:32.906769 | 2021-01-24T08:10:00 | 2021-01-24T08:10:00 | 332,134,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,017 | py | import io
import socket
import sys
class WSGIServer:
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1
def __init__(self, server_address):
        # Initializes the listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
        # Allow quick address reuse (SO_REUSEADDR = 1) so restarts don't hit "address already in use"
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Binds address to socket
listen_socket.bind(server_address)
# Listen to (request size) amount of requests
listen_socket.listen(self.request_queue_size)
# Gets server host name and port
host, port = self.listen_socket.getsockname()[:2]
# Get fully qualified domain name
self.server_name = socket.getfqdn(host)
self.server_port = port
self.headers_set = []
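
    # --- Not part of the original file: a minimal sketch of the request loop
    # this class still needs. The method names and single-request handling
    # below are assumptions, not the tutorial's actual continuation. ---
    def serve_forever(self):
        while True:
            # Block until a client connects, then handle exactly one request
            self.client_connection, client_address = self.listen_socket.accept()
            self.handle_one_request()

    def handle_one_request(self):
        # Read the raw request bytes, log them, and close the connection
        request_data = self.client_connection.recv(1024)
        print(request_data.decode('utf-8'))
        self.client_connection.close()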
| [
"[email protected]"
]
| |
15513b01280cc9299dd44d32f89de0877a1a65ee | 41b743de20560539b3da72730ea7a3202b496225 | /subcat/urls.py | c435b509c989147d90f2c15a556a3fa1ccb2d7b9 | []
| no_license | V1TO-C/Django_Tutorial | ea379d53d48683393708e59eb8371c30b32a58c1 | 123801be0ca1eb5debc3329dabed6d313fb3b7b9 | refs/heads/master | 2023-01-04T22:23:04.775915 | 2020-10-28T17:54:36 | 2020-10-28T17:54:36 | 301,209,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^panel/subcat_list/$', views.subcat_list, name='subcat_list'),
url(r'^panel/subcat_add/$', views.subcat_add, name='subcat_add'),
url(r'^panel/subcat_delete/(?P<pk>\d+)$', views.subcat_delete, name='subcat_delete'),
] | [
"[email protected]"
]
| |
dde81e93ee1d134f92771c4281c3c3de4a81daac | 94478c8ecd9b2f4a9b0c37c7c43b3061d971c0ba | /test/test_parser.py | ef7b34e21a0640bf90749b4300f76e853b49b0a9 | [
"MIT"
]
| permissive | Larryun/coursenotify | fd7854774bce8f2d8d785e790726cbcee77dca69 | e82794ba83d42c2cca9b2dc7c6e6108dcecaba54 | refs/heads/master | 2023-02-12T08:32:18.950579 | 2021-01-13T06:18:18 | 2021-01-13T06:18:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | import unittest
from cn_v2.parser.course import *
class TestParser(unittest.TestCase):
def test_run(self):
p = CourseParser("../config/dev.yaml", CourseParser.DA)
# soup = p.get_soup()
# p.save_soup(soup)
result = list(set(p.parse()))
print(len(result))
| [
"[email protected]"
]
| |
c7c0b024c76f35d87b2037f5b47756c06d171305 | d5e251fee00fa17ceb1834a37ac2c5d0426cfab6 | /firstapp/untitled.py | 48dab678758d1fe41b17861e5d70740e94a17adc | []
| no_license | unique1o1/SmartSociety | 068382dd203489c607cee9dbeb0db94268c4c03d | 03b6b5fe8e2eb015cd4125c99a16c8d8988cc346 | refs/heads/master | 2021-01-21T06:39:46.820830 | 2019-03-08T13:00:38 | 2019-03-08T13:00:38 | 83,269,214 | 0 | 1 | null | 2017-02-27T07:59:56 | 2017-02-27T04:48:43 | CSS | UTF-8 | Python | false | false | 233 | py | from pyfirmata import Arduino, util
class runcommmand():
    def __init__(self):
        # Connect to the Arduino and read digital pin 13 in input mode
        board = Arduino('/dev/ttyACM0')
        pin = board.get_pin('d:13:i')
        # __init__ must return None; store the reading on the instance instead
        self.value = pin.read()
def main():
    hey = runcommmand()
    print(hey.value)
if __name__=="__main__":
main() | [
"[email protected]"
]
| |
73ca263730fd3355239897a0ff055657d5d6bcb3 | f22fa4e546e2e3c441429ffbc9639411b858a903 | /pylex/visit_nodes.py | 6843396bd1d3d68a6cbd640e268caa8d1f85d4fc | []
| no_license | aleert/pylex | 47f38f0554abfce06b2676d33c0f9fd34e99dff9 | b13097247e3ef03de60c168fc4cbf0f47e6b0d3c | refs/heads/master | 2020-05-19T04:23:25.128638 | 2019-05-17T21:20:38 | 2019-05-17T21:20:38 | 184,823,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,114 | py | # -*- coding: utf-8 -*-
import ast
from ast import NodeVisitor
from pathlib import Path
from typing import Iterable
from pylex import visitor_mixins
NODE_TYPES = {
'FunctionDef': visitor_mixins.FunctionDefMixin,
'function': visitor_mixins.FunctionDefMixin,
'ClassDef': visitor_mixins.ClassDefMixin,
'class': visitor_mixins.ClassDefMixin,
'Assign': visitor_mixins.AssignMixin,
'assign': visitor_mixins.AssignMixin,
}
def count_pt_of_speech_in_tree(
tree_path: Path,
node_types: Iterable,
target_part='VB',
exclude_dunder=True,
exclude_private=False,
):
"""Return (Counter(matching_names), num_nodes_explored) tuple."""
mixins = []
for node_type in node_types:
if node_type in NODE_TYPES.keys():
mixins.append(NODE_TYPES[node_type])
mixins.append(NodeVisitor)
Visitor = type('Visitor', tuple(mixins), {})
tree = ast.parse(tree_path.read_bytes())
visitor = Visitor(target_part, exclude_dunder=exclude_dunder, exclude_private=exclude_private)
visitor.visit(tree)
return visitor.counter, visitor.nodes_explored
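# Example usage (hypothetical file path; 'function' maps to FunctionDefMixin
# via NODE_TYPES above):
# from pathlib import Path
# counter, explored = count_pt_of_speech_in_tree(
#     Path('example.py'), node_types=['function'], target_part='VB')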
| [
"[email protected]"
]
| |
9891cf6da61e8f98c7f2e72b7c09beb73584ed3a | b1e6e029dee2c8c2d9c2439e4295b85bfed36469 | /zad1.py | 51d35c733991ac4582cf130eb7f481e5da9d4121 | []
| no_license | InsaneM666/cw2_programowanie | b12d9ef50634bc6594be64e37a863e271d29a57b | a86e7c99e85d4f6a920460f40058629f425ba1bd | refs/heads/master | 2022-09-11T23:24:34.641223 | 2014-11-14T11:55:22 | 2014-11-14T11:55:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,937 | py | import time
#The user gives any text file, assuming it is in the same folder as the program; otherwise the full path would have to be given
sciezka=raw_input("Enter the name of the file to analyse (including the extension!): ")
plik=open(str(sciezka),"r")
tekst=[] #the input file as read, before any cleaning
for linia in plik:
    linijka=linia.split(" ")
    tekst.append(linijka)
plik.close()
zn=0 #flag for punctuation marks and line breaks
tekst2=[] #text without punctuation marks and line breaks, built in the loop below; it also lowercases capital letters
for i in xrange(0,len(tekst)):
    for slowo in tekst[i]:
        zn=0 #reset the flag for every word, so one word's punctuation does not leak into the next
        for znak in slowo:
            if znak==".":
                zn=1
            elif znak==",":
                zn=2
            elif znak=="\n":
                zn=3
        if zn==1:
            tekst2.append(str(slowo).replace(".","").lower())
        elif zn==2:
            tekst2.append(str(slowo).replace(",","").lower())
        elif zn==3:
            tekst2.append(str(slowo).replace("\n","").lower())
        else:
            tekst2.append(str(slowo).lower())
#Set of unique words, excluding empty strings and bare spaces counted as words
unikalne=set(tekst2)
unikalne.discard("")
unikalne.discard(" ")
wynik=[] #result list holding (word, number of occurrences in the text) tuples
for element in unikalne:
    krotka=(element,tekst2.count(element))
    wynik.append(krotka)
#Append the statistics to a file
plik=open("statystyki.txt","a")
plik.writelines("\nSTATISTICS for the text file, as of " + time.strftime("%H:%M on %d.%m.%Y")+"\n")
for krotka in wynik:
    if krotka[1]!=1:
        plik.writelines( "Word '" +str(krotka[0]) + "' occurs "+ str(krotka[1])+ " times.\n")
    else:
        plik.writelines( "Word '" +str(krotka[0]) + "' occurs "+ str(krotka[1])+ " time.\n")
plik.close()
print "Statistics computed and appended to the file 'statystyki.txt'." | [
"[email protected]"
]
| |
e409feb2418cef1b05aab8d5cafb1f0a54bf02ae | 0df96f1d85717a72c9b989e133845a6c6bb73129 | /src/main.py | 02cc3d754b0824331b92fbd79e5878bf3efc9df2 | []
| no_license | madrigals1/table_api | 212aac3cfaedd1286486661cb80234be12935f7b | 0f4e230dee1de6c7f700444b7b2b86a420ce4c70 | refs/heads/master | 2023-03-28T11:28:05.237635 | 2021-04-01T15:10:08 | 2021-04-01T15:10:08 | 308,363,547 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | from flask import Flask, jsonify
from src.utils import create_png_from_dict
from flask import request
app = Flask(__name__)
@app.route("/")
def index():
return jsonify(
detail="Table API is working. Use /convert to convert dict into table PNG"
)
@app.route("/convert", methods=["POST", "GET"])
def convert():
if request.method == "GET":
return jsonify(
detail="Please, make POST request and provide 'table' in request body"
)
data = request.json
table = data.get("table")
if not table:
return jsonify(detail="Please, provide 'table' in request body")
return {"link": create_png_from_dict(table)}
| [
"[email protected]"
]
| |
8e40d17b1e7ee23dfb1b35a3e3c00db329b41ea3 | dedcad2f2cb8004437cdd63b3560b0c65a83b0f7 | /python/Juno-Show/border1-show.py | 958566a7abd3a1a724a3feba04592240730c64ff | []
| no_license | dlakey/Don-RePo | e488f54c3bafe2a3eb297aca8b5ab500ff4048fe | 1afdeafb24a356b6fc91813117949339c635107d | refs/heads/master | 2021-01-20T02:25:46.538963 | 2018-05-05T15:53:23 | 2018-05-05T15:53:23 | 89,405,034 | 0 | 0 | null | 2017-05-22T04:21:52 | 2017-04-25T20:43:34 | Roff | UTF-8 | Python | false | false | 235 | py | import jnpr.junos
import os
import sys
from pprint import pprint
from jnpr.junos import Device
r0 = Device(host='172.16.1.250',user='root',password='Juniper').open()
response = r0.cli('show configuration | display set')
print response
| [
"[email protected]"
]
| |
2c730e56bc9a5b4983cd6e076bc899c0964737a6 | 58e15fca047ece58bfa391a6efb88ec1b46997b2 | /venv/lib/python3.8/site-packages/selenium/webdriver/common/actions/wheel_input.py | 64bb6db22df1194f5b148b59bd4fabff2289bdc4 | []
| no_license | su2708/WebScraper | 6594f102336de0bb0ebc2c3feafdb4d87da52ad3 | a084afce4a8dacb4d4beb43924aa85a831c3b4ec | refs/heads/main | 2023-02-20T15:43:37.246448 | 2023-01-15T17:26:28 | 2023-01-15T17:26:28 | 323,025,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | /home/runner/.cache/pip/pool/f6/5c/a4/99012d79ffc30cf1b681f61f329c7ebd8c280d8c5fcc537385cf91bd63 | [
"[email protected]"
]
| |
b1ce34d6104532c2b825d24a36c9abbea3db79c8 | 67699ac3e2813dfd42c02489d67bdbd3d9d28b41 | /apigo.py | b81e7be6939f7f5bc0f99756796aa02cb223f16a | [
"CC0-1.0"
]
| permissive | christianbos/smsmap | 884b1bc13901f688986f71259b2c6e69cafa0de7 | 650efcdeffdf337d1d8748f042b6246a7300c407 | refs/heads/master | 2016-08-11T17:02:46.684911 | 2016-03-06T19:13:03 | 2016-03-06T19:13:03 | 53,243,002 | 0 | 0 | null | 2016-03-06T16:59:06 | 2016-03-06T07:23:58 | Python | UTF-8 | Python | false | false | 311 | py | import requests
url = 'https://maps.googleapis.com/maps/api/geocode/json'
params = {'sensor': 'false', 'address': 'rio suchiate, colinas del lago'}
r = requests.get(url, params=params)
results = r.json()['results']
location = results[0]['geometry']['location']
lat, lng = location['lat'], location['lng']  # pull out the coordinates
print(location)
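# Note: Google's Geocoding API now requires an API key (sent as params['key']);
# the keyless request above is likely to be rejected on current endpoints.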
| [
"[email protected]"
]
| |
ab91542f9a2835ef223506eb04c12d468ce365f8 | b9676dfd72aaa92e0167ea305bb0a46e022d4d9e | /django-essencial/0-django-startproject/hello/hello/urls.py | 283d6ff750922a6c57d77f38acdc55908cdd53e4 | []
| no_license | GuidoBR/learning-python | c3ea91419adbc80e01c75b744c02c5e7290c4479 | 032ac862bf0660c535879e1985ca63a10296df17 | refs/heads/master | 2023-05-26T01:01:54.654117 | 2022-10-28T12:51:47 | 2022-10-28T12:51:47 | 25,278,144 | 0 | 0 | null | 2023-05-22T21:33:34 | 2014-10-15T23:36:37 | Python | UTF-8 | Python | false | false | 761 | py | """hello URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
| [
"[email protected]"
]
| |
54dfeffbc029f3eb10e189a93c48b3df35ca9d62 | 74313467bf7c950897d11ee131595e87b7aeabb9 | /apps/pipitor/telega/admin.py | 79810af556243052f7f19494cc39a722670adf60 | []
| no_license | cobravsninja/telekafka | 8442cfff0b890580db6f2c830fea42c24ce1813a | d280036fa664f2fc2b36dde1457540b92e4431a3 | refs/heads/master | 2020-06-19T04:42:58.238408 | 2019-07-17T08:33:14 | 2019-07-17T08:33:14 | 196,566,576 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | from django.contrib import admin
# Register your models here.
from .models import (
ActiveGoogleSearch,
ActiveInstagramSearch,
GoogleUrl,
InstagramUrl
)
admin.site.register(ActiveGoogleSearch)
admin.site.register(ActiveInstagramSearch)
admin.site.register(GoogleUrl)
admin.site.register(InstagramUrl)
| [
"[email protected]"
]
| |
450e45abb2e6f78121b9289dfc49ce668ece839a | 5fa293d0ef6f3bdc4791d869cf503b107cc3a5fb | /soap_client/urls.py | 5e70df6a51ac6d70d04e1a6e01da2fd63ec1b6aa | [
"MIT"
]
| permissive | alldevic/nav_info | 0779ab116dd7718ac1d63fecfbc2d47dd8863c22 | 32681d1cd3ad43472c8f7fb49922094c4045111c | refs/heads/master | 2023-08-27T00:23:57.233994 | 2021-11-05T15:24:48 | 2021-11-05T15:24:48 | 278,404,502 | 0 | 0 | MIT | 2021-11-05T15:24:49 | 2020-07-09T15:37:41 | Python | UTF-8 | Python | false | false | 317 | py | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from soap_client import views
router = DefaultRouter()
router.register('raw', views.RawViewSet, basename='raw')
router.register('data', views.DataViewSet, basename='data')
urlpatterns = [
path('', include(router.urls)),
]
| [
"[email protected]"
]
| |
9dca95f0eadc9b7576cb73579313ffa2ab36aaa3 | 444670e6d73ae9d95c0bb0459c8e02423876d2fb | /pycharm/LoginSite/mylogin/migrations/0001_initial.py | 08c4cb3c5cfd13d3c86c5e92dc2a59b4d175f342 | []
| no_license | zhangxingxing12138/web-pycharm | c8b6822be95bfb904f81f772185fe9e17fc77fc3 | 5f212e6805b0734aa3c791830526a95b24a930f4 | refs/heads/master | 2020-04-04T18:03:45.458309 | 2018-11-08T12:03:51 | 2018-11-08T12:03:51 | 156,148,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-11-06 00:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, unique=True)),
('password', models.CharField(max_length=256)),
('email', models.EmailField(max_length=254, unique=True)),
('sex', models.CharField(choices=[('male', '男'), ('female', '女')], default='男', max_length=32)),
('c_time', models.DateTimeField(auto_now_add=True)),
],
options={
'ordering': ['-c_time'],
'verbose_name': '用户',
'verbose_name_plural': '用户',
},
),
]
| [
"[email protected]"
]
| |
12d339f30ce38a04f1c2b747619c8fd0a1edc622 | ebed065ca38c964aef3173138542e06d53f257f5 | /web_chat/forms.py | 2d64241e353a0de6854e7376fca0fb5f6b1f2895 | []
| no_license | Megachell0/simple-chat-api | d40e8de7e3e682480b393ef53e36edf942358caf | ed10d365a7e5ea7aa2faeb5853ccc06ebf989934 | refs/heads/master | 2023-02-10T12:18:24.769993 | 2021-01-03T20:05:06 | 2021-01-03T20:05:06 | 326,023,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py |
# Imports required once the commented-out registration form below is re-enabled
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User

'''
class UserRegisterForm(UserCreationForm):
email = forms.EmailField(label='E-mail')
class Meta:
model = User
fields = ('username', 'email', 'password1', 'password2')
widgets = {
'username':forms.TextInput(attrs ={"class":"form-control"}),
'email':forms.EmailInput(attrs ={"class":"form-control"}),
'password1':forms.PasswordInput(attrs ={"class":"form-control"}),
'password2':forms.PasswordInput(attrs ={"class":"form-control"}),
}
def __init__(self, *args, **kwargs):
super(UserRegisterForm, self).__init__(*args, **kwargs)
self.fields['username'].widget = forms.TextInput(attrs ={"class":"form-control"})
self.fields['email'].widget = forms.EmailInput(attrs ={"class":"form-control"})
self.fields['password1'].widget = forms.PasswordInput(attrs ={"class":"form-control"})
self.fields['password2'].widget = forms.PasswordInput(attrs ={"class":"form-control"})
''' | [
"[email protected]"
]
| |
da8bfd38abbbf54a7826c085acb0360c43d51b33 | 2cc7d7b2e2219a7b7d00027ec5c3106eab10a30c | /send_winks.py | 2659ef31381fd850b7e1675a9a88608abf3c1812 | []
| no_license | 9whoami/match_parser | 409c6831edb232b70b97d0a560b8007bccf2b832 | 5bdffd990d09703861924f5856875ede7bc66258 | refs/heads/master | 2020-06-10T09:11:41.156289 | 2016-12-08T21:37:29 | 2016-12-08T21:37:29 | 75,976,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,167 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import settings as config
import sys
from argparse import ArgumentParser
from imports import base_error
from imports import Logger
from imports import ThreadPool
from imports import ApiMethods
from imports import WebDriver
from sites.match import signin
from sites.match import send_winks
__author__ = 'whoami'
__version__ = '0.0.0'
__date__ = '07.03.16 18:21'
__description__ = """
Description for the python module
"""
@ThreadPool.thread
def start_winks(**kwargs):
api = kwargs['api'] = ApiMethods(custom_id=kwargs['uid'])
logger = Logger(api)
try:
browser = WebDriver(**kwargs)
if not api.winks_continue():
            raise base_error.WorkingEnd(
                message="Work finished by the user", api=api)
signin(browser, **kwargs)
send_winks(browser, **kwargs)
except Exception as e:
        logger.debug("Caught an exception with message {!r}".format(str(e)))
finally:
base_error.raising(base_error.WorkingEnd, api=api)
return
def main(uid, btn_freeze):
thread_pool = ThreadPool(max_threads=config.thread_count)
api = ApiMethods()
match_data = api.get_signin_data(**dict(id=uid))
timeout_winks = [match_data.__next__(), match_data.__next__()]
for signin_data in match_data:
working_data = dict(
uid=signin_data['id'],
sleep=timeout_winks,
login=signin_data['email'],
acc_pass=signin_data['acc_pass'],
name=signin_data['name'],
# proxy="69.113.241.140:9752",
proxy=signin_data.get('socks'),
proxy_type=config.proxy_type,
user_agent=signin_data['user_agent'],
online=bool(signin_data.get('online')),
btn_freeze=btn_freeze,
answer=signin_data.get('answer'))
start_winks(**working_data)
thread_pool.loop()
def create_parser():
    parser = ArgumentParser(prog='WinksForFree',
                            description='Sends winks to users on the site',
                            epilog='''(c) April 2016. As always, the author of this
                            program bears no responsibility for anything.''',
                            add_help=False
                            )
)
    parser.add_argument('-h', '--help', action='help', help='Show this help message')
    parser.add_argument('-i', '--id', nargs='?', default=None, type=int,
                        help='The id parameter', metavar='ID')
    parser.add_argument('-f', '--freeze', nargs='?', default=20, type=int,
                        help='Timeout after pressing the button',
                        metavar='COUNT')
metavar='КОЛИЧЕСТВО')
return parser
if __name__ in "__main__":
arguments = create_parser().parse_args(sys.argv[1:])
uid = arguments.id
btn_freeze = arguments.freeze
if uid is not None:
config.thread_count = 1
# uid = int(input('Type id:'))
main(uid, btn_freeze)
| [
"[email protected]"
]
| |
ec6651c3903b2913600b233e85032b05d63dd901 | 3f1873d63ccb215ec88105661f1cd44927e222b3 | /dl4mir/chords/models.py | 2a95e79cc0b937d4a3c2becd586573782efa5c39 | []
| no_license | bmcfee/dl4mir | 50dd467687991163a819fc7560b9d9f9c4c6e568 | c9438a221d1e853c25e1867673c1c3efca2e1756 | refs/heads/master | 2023-07-11T10:35:54.041154 | 2016-01-20T03:57:58 | 2016-01-20T03:57:58 | 69,293,002 | 1 | 0 | null | 2016-09-26T21:09:15 | 2016-09-26T21:09:14 | null | UTF-8 | Python | false | false | 38,495 | py | import optimus
import dl4mir.chords.labels as L
import numpy as np
TIME_DIM = 20
VOCAB = 157
GRAPH_NAME = "classifier-V%03d" % VOCAB
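# Each builder below returns a pair of optimus graphs: a trainer with the loss
# and update rules wired in, and a predictor sharing the same parameter nodes.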
def classifier_init(nodes):
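    """Randomly initialize all node parameters, skipping classifier bias vectors."""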
for n in nodes:
for p in n.params.values():
if 'classifier' in n.name and 'bias' in p.name:
continue
optimus.random_init(p)
def i8c4b10_nll_dropout(size='large'):
k0, k1, k2 = dict(
large=(24, 48, 64))[size]
input_data = optimus.Input(
name='cqt',
shape=(None, 1, 8, 252))
chord_idx = optimus.Input(
name='chord_idx',
shape=(None,),
dtype='int32')
learning_rate = optimus.Input(
name='learning_rate',
shape=None)
dropout = optimus.Input(
name='dropout',
shape=None)
# 1.2 Create Nodes
layer0 = optimus.Conv3D(
name='layer0',
input_shape=input_data.shape,
weight_shape=(k0, None, 3, 13),
pool_shape=(1, 3),
act_type='relu')
layer1 = optimus.Conv3D(
name='layer1',
input_shape=layer0.output.shape,
weight_shape=(k1, None, 3, 37),
act_type='relu')
layer2 = optimus.Conv3D(
name='layer2',
input_shape=layer1.output.shape,
weight_shape=(k2, None, 3, 33),
act_type='relu')
layer3 = optimus.Conv3D(
name='layer3',
input_shape=layer2.output.shape,
weight_shape=(10, None, 2, 1),
act_type='relu')
chord_classifier = optimus.Conv3D(
name='chord_classifier',
input_shape=layer3.output.shape,
weight_shape=(13, None, 1, 1),
act_type='linear')
flatten = optimus.Flatten('flatten', 2)
null_classifier = optimus.Affine(
name='null_classifier',
input_shape=layer3.output.shape,
output_shape=(None, 1),
act_type='linear')
cat = optimus.Concatenate('concatenate', num_inputs=2, axis=1)
softmax = optimus.Softmax('softmax')
param_nodes = [layer0, layer1, layer2, layer3,
null_classifier, chord_classifier]
misc_nodes = [flatten, cat, softmax]
# 1.1 Create Loss
likelihoods = optimus.SelectIndex(name='likelihoods')
log = optimus.Log(name='log')
neg = optimus.Gain(name='gain')
neg.weight.value = -1.0
loss = optimus.Mean(name='negative_log_likelihood')
loss_nodes = [likelihoods, log, neg, loss]
total_loss = optimus.Output(name='total_loss')
layer0.enable_dropout()
layer1.enable_dropout()
layer2.enable_dropout()
# 2. Define Edges
base_edges = [
(input_data, layer0.input),
(layer0.output, layer1.input),
(layer1.output, layer2.input),
(layer2.output, layer3.input),
(layer3.output, chord_classifier.input),
(layer3.output, null_classifier.input),
(chord_classifier.output, flatten.input),
(flatten.output, cat.input_0),
(null_classifier.output, cat.input_1),
(cat.output, softmax.input)]
trainer_edges = optimus.ConnectionManager(
base_edges + [
(dropout, layer0.dropout),
(dropout, layer1.dropout),
(dropout, layer2.dropout),
(softmax.output, likelihoods.input),
(chord_idx, likelihoods.index),
(likelihoods.output, log.input),
(log.output, neg.input),
(neg.output, loss.input),
(loss.output, total_loss)])
update_manager = optimus.ConnectionManager(
map(lambda n: (learning_rate, n.weights), param_nodes[:-1]) +
map(lambda n: (learning_rate, n.bias), param_nodes[:-1]))
trainer = optimus.Graph(
name=GRAPH_NAME,
inputs=[input_data, chord_idx, learning_rate, dropout],
nodes=param_nodes + misc_nodes + loss_nodes,
connections=trainer_edges.connections,
outputs=[total_loss],
loss=total_loss,
updates=update_manager.connections,
verbose=True)
classifier_init(param_nodes[:-1])
semitones = L.semitone_matrix(157)[:13, 2:]
chord_classifier.weights.value = semitones.reshape(13, 10, 1, 1)
posterior = optimus.Output(name='posterior')
predictor_edges = optimus.ConnectionManager(
base_edges + [(softmax.output, posterior)])
layer0.disable_dropout()
layer1.disable_dropout()
layer2.disable_dropout()
predictor = optimus.Graph(
name=GRAPH_NAME,
inputs=[input_data],
nodes=param_nodes + misc_nodes,
connections=predictor_edges.connections,
outputs=[posterior])
return trainer, predictor
def iXc3_nll(n_in, size='large', use_dropout=False):
k0, k1, k2 = dict(
small=(10, 20, 40),
med=(12, 24, 48),
large=(16, 32, 64),
xlarge=(20, 40, 80),
xxlarge=(24, 48, 96))[size]
n0, n1, n2 = {
1: (1, 1, 1),
4: (3, 2, 1),
8: (5, 3, 2),
10: (3, 3, 1),
20: (5, 5, 1)}[n_in]
p0, p1, p2 = {
1: (1, 1, 1),
4: (1, 1, 1),
8: (1, 1, 1),
10: (2, 2, 1),
12: (2, 2, 1),
20: (2, 2, 2)}[n_in]
input_data = optimus.Input(
name='data',
shape=(None, 1, n_in, 252))
chord_idx = optimus.Input(
name='class_idx',
shape=(None,),
dtype='int32')
learning_rate = optimus.Input(
name='learning_rate',
shape=None)
inputs = [input_data, chord_idx, learning_rate]
dropout = optimus.Input(
name='dropout',
shape=None)
# 1.2 Create Nodes
layer0 = optimus.Conv3D(
name='layer0',
input_shape=input_data.shape,
weight_shape=(k0, None, n0, 13),
pool_shape=(p0, 3),
act_type='relu')
layer1 = optimus.Conv3D(
name='layer1',
input_shape=layer0.output.shape,
weight_shape=(k1, None, n1, 37),
pool_shape=(p1, 1),
act_type='relu')
layer2 = optimus.Conv3D(
name='layer2',
input_shape=layer1.output.shape,
weight_shape=(k2, None, n2, 33),
pool_shape=(p2, 1),
act_type='relu')
dropout_edges = []
if use_dropout:
layer0.enable_dropout()
layer1.enable_dropout()
layer2.enable_dropout()
inputs += [dropout]
dropout_edges += [(dropout, layer0.dropout),
(dropout, layer1.dropout),
(dropout, layer2.dropout)]
chord_classifier = optimus.Conv3D(
name='chord_classifier',
input_shape=layer2.output.shape,
weight_shape=(13, None, 1, 1),
act_type='linear')
flatten = optimus.Flatten('flatten', 2)
null_classifier = optimus.Affine(
name='null_classifier',
input_shape=layer2.output.shape,
output_shape=(None, 1),
act_type='linear')
cat = optimus.Concatenate('concatenate', num_inputs=2, axis=1)
softmax = optimus.Softmax('softmax')
prior = optimus.Multiply("prior", weight_shape=(1, 157), broadcast=[0])
prior.weight.value = np.ones([1, 157])
param_nodes = [layer0, layer1, layer2, null_classifier, chord_classifier]
misc_nodes = [flatten, cat, softmax, prior]
# 1.1 Create Loss
likelihoods = optimus.SelectIndex(name='likelihoods')
log = optimus.Log(name='log')
neg = optimus.Multiply(name='gain', weight_shape=None)
neg.weight.value = -1.0
loss = optimus.Mean(name='negative_log_likelihood')
loss_nodes = [likelihoods, log, neg, loss]
total_loss = optimus.Output(name='total_loss')
# features = optimus.Output(name='features')
posterior = optimus.Output(name='posterior')
# 2. Define Edges
base_edges = [
(input_data, layer0.input),
(layer0.output, layer1.input),
(layer1.output, layer2.input),
(layer2.output, chord_classifier.input),
(layer2.output, null_classifier.input),
# (layer2.output, features),
(chord_classifier.output, flatten.input),
(flatten.output, cat.input_0),
(null_classifier.output, cat.input_1),
(cat.output, softmax.input),
(softmax.output, prior.input),
(prior.output, posterior)]
trainer_edges = optimus.ConnectionManager(
base_edges + dropout_edges + [
(softmax.output, likelihoods.input),
(chord_idx, likelihoods.index),
(likelihoods.output, log.input),
(log.output, neg.input),
(neg.output, loss.input),
(loss.output, total_loss)])
update_manager = optimus.ConnectionManager(
map(lambda n: (learning_rate, n.weights), param_nodes) +
map(lambda n: (learning_rate, n.bias), param_nodes))
classifier_init(param_nodes)
trainer = optimus.Graph(
name=GRAPH_NAME,
inputs=inputs,
nodes=param_nodes + misc_nodes + loss_nodes,
connections=trainer_edges.connections,
outputs=[total_loss, posterior], # features],
loss=total_loss,
updates=update_manager.connections,
verbose=True)
if use_dropout:
layer0.disable_dropout()
layer1.disable_dropout()
layer2.disable_dropout()
predictor = optimus.Graph(
name=GRAPH_NAME,
inputs=[input_data],
nodes=param_nodes + misc_nodes,
connections=optimus.ConnectionManager(base_edges).connections,
outputs=[posterior], # features],
verbose=True)
return trainer, predictor
def iXc3_fc_nll(n_in, size='large', use_dropout=False):
k0, k1, k2 = dict(
small=(10, 20, 40),
med=(12, 24, 48),
large=(16, 32, 64),
xlarge=(20, 40, 80),
xxlarge=(24, 48, 96))[size]
n0, n1, n2 = {
1: (1, 1, 1),
4: (3, 2, 1),
8: (5, 3, 2),
10: (3, 3, 1),
20: (5, 5, 1)}[n_in]
p0, p1, p2 = {
1: (1, 1, 1),
4: (1, 1, 1),
8: (1, 1, 1),
10: (2, 2, 1),
12: (2, 2, 1),
20: (2, 2, 2)}[n_in]
input_data = optimus.Input(
name='data',
shape=(None, 1, n_in, 252))
chord_idx = optimus.Input(
name='class_idx',
shape=(None,),
dtype='int32')
learning_rate = optimus.Input(
name='learning_rate',
shape=None)
inputs = [input_data, chord_idx, learning_rate]
dropout = optimus.Input(
name='dropout',
shape=None)
# 1.2 Create Nodes
layer0 = optimus.Conv3D(
name='layer0',
input_shape=input_data.shape,
weight_shape=(k0, None, n0, 13),
pool_shape=(p0, 3),
act_type='relu')
layer1 = optimus.Conv3D(
name='layer1',
input_shape=layer0.output.shape,
weight_shape=(k1, None, n1, 37),
pool_shape=(p1, 1),
act_type='relu')
layer2 = optimus.Conv3D(
name='layer2',
input_shape=layer1.output.shape,
weight_shape=(k2, None, n2, 33),
pool_shape=(p2, 1),
act_type='relu')
dropout_edges = []
if use_dropout:
layer0.enable_dropout()
layer1.enable_dropout()
layer2.enable_dropout()
inputs += [dropout]
dropout_edges += [(dropout, layer0.dropout),
(dropout, layer1.dropout),
(dropout, layer2.dropout)]
chord_classifier = optimus.Affine(
name='chord_classifier',
input_shape=layer2.output.shape,
output_shape=(None, VOCAB),
act_type='softmax')
prior = optimus.Multiply("prior", weight_shape=(1, 157), broadcast=[0])
prior.weight.value = np.ones([1, 157])
param_nodes = [layer0, layer1, layer2, chord_classifier]
misc_nodes = [prior]
# 1.1 Create Loss
nll = optimus.NegativeLogLikelihoodLoss(name='negative_log_likelihood')
total_loss = optimus.Output(name='total_loss')
# features = optimus.Output(name='features')
posterior = optimus.Output(name='posterior')
# 2. Define Edges
base_edges = [
(input_data, layer0.input),
(layer0.output, layer1.input),
(layer1.output, layer2.input),
(layer2.output, chord_classifier.input),
(chord_classifier.output, prior.input),
(prior.output, posterior)]
trainer_edges = optimus.ConnectionManager(
base_edges + dropout_edges + [
(chord_classifier.output, nll.likelihoods),
(chord_idx, nll.index),
(nll.output, total_loss)])
update_manager = optimus.ConnectionManager(
map(lambda n: (learning_rate, n.weights), param_nodes) +
map(lambda n: (learning_rate, n.bias), param_nodes))
classifier_init(param_nodes)
trainer = optimus.Graph(
name=GRAPH_NAME,
inputs=inputs,
nodes=param_nodes + misc_nodes + [nll],
connections=trainer_edges.connections,
outputs=[total_loss, posterior],
loss=total_loss,
updates=update_manager.connections,
verbose=True)
if use_dropout:
layer0.disable_dropout()
layer1.disable_dropout()
layer2.disable_dropout()
predictor = optimus.Graph(
name=GRAPH_NAME,
inputs=[input_data],
nodes=param_nodes + misc_nodes,
connections=optimus.ConnectionManager(base_edges).connections,
outputs=[posterior],
verbose=True)
return trainer, predictor
def iXc3_nll2(n_in, size='large', use_dropout=False):
k0, k1, k2 = dict(
small=(10, 20, 40),
med=(12, 24, 48),
large=(16, 32, 64),
xlarge=(20, 40, 80),
xxlarge=(24, 48, 96))[size]
n0, n1, n2 = {
1: (1, 1, 1),
4: (3, 2, 1),
8: (5, 3, 2),
10: (3, 3, 1),
20: (5, 5, 1)}[n_in]
p0, p1, p2 = {
1: (1, 1, 1),
4: (1, 1, 1),
8: (1, 1, 1),
10: (2, 2, 1),
12: (2, 2, 1),
20: (2, 2, 2)}[n_in]
input_data = optimus.Input(
name='data',
shape=(None, 1, n_in, 252))
chord_idx = optimus.Input(
name='class_idx',
shape=(None,),
dtype='int32')
learning_rate = optimus.Input(
name='learning_rate',
shape=None)
inputs = [input_data, chord_idx, learning_rate]
dropout = optimus.Input(
name='dropout',
shape=None)
# 1.2 Create Nodes
layer0 = optimus.Conv3D(
name='layer0',
input_shape=input_data.shape,
weight_shape=(k0, None, n0, 13),
pool_shape=(p0, 3),
act_type='relu')
layer1 = optimus.Conv3D(
name='layer1',
input_shape=layer0.output.shape,
weight_shape=(k1, None, n1, 37),
pool_shape=(p1, 1),
act_type='relu')
layer2 = optimus.Conv3D(
name='layer2',
input_shape=layer1.output.shape,
weight_shape=(k2, None, n2, 33),
pool_shape=(p2, 1),
act_type='relu')
dropout_edges = []
if use_dropout:
layer0.enable_dropout()
layer1.enable_dropout()
layer2.enable_dropout()
inputs += [dropout]
dropout_edges += [(dropout, layer0.dropout),
(dropout, layer1.dropout),
(dropout, layer2.dropout)]
chord_classifier = optimus.Conv3D(
name='chord_classifier',
input_shape=layer2.output.shape,
weight_shape=(13, None, 1, 1),
act_type='linear')
flatten = optimus.Flatten('flatten', 2)
null_classifier = optimus.Affine(
name='null_classifier',
input_shape=layer0.output.shape,
output_shape=(None, 1),
act_type='linear')
cat = optimus.Concatenate('concatenate', num_inputs=2, axis=1)
softmax = optimus.Softmax('softmax')
prior = optimus.Multiply("prior", weight_shape=(1, 157), broadcast=[0])
prior.weight.value = np.ones([1, 157])
param_nodes = [layer0, layer1, layer2, null_classifier, chord_classifier]
misc_nodes = [flatten, cat, softmax, prior]
# 1.1 Create Loss
likelihoods = optimus.SelectIndex(name='likelihoods')
log = optimus.Log(name='log')
neg = optimus.Multiply(name='gain', weight_shape=None)
neg.weight.value = -1.0
loss = optimus.Mean(name='negative_log_likelihood')
loss_nodes = [likelihoods, log, neg, loss]
total_loss = optimus.Output(name='total_loss')
posterior = optimus.Output(name='posterior')
# 2. Define Edges
base_edges = [
(input_data, layer0.input),
(layer0.output, layer1.input),
(layer1.output, layer2.input),
(layer2.output, chord_classifier.input),
(layer0.output, null_classifier.input),
(chord_classifier.output, flatten.input),
(flatten.output, cat.input_0),
(null_classifier.output, cat.input_1),
(cat.output, softmax.input),
(softmax.output, prior.input),
(prior.output, posterior)]
trainer_edges = optimus.ConnectionManager(
base_edges + dropout_edges + [
(softmax.output, likelihoods.input),
(chord_idx, likelihoods.index),
(likelihoods.output, log.input),
(log.output, neg.input),
(neg.output, loss.input),
(loss.output, total_loss)])
update_manager = optimus.ConnectionManager(
map(lambda n: (learning_rate, n.weights), param_nodes) +
map(lambda n: (learning_rate, n.bias), param_nodes))
classifier_init(param_nodes)
trainer = optimus.Graph(
name=GRAPH_NAME,
inputs=inputs,
nodes=param_nodes + misc_nodes + loss_nodes,
connections=trainer_edges.connections,
outputs=[total_loss, posterior],
loss=total_loss,
updates=update_manager.connections,
verbose=True)
if use_dropout:
layer0.disable_dropout()
layer1.disable_dropout()
layer2.disable_dropout()
predictor = optimus.Graph(
name=GRAPH_NAME,
inputs=[input_data],
nodes=param_nodes + misc_nodes,
connections=optimus.ConnectionManager(base_edges).connections,
outputs=[posterior],
verbose=True)
return trainer, predictor
def i8x1c3T_nll2(size, use_dropout=False):
k0, k1, k2 = dict(
small=(10, 20, 40),
med=(12, 24, 48),
large=(16, 32, 64),
xlarge=(20, 40, 80),
xxlarge=(24, 48, 96))[size]
n0, n1, n2 = (3, 3, 4)
p0, p1, p2 = (1, 1, 1)
input_data = optimus.Input(
name='data',
shape=(None, 8, 1, 252))
chord_idx = optimus.Input(
name='class_idx',
shape=(None,),
dtype='int32')
learning_rate = optimus.Input(
name='learning_rate',
shape=None)
inputs = [input_data, chord_idx, learning_rate]
dropout = optimus.Input(
name='dropout',
shape=None)
# 1.2 Create Nodes
transpose = optimus.Dimshuffle('rearrange', axes=(0, 2, 1, 3))
layer0 = optimus.Conv3D(
name='layer0',
input_shape=(None, 1, 8, 252),
weight_shape=(k0, None, n0, 13),
pool_shape=(p0, 3),
act_type='relu')
layer1 = optimus.Conv3D(
name='layer1',
input_shape=layer0.output.shape,
weight_shape=(k1, None, n1, 37),
pool_shape=(p1, 1),
act_type='relu')
layer2 = optimus.Conv3D(
name='layer2',
input_shape=layer1.output.shape,
weight_shape=(k2, None, n2, 33),
pool_shape=(p2, 1),
act_type='relu')
dropout_edges = []
if use_dropout:
layer0.enable_dropout()
layer1.enable_dropout()
layer2.enable_dropout()
inputs += [dropout]
dropout_edges += [(dropout, layer0.dropout),
(dropout, layer1.dropout),
(dropout, layer2.dropout)]
chord_classifier = optimus.Conv3D(
name='chord_classifier',
input_shape=layer2.output.shape,
weight_shape=(13, None, 1, 1),
act_type='linear')
flatten = optimus.Flatten('flatten', 2)
null_classifier = optimus.Affine(
name='null_classifier',
input_shape=layer0.output.shape,
output_shape=(None, 1),
act_type='linear')
cat = optimus.Concatenate('concatenate', num_inputs=2, axis=1)
softmax = optimus.Softmax('softmax')
prior = optimus.Multiply("prior", weight_shape=(1, 157), broadcast=[0])
prior.weight.value = np.ones([1, 157])
param_nodes = [layer0, layer1, layer2, null_classifier, chord_classifier]
misc_nodes = [transpose, flatten, cat, softmax, prior]
# 1.1 Create Loss
likelihoods = optimus.SelectIndex(name='likelihoods')
log = optimus.Log(name='log')
neg = optimus.Multiply(name='gain', weight_shape=None)
neg.weight.value = -1.0
loss = optimus.Mean(name='negative_log_likelihood')
loss_nodes = [likelihoods, log, neg, loss]
total_loss = optimus.Output(name='total_loss')
posterior = optimus.Output(name='posterior')
# 2. Define Edges
base_edges = [
(input_data, transpose.input),
(transpose.output, layer0.input),
(layer0.output, layer1.input),
(layer1.output, layer2.input),
(layer2.output, chord_classifier.input),
(layer0.output, null_classifier.input),
(chord_classifier.output, flatten.input),
(flatten.output, cat.input_0),
(null_classifier.output, cat.input_1),
(cat.output, softmax.input),
(softmax.output, prior.input),
(prior.output, posterior)]
trainer_edges = optimus.ConnectionManager(
base_edges + dropout_edges + [
(softmax.output, likelihoods.input),
(chord_idx, likelihoods.index),
(likelihoods.output, log.input),
(log.output, neg.input),
(neg.output, loss.input),
(loss.output, total_loss)])
update_manager = optimus.ConnectionManager(
map(lambda n: (learning_rate, n.weights), param_nodes) +
map(lambda n: (learning_rate, n.bias), param_nodes))
classifier_init(param_nodes)
trainer = optimus.Graph(
name=GRAPH_NAME,
inputs=inputs,
nodes=param_nodes + misc_nodes + loss_nodes,
connections=trainer_edges.connections,
outputs=[total_loss, posterior],
loss=total_loss,
updates=update_manager.connections,
verbose=True)
if use_dropout:
layer0.disable_dropout()
layer1.disable_dropout()
layer2.disable_dropout()
predictor = optimus.Graph(
name=GRAPH_NAME,
inputs=[input_data],
nodes=param_nodes + misc_nodes,
connections=optimus.ConnectionManager(base_edges).connections,
outputs=[posterior],
verbose=True)
return trainer, predictor
def i8x1a3T_nll2(size, use_dropout=False):
k0, k1, k2 = dict(
large=(2048, 2048, 40),)[size]
input_data = optimus.Input(
name='data',
shape=(None, 8, 1, 252))
chord_idx = optimus.Input(
name='class_idx',
shape=(None,),
dtype='int32')
learning_rate = optimus.Input(
name='learning_rate',
shape=None)
inputs = [input_data, chord_idx, learning_rate]
dropout = optimus.Input(
name='dropout',
shape=None)
# 1.2 Create Nodes
layer0 = optimus.Affine(
name='layer0',
input_shape=input_data.shape,
output_shape=(None, k0),
act_type='relu')
layer1 = optimus.Affine(
name='layer1',
input_shape=layer0.output.shape,
output_shape=(None, k1),
act_type='relu')
layer2 = optimus.Affine(
name='layer2',
input_shape=layer1.output.shape,
output_shape=(None, k2, 1, 12),
act_type='relu')
dropout_edges = []
if use_dropout:
layer0.enable_dropout()
layer1.enable_dropout()
layer2.enable_dropout()
inputs += [dropout]
dropout_edges += [(dropout, layer0.dropout),
(dropout, layer1.dropout),
(dropout, layer2.dropout)]
chord_classifier = optimus.Conv3D(
name='chord_classifier',
input_shape=layer2.output.shape,
weight_shape=(13, None, 1, 1),
act_type='linear')
flatten = optimus.Flatten('flatten', 2)
null_classifier = optimus.Affine(
name='null_classifier',
input_shape=layer0.output.shape,
output_shape=(None, 1),
act_type='linear')
cat = optimus.Concatenate('concatenate', num_inputs=2, axis=1)
softmax = optimus.Softmax('softmax')
prior = optimus.Multiply("prior", weight_shape=(1, 157), broadcast=[0])
prior.weight.value = np.ones([1, 157])
param_nodes = [layer0, layer1, layer2, null_classifier, chord_classifier]
misc_nodes = [flatten, cat, softmax, prior]
# 1.1 Create Loss
likelihoods = optimus.SelectIndex(name='likelihoods')
log = optimus.Log(name='log')
neg = optimus.Multiply(name='gain', weight_shape=None)
neg.weight.value = -1.0
loss = optimus.Mean(name='negative_log_likelihood')
loss_nodes = [likelihoods, log, neg, loss]
total_loss = optimus.Output(name='total_loss')
posterior = optimus.Output(name='posterior')
# 2. Define Edges
base_edges = [
(input_data, layer0.input),
(layer0.output, layer1.input),
(layer1.output, layer2.input),
(layer2.output, chord_classifier.input),
(layer0.output, null_classifier.input),
(chord_classifier.output, flatten.input),
(flatten.output, cat.input_0),
(null_classifier.output, cat.input_1),
(cat.output, softmax.input),
(softmax.output, prior.input),
(prior.output, posterior)]
trainer_edges = optimus.ConnectionManager(
base_edges + dropout_edges + [
(softmax.output, likelihoods.input),
(chord_idx, likelihoods.index),
(likelihoods.output, log.input),
(log.output, neg.input),
(neg.output, loss.input),
(loss.output, total_loss)])
update_manager = optimus.ConnectionManager(
map(lambda n: (learning_rate, n.weights), param_nodes) +
map(lambda n: (learning_rate, n.bias), param_nodes))
classifier_init(param_nodes)
trainer = optimus.Graph(
name=GRAPH_NAME,
inputs=inputs,
nodes=param_nodes + misc_nodes + loss_nodes,
connections=trainer_edges.connections,
outputs=[total_loss, posterior],
loss=total_loss,
updates=update_manager.connections,
verbose=True)
if use_dropout:
layer0.disable_dropout()
layer1.disable_dropout()
layer2.disable_dropout()
predictor = optimus.Graph(
name=GRAPH_NAME,
inputs=[input_data],
nodes=param_nodes + misc_nodes,
connections=optimus.ConnectionManager(base_edges).connections,
outputs=[posterior],
verbose=True)
return trainer, predictor
def i8c3_pwmse(size='large'):
k0, k1, k2 = dict(
small=(8, 16, 20),
med=(12, 24, 32),
large=(16, 32, 48))[size]
input_data = optimus.Input(
name='cqt',
shape=(None, 1, 8, 252))
target = optimus.Input(
name='target',
shape=(None, 1))
chord_idx = optimus.Input(
name='chord_idx',
shape=(None,),
dtype='int32')
learning_rate = optimus.Input(
name='learning_rate',
shape=None)
# 1.2 Create Nodes
layer0 = optimus.Conv3D(
name='layer0',
input_shape=input_data.shape,
weight_shape=(k0, None, 3, 13),
pool_shape=(1, 3),
act_type='relu')
layer1 = optimus.Conv3D(
name='layer1',
input_shape=layer0.output.shape,
weight_shape=(k1, None, 3, 37),
act_type='relu')
layer2 = optimus.Conv3D(
name='layer2',
input_shape=layer1.output.shape,
weight_shape=(k2, None, 3, 33),
act_type='relu')
chord_classifier = optimus.Conv3D(
name='chord_classifier',
input_shape=layer2.output.shape,
weight_shape=(13, None, 2, 1),
act_type='sigmoid')
flatten = optimus.Flatten('flatten', 2)
null_classifier = optimus.Affine(
name='null_classifier',
input_shape=layer2.output.shape,
output_shape=(None, 1),
act_type='sigmoid')
cat = optimus.Concatenate('concatenate', num_inputs=2, axis=1)
param_nodes = [layer0, layer1, layer2, chord_classifier, null_classifier]
misc_nodes = [flatten, cat]
# 1.1 Create Loss
likelihoods = optimus.SelectIndex(name='likelihoods')
dimshuffle = optimus.Dimshuffle('dimshuffle', (0, 'x'))
squared_error = optimus.SquaredEuclidean(name='squared_error')
loss = optimus.Mean(name='mean_squared_error')
loss_nodes = [likelihoods, dimshuffle, squared_error, loss]
# 2. Define Edges
base_edges = [
(input_data, layer0.input),
(layer0.output, layer1.input),
(layer1.output, layer2.input),
(layer2.output, chord_classifier.input),
(layer2.output, null_classifier.input),
(chord_classifier.output, flatten.input),
(flatten.output, cat.input_0),
(null_classifier.output, cat.input_1)]
trainer_edges = optimus.ConnectionManager(
base_edges + [
(cat.output, likelihoods.input),
(chord_idx, likelihoods.index),
(likelihoods.output, dimshuffle.input),
(dimshuffle.output, squared_error.input_a),
(target, squared_error.input_b),
(squared_error.output, loss.input)])
update_manager = optimus.ConnectionManager(
map(lambda n: (learning_rate, n.weights), param_nodes) +
map(lambda n: (learning_rate, n.bias), param_nodes))
trainer = optimus.Graph(
name=GRAPH_NAME,
inputs=[input_data, target, chord_idx, learning_rate],
nodes=param_nodes + misc_nodes + loss_nodes,
connections=trainer_edges.connections,
outputs=[loss.output],
loss=loss.output,
updates=update_manager.connections,
verbose=True)
classifier_init(param_nodes)
posterior = optimus.Output(name='posterior')
predictor_edges = optimus.ConnectionManager(
base_edges + [(cat.output, posterior)])
predictor = optimus.Graph(
name=GRAPH_NAME,
inputs=[input_data],
nodes=param_nodes + misc_nodes,
connections=predictor_edges.connections,
outputs=[posterior])
return trainer, predictor
def wcqt_likelihood_wmoia(n_dim=VOCAB):
input_data = optimus.Input(
name='cqt',
shape=(None, 6, TIME_DIM, 40))
target = optimus.Input(
name='target',
shape=(None, 1))
chord_idx = optimus.Input(
name='chord_idx',
shape=(None,),
dtype='int32')
learning_rate = optimus.Input(
name='learning_rate',
shape=None)
# 1.2 Create Nodes
layer0 = optimus.Conv3D(
name='layer0',
input_shape=input_data.shape,
weight_shape=(32, None, 5, 5),
pool_shape=(2, 3),
act_type='relu')
layer1 = optimus.Conv3D(
name='layer1',
input_shape=layer0.output.shape,
weight_shape=(64, None, 5, 7),
act_type='relu')
layer2 = optimus.Conv3D(
name='layer2',
input_shape=layer1.output.shape,
weight_shape=(128, None, 3, 6),
act_type='relu')
layer3 = optimus.Affine(
name='layer3',
input_shape=layer2.output.shape,
output_shape=(None, 1024,),
act_type='relu')
chord_estimator = optimus.Affine(
name='chord_estimator',
input_shape=layer3.output.shape,
output_shape=(None, n_dim),
act_type='sigmoid')
param_nodes = [layer0, layer1, layer2, layer3, chord_estimator]
# 1.1 Create Loss
likelihoods = optimus.SelectIndex('select')
dimshuffle = optimus.Dimshuffle('dimshuffle', (0, 'x'))
error = optimus.SquaredEuclidean(name='squared_error')
main_loss = optimus.Mean(name='mean_squared_error')
loss_nodes1 = [likelihoods, dimshuffle, error, main_loss]
negone = optimus.Gain(name='negate')
negone.weight.value = -1.0
summer = optimus.Add(name='moia_sum')
flatten = optimus.Sum('flatten', axis=1)
dimshuffle2 = optimus.Dimshuffle('dimshuffle2', (0, 'x'))
margin = optimus.RectifiedLinear(name='margin')
weight = optimus.Multiply(name="margin_weight")
margin_loss = optimus.Mean(name='margin_loss', axis=None)
loss_nodes2 = [negone, summer, margin, flatten,
dimshuffle2, margin_loss, weight]
total_loss = optimus.Add("total_loss")
# 2. Define Edges
base_edges = [
(input_data, layer0.input),
(layer0.output, layer1.input),
(layer1.output, layer2.input),
(layer2.output, layer3.input),
(layer3.output, chord_estimator.input)]
trainer_edges = optimus.ConnectionManager(
base_edges + [
(chord_estimator.output, likelihoods.input),
(chord_idx, likelihoods.index),
(likelihoods.output, dimshuffle.input),
(dimshuffle.output, error.input_a),
(target, error.input_b),
(error.output, main_loss.input),
# Margin loss
(dimshuffle.output, negone.input),
(negone.output, summer.input_list),
(chord_estimator.output, summer.input_list),
(summer.output, margin.input),
(margin.output, flatten.input),
(flatten.output, dimshuffle2.input),
(dimshuffle2.output, weight.input_a),
(target, weight.input_b),
(weight.output, margin_loss.input),
(margin_loss.output, total_loss.input_list),
(main_loss.output, total_loss.input_list)])
update_manager = optimus.ConnectionManager(
map(lambda n: (learning_rate, n.weights), param_nodes) +
map(lambda n: (learning_rate, n.bias), param_nodes))
all_nodes = param_nodes + loss_nodes1 + loss_nodes2 + [total_loss]
trainer = optimus.Graph(
name=GRAPH_NAME,
inputs=[input_data, target, chord_idx, learning_rate],
nodes=all_nodes,
connections=trainer_edges.connections,
outputs=[total_loss.output],
loss=total_loss.output,
updates=update_manager.connections,
verbose=True)
for n in param_nodes:
for p in n.params.values():
optimus.random_init(p)
posterior = optimus.Output(
name='posterior')
predictor_edges = optimus.ConnectionManager(
base_edges + [(chord_estimator.output, posterior)])
predictor = optimus.Graph(
name=GRAPH_NAME,
inputs=[input_data],
nodes=param_nodes,
connections=predictor_edges.connections,
outputs=[posterior])
return trainer, predictor
def i20c3_mse12(size='large'):
k0, k1, k2 = dict(
small=(10, 20, 40),
med=(12, 24, 48),
large=(16, 32, 64))[size]
input_data = optimus.Input(
name='cqt',
shape=(None, 1, 20, 252))
target = optimus.Input(
name='target',
shape=(None, 12))
learning_rate = optimus.Input(
name='learning_rate',
shape=None)
# 1.2 Create Nodes
layer0 = optimus.Conv3D(
name='layer0',
input_shape=input_data.shape,
weight_shape=(k0, None, 5, 13),
pool_shape=(2, 3),
act_type='relu')
layer1 = optimus.Conv3D(
name='layer1',
input_shape=layer0.output.shape,
weight_shape=(k1, None, 5, 37),
pool_shape=(2, 1),
act_type='relu')
layer2 = optimus.Conv3D(
name='layer2',
input_shape=layer1.output.shape,
weight_shape=(k2, None, 1, 33),
pool_shape=(2, 1),
act_type='relu')
chroma_estimator = optimus.Conv3D(
name='chord_classifier',
input_shape=layer2.output.shape,
weight_shape=(1, None, 1, 1),
act_type='sigmoid')
flatten = optimus.Flatten('flatten', 2)
param_nodes = [layer0, layer1, layer2, chroma_estimator]
misc_nodes = [flatten]
# 1.1 Create Loss
error = optimus.SquaredEuclidean(name='squared_error')
loss = optimus.Mean(name='mean_squared_error')
loss_nodes = [error, loss]
chroma = optimus.Output(name='chroma')
total_loss = optimus.Output(name='total_loss')
# 2. Define Edges
base_edges = [
(input_data, layer0.input),
(layer0.output, layer1.input),
(layer1.output, layer2.input),
(layer2.output, chroma_estimator.input),
(chroma_estimator.output, flatten.input),
(flatten.output, chroma)]
trainer_edges = optimus.ConnectionManager(
base_edges + [
(flatten.output, error.input_a),
(target, error.input_b),
(error.output, loss.input),
(loss.output, total_loss)])
update_manager = optimus.ConnectionManager(
map(lambda n: (learning_rate, n.weights), param_nodes) +
map(lambda n: (learning_rate, n.bias), param_nodes))
classifier_init(param_nodes)
trainer = optimus.Graph(
name=GRAPH_NAME,
inputs=[input_data, target, learning_rate],
nodes=param_nodes + misc_nodes + loss_nodes,
connections=trainer_edges.connections,
outputs=[total_loss, chroma],
loss=total_loss,
updates=update_manager.connections,
verbose=True)
predictor = optimus.Graph(
name=GRAPH_NAME,
inputs=[input_data],
nodes=param_nodes + misc_nodes,
connections=optimus.ConnectionManager(base_edges).connections,
outputs=[chroma])
return trainer, predictor
MODELS = {
'L': lambda: iXc3_nll(20, 'large'),
'XL': lambda: iXc3_nll(20, 'xlarge'),
'XXL': lambda: iXc3_nll(20, 'xxlarge'),
'XXL_dropout': lambda: iXc3_nll(20, 'xxlarge', True),
'XL_dropout': lambda: iXc3_nll(20, 'xlarge', True),
'L_dropout': lambda: iXc3_nll(20, 'large', True),
'L_fc': lambda: iXc3_fc_nll(20, 'large'),
'XL_fc': lambda: iXc3_fc_nll(20, 'xlarge'),
'XXL_fc': lambda: iXc3_fc_nll(20, 'xxlarge'),
'XXL_fc_dropout': lambda: iXc3_fc_nll(20, 'xxlarge', True),
'XL_fc_dropout': lambda: iXc3_fc_nll(20, 'xlarge', True),
'L_fc_dropout': lambda: iXc3_fc_nll(20, 'large', True)}
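# Minimal usage sketch: look up a builder by key and construct the graph pair,
# e.g. trainer, predictor = MODELS['L_dropout']()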
| [
"[email protected]"
]
| |
c9fc082fe4fbd76e705f0cb575bd33862df73465 | 686c9ccf7d15c1dbc1c5ea1ffd67bb8afc5a99ea | /tests/data/create_configs.py | 3fd3627b8cbcc2c11261afd36f22e208c9b134ea | [
"MIT"
]
| permissive | RJaikanth/torch-cv | 90129db1ae3dcdab445bd3c7437bd41f83770792 | 8102aaae840b674389f09a01c5c45df559cb3819 | refs/heads/master | 2023-01-20T18:41:24.929046 | 2020-10-31T11:21:01 | 2020-11-02T14:14:16 | 297,047,627 | 1 | 0 | MIT | 2020-09-30T13:11:23 | 2020-09-20T10:01:05 | Python | UTF-8 | Python | false | false | 1,245 | py | import os
from .file_strings import *
default_dir = "/tmp/torch-cv-test/config"
os.makedirs(default_dir, exist_ok=True)
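# Each helper below writes one canned YAML config (taken from .file_strings)
# into the temporary directory for the tests to consume.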
def create_empty_config():
dest = os.path.join(default_dir, "empty.yml")
with open(dest, "w") as f:
pass
def create_none_config():
dest = os.path.join(default_dir, "none.yml")
with open(dest, "w") as f:
f.write(none_config)
def create_join_config():
dest = os.path.join(default_dir, "join.yml")
with open(dest, "w") as f:
f.write(join_config)
def create_preprocess_config_with_annotation():
dest = os.path.join(default_dir, "preprocess.yml")
with open(dest, "w") as f:
f.write(preprocess_config)
def create_custom_preprocess_config():
dest = os.path.join(default_dir, "custom_preprocess.yml")
with open(dest, "w") as f:
f.write(custom_preprocess_yml)
def create_custom_preprocess_config_fail():
dest = os.path.join(default_dir, "custom_preprocess_fail.yml")
with open(dest, "w") as f:
f.write(custom_preprocess_yml_fail)
def create_preprocess_config():
dest = os.path.join(default_dir, "preprocess_without_annotation.yml")
with open(dest, "w") as f:
f.write(preprocess_config_without_annotations)
| [
"[email protected]"
]
| |
5e3db41925d6b128b1ecd55c0ce2009b26a47a22 | 7c735a6ea6012c7f8a454987b5fc040c6a244580 | /staff/views.py | 39a2ef30ddc18b0c029da8b1c4e6abfbfb4ed155 | []
| no_license | mbabikir4/blog-11 | 031bcd19c24112f8e27cbbdbc3dd2ee36913d926 | 03be666e772a3585a7d0b1856bebf8553c19da21 | refs/heads/master | 2022-11-30T05:36:24.303298 | 2020-08-13T19:04:17 | 2020-08-13T19:04:17 | 287,359,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,805 | py |
from django.shortcuts import render,redirect
from .forms import CreateUserForm,CreateBlog
from django.contrib import messages
from django.contrib.auth import authenticate,login,logout
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required(login_url='login')
def staff(request):
return render(request,'staff/staff.html')
def signup(request):
if request.user.is_authenticated:
return redirect('staff:staff')
form = CreateUserForm()
if request.method == 'POST':
form = CreateUserForm(request.POST)
if form.is_valid():
form.save()
messages.success(request,'Account created successfully')
return redirect('staff:login')
dicti = {'form':form}
return render(request,'staff/signup.html', dicti)
def logina(request):
if request.user.is_authenticated:
return redirect('staff:staff')
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(request, username=username,password=password)
if user is not None:
login(request, user)
return redirect('staff:staff')
else:
messages.error(request,'Username or password is incorrect. ')
return render(request,'staff/login.html')
return render(request,'staff/login.html')
def logouta(request):
logout(request)
return redirect('staff:login')
def create(request):
form = CreateBlog()
if request.method == 'POST':
form = CreateBlog(request.POST,request.FILES)
if form.is_valid():
form.save()
return redirect('blog:blog')
blogs = {'form':form}
return render(request,'staff/create.html',blogs)
| [
"[email protected]"
]
| |
e1e2acf9091c80c3e8265213248d8fb20def628c | 7ad9c3d06c74e65670636522f894edebd9586f86 | /gauge.py | 628f757c49fee321bcccd55e36d39674f5bd1b76 | []
| no_license | rishavb123/GeneralizedRiemannIntegral | 05a57863a59e5ca831c175391beeec889df024ed | ac8ab3ee36985d4dd91ed2bd6009a35d585397db | refs/heads/master | 2022-09-17T06:23:07.092861 | 2020-06-07T04:41:50 | 2020-06-07T04:41:50 | 257,092,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 599 | py | import numpy as np
from intervals import R
class Gauge:
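    """A gauge: a strictly positive function delta on an interval, as used to
    define delta-fine tagged partitions for the gauge (Henstock-Kurzweil) integral."""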
def __init__(self, func, interval, validate=True):
self.delta = func
self.interval = interval
if validate:
self.__validate__()
def __validate__(self):
for x in self.interval.discretize():
assert self.delta(x) > 0
def __call__(self, x, validate=True):
if validate:
assert self.interval.contains(x)
return self.delta(x)
@staticmethod
def constant(delta):
assert delta > 0
return Gauge(lambda x:delta, R, validate=False) | [
"[email protected]"
]
| |
1a71d1d48c8c1e7899c78ae5ffffd819170fff8b | 0c5fed6415f7a307d0885d7579969d4e8f0121c8 | /Assignements/MT17143_Assignment5&6/MT17143_Problem1.py | fb5a51bc4753c66cf95906fd1944be7a9261bf8c | []
| no_license | akshitasawhney3008/Machine-Learning | 4032dfb3efaa7fdeb794913bb30e39f7a2fece31 | bef1672ecb51e07b291349af9df219d2bfea8f2d | refs/heads/master | 2023-02-02T16:22:01.016505 | 2020-12-10T10:37:49 | 2020-12-10T10:37:49 | 320,232,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,121 | py | #MT17143 Akshita Sawhney
#Problem 1 RNA Sequencing
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.cluster import KMeans
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import SpectralClustering
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import numpy as np
import math
montpick= open("Read_count_rattus.txt",'r')
matrix = []
read_file = montpick.readlines()
for line in read_file: #file is extracted in a 2D matrix
row = []
list_of_words = line.split()
for i in range(1, len(list_of_words)):
row.append(int(list_of_words[i]))
matrix.append(row)
#Normalization
trc = 0 # total read count is calculated
for l in matrix:
for el in l:
trc+=el
sum=0
count=0
# print(len(matrix[1]))
for i in range(len(matrix[0])): # Sum of each column is calculated
column_sum = 0
for l in matrix:
column_sum += l[i]
sum+=column_sum
sum=sum/len(matrix[0])
for l in matrix: #Each readcount value is divided by the total read count
for i in range(len(l)):
div = float(l[i]/trc)
l[i]=div
for l in matrix: #Each readcount value is then multiplied by the sum of columns
for i in range(len(l)):
l[i]= float(l[i] * sum)
#Log Transform
for l in matrix:
for i in range(len(l)):
l[i]=math.log(1+l[i],2)
# print(matrix)
# print("hi")
input_matrix = np.array(matrix)
# print(M)
#The actual data matrix is extracted from the phenodata which acts as the true data.
phenotype = []
phenodata = open("Phenotype.txt",'r')
lines= phenodata.readlines()
for l in lines:
phen = l.split()
phenotype.append(int(phen[0]))
# phenotype1 = phenotype[1:]
true_matrix= np.array(phenotype)
#Input Data is split into Train and Test set with test size to be 33%
X_train, X_test, y_train, y_test = train_test_split(np.transpose(input_matrix),true_matrix,test_size=0.33)
#Kmeans Clustering is performed
kmeans=KMeans(n_clusters=2, random_state= 0).fit(X_train)
kmean_prediction = kmeans.predict(X_test) #Test data is passed to check the results.
print(accuracy_score(y_test,kmean_prediction)*100) # Accuracy of the predictions against the true labels, in percent
X_train, X_test, y_train, y_test = train_test_split(np.transpose(input_matrix),true_matrix,test_size=0.33)
#MiniBatchKmeans clustering is performed
Minibatchkmeans = MiniBatchKMeans(n_clusters=2, random_state= 0).fit(X_train)
minibatchkmean_prediction = Minibatchkmeans.predict(X_test)
print(accuracy_score(y_test,minibatchkmean_prediction)*100)
#Principal Component Analysis is performed to reduce the input data to two dimensions.
pca = PCA(n_components=2).fit_transform(np.transpose(input_matrix))
# pca_fit = pca.fit(np.transpose(input_matrix))
y_trans = np.transpose(true_matrix)
plt.scatter(pca[:, 0], pca[:, 1], y_trans.shape[0], c = y_trans) #Scatter is used to visualize the graph
plt.show() | [
"[email protected]"
]
| |
c73cbe77e12eb7d42f02c99be8e900a957f29c3a | d10ce6055ec577cea66447e436dfb8f34083c429 | /line_trace.py | cd9afa114c8fc99270d98475cf8bd330f7dcf687 | []
| no_license | dlwjdxo21/ev3_aircleaner | 2deaec6e47bd4073e352d35b032815c631949c52 | 11c0683a0fbc042bccbd18ad95bb32f0652d80b6 | refs/heads/master | 2020-09-02T19:57:53.048363 | 2019-11-02T07:15:52 | 2019-11-02T07:15:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35 | py | // line trace using ev3dev, python
| [
"[email protected]"
]
| |
dc68438bc369293cbf262f10722059dbc20ee2e8 | 57094f0d09fd3e74eeb511e94400c3ec97051ad3 | /Quax_dev_archive/quax_misc/angular_momentum/tensor_approach/contracted/contracted_overlap.py | 1d41836be55f7685a172233f95c91a9581ffd5c8 | []
| no_license | adabbott/Research_Notes | cccba246e81065dc4a663703fe225fc1ebbf806b | 644394edff99dc6542e8ae6bd0ce8bcf158cff69 | refs/heads/master | 2023-05-12T20:26:58.938617 | 2021-06-02T17:15:35 | 2021-06-02T17:15:35 | 119,863,228 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,501 | py | import jax
import jax.numpy as np
import numpy as onp
from jax.config import config; config.update("jax_enable_x64", True)
np.set_printoptions(precision=10)
onp.set_printoptions(precision=10)
def double_factorial(n):
'''The double factorial function for small Python integer `n`.'''
return np.prod(np.arange(n, 1, -2))
@jax.jit
def odd_double_factorial(x):  # this one is jittable, at roughly equal speed
n = (x + 1)/2
return 2**n * np.exp(jax.scipy.special.gammaln(n + 0.5)) / (np.pi**(0.5))
@jax.jit
def normalize(aa,ax,ay,az):
'''
Normalization constant for gaussian basis function.
aa : orbital exponent
ax : angular momentum component x
ay : angular momentum component y
az : angular momentum component z
'''
#f = np.sqrt(double_factorial(2*ax-1) * double_factorial(2*ay-1) * double_factorial(2*az-1))
f = np.sqrt(odd_double_factorial(2*ax-1) * odd_double_factorial(2*ay-1) * odd_double_factorial(2*az-1))
N = (2*aa/np.pi)**(3/4) * (4 * aa)**((ax+ay+az)/2) / f
return N
@jax.jit
def overlap_ss(A, B, alpha_bra, alpha_ket):
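    # (s|s) overlap integral of two unnormalized primitive Gaussians centered at A and B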
ss = ((np.pi / (alpha_bra + alpha_ket))**(3/2) * np.exp((-alpha_bra * alpha_ket * np.dot(A-B,A-B)) / (alpha_bra + alpha_ket)))
return ss
#@jax.jit
def contracted_normalize(exponents,coeff,ax,ay,az):
'''Normalization constant for a single contracted gaussian basis function'''
K = exponents.shape[0] # Degree of contraction K
L = ax + ay + az # Total angular momentum L
# all possible combinations of ci * cj
c_times_c = np.outer(coeff,coeff)
# all possible combinations of alphai + alphaj
a_plus_a = np.broadcast_to(exponents, (K,K)) + np.transpose(np.broadcast_to(exponents, (K,K)), (1,0))
prefactor = (np.pi**(1.5) * double_factorial(2*ax-1) * double_factorial(2*ay-1) * double_factorial(2*az-1)) / 2**L
#prefactor = (np.pi**(1.5) * odd_double_factorial(2*ax-1) * odd_double_factorial(2*ay-1) * odd_double_factorial(2*az-1)) / 2**L
sum_term = np.sum(c_times_c / (a_plus_a**(L + 1.5)))
return (prefactor * sum_term) ** -0.5
@jax.jit
def contracted_overlap_ss(A, B, alpha_bra, alpha_ket, c_bra, c_ket):
size = alpha_bra.shape[0]
AB = np.dot(A-B,A-B)
# rather than looping over all primitive combinations, vectorize by expanding data into arrays
# all possible combinations of c_bra * c_ket
c_times_c = np.outer(c_bra,c_ket)
# all possible combinations of alpha_bra * alpha_ket
a_times_a = np.outer(alpha_bra,alpha_ket)
# all possible combinations of alpha_bra + alpha_ket
a_plus_a = np.outer(alpha_bra, np.ones_like(alpha_ket)) + np.transpose(np.outer(alpha_ket, np.ones_like(alpha_bra)))
ss = np.sum((np.pi / a_plus_a)**(1.5) * np.exp(-a_times_a * AB / a_plus_a) * c_times_c)
return ss
geom = np.array([[0.0,0.0,-0.849220457955],
[0.0,0.0, 0.849220457955]])
charge = np.array([1.0,1.0])
A = np.array([0.0,0.0,-0.849220457955])
B = np.array([0.0,0.0, 0.849220457955])
# This is a basis function
exps = np.array([0.5,
0.5])
coeffs = np.array([1.00,
1.00])
# Bake the normalizations into the coefficients, like Psi4
primitive_norms = jax.vmap(normalize)(exps, np.array([0,0]), np.array([0,0]),np.array([0,0]))
ang_mom_x, ang_mom_y, ang_mom_z = np.zeros_like(exps), np.zeros_like(exps), np.zeros_like(exps)
# Use vmap to auto vectorize the primitve normalization function
primitive_norms = jax.vmap(normalize)(exps, ang_mom_x, ang_mom_y, ang_mom_z)
coeffs = coeffs * primitive_norms
contracted_norm = contracted_normalize(exps, coeffs, 0, 0, 0)
coeffs = coeffs * contracted_norm
@jax.jit
def overlap_ps_block(A, B, alpha_bra, alpha_ket):
oot_alpha_bra = 1 / (2 * alpha_bra)
return oot_alpha_bra * jax.jacrev(overlap_ss,0)(A,B,alpha_bra,alpha_ket)
@jax.jit
def overlap_pp_block(A, B, alpha_bra, alpha_ket):
# We are promoting the ket, so the factor is the ket exponent
oot_alpha_ket = 1 / (2 * alpha_ket)
# No second term, ai is 0 since we are promoting the ket and theres no AM in the ket.
return oot_alpha_ket * (jax.jacfwd(overlap_ps_block, 1)(A,B,alpha_bra,alpha_ket))
# Try (p|p) This is a basis function
exp1 = np.array([0.5,
0.4])
exp2 = np.array([0.5,
0.4])
#coeffs1 = np.array([1.00,
# 1.00])
#coeffs2 = np.array([1.00,
# 1.00])
#
#N1 = contracted_normalize(exp1, coeffs1, 1, 0, 0)
#N2 = contracted_normalize(exp2, coeffs2, 0, 0, 0)
full_c = 0.5993114751532237 * 0.4534350390443813
primitive_1 = overlap_pp_block(A,B, 0.5, 0.4)
primitive_2 = overlap_pp_block(A,B, 0.5, 0.4)
print(full_c * primitive_1)
print(full_c * primitive_2)
print(primitive_1 + primitive_2)
full_c = 0.30081673988809127 * 0.2275959260774826
#print(
print(full_c * (primitive_1 + primitive_2))
#c = 0.15905414575341015
#c = 0.22671751952219066
#print(c * primitive_1 + c * primitive_2)
#print("EHHHH")
# Compute all unnormalized primitives
test_func = jax.vmap(overlap_pp_block, in_axes=(None,None,0,0))
test = test_func(A, B, exp1, exp2)
#print('og')
#print(test)
#print('sum')
#print(test[0] + test[1])
#print('sum, double, * c**2')
#c = .22671751952219066
#print( 2 * (test[0] + test[1]) * c**2)
#c1 = 0.30081673988809127
#c2 = 0.2275959260774826
#print((c1 * c1 * test[0] + c2 * c2 * test[1]))
#print((c1 * test[0] + c2 * test[1]))
#print(( test[0] + test[1]) * c1 * c2)
#erd_c1 = 0.21103860739153443
#erd_c2 = 0.15967039369300745
#print((erd_c1 * test[0] + erd_c2 * test[1])) #* c1 * c2)
#print('sum, 1/2alpha, * c**2')
#print( 1/(2*exp1[0]) * (test[0] + test[1]) * c**2)
#print(test * 0.29965573757661185)
#print(test * 0.29965573757661185 * 0.5)
#jprint(test * 0.29965573757661185**2 )
#primitive_norm = normalize(0.5, 1,0,0)
#print(primitive_norm)
#print(contracted_normalize(exp1,primitive_norm, 1,0,0))
coeffs1 = np.array([0.30081673988809127,
0.2275959260774826])
coeffs2 = np.array([0.21238156276178832,
0.17965292907913089])
# Now contract
#print( test[0] * coeffs1[0] + test[1] * coeffs1[1])
#print( test[1] * coeffs1[0] + test[0] * coeffs1[1])
#print(test * coeffs1[0] + test * coeffs2[1])
#
#print(0.29965573757661185 * (test[0] + test[1]))
#print(0.29965573757661185**2 * (test[0] + test[1]))
#print(coeffs1[0] * coeffs[1] test[0] + test[1]))
#TODO temp
#coeffs1 = np.array([0.30081673988809127,
# 0.2275959260774826])
#coeffs2 = np.array([0.21238156276178832,
# 0.17965292907913089])
#
## This is the correct (px|s)
#N1 = contracted_normalize(exp1, coeffs1, 1, 0, 0)
#N2 = contracted_normalize(exp2, coeffs2, 0, 0, 0)
#test = contracted_overlap_ps_block(A,B,exp1,exp2,coeffs1,coeffs2)
#print(test)
#print(test * N1 * N2)
#vectorized_overlap_ps_block = jax.vmap(overlap_ps_block, in_axes=(None,None, 0, 0))
#c_o = vectorized_overlap_ps_block(A, B, exps, exps)
#print(c_o)
#print(c_o * 0.5993114751532237 * 0.4237772081237576) # Coef's from Psi4
#print(overlap_pp_block(A,B,0.5,0.5))
#vectorized_overlap_pp_block = jax.vmap(overlap_pp_block, in_axes=(None,None, 0, 0))
#c_o = vectorized_overlap_pp_block(A, B, exps, exps)
#print(c_o)
#print(coeffs)
#coeffs = np.tile(coeffs,3).reshape(2,3)
#print(c_o * coeffs)
#print("Raw normalization constant")
#print(tmp_N)
#print("normalization constant times coefficients")
#print(tmp_N * coeffs)
#print("Raw overlap")
#print(c_o)
#print("normalized overlap")
#print(tmp_N * tmp_N * c_o)
#print(tmp_N * c_o)
s_N = 0.4237772081237576
p_N = 0.5993114751532237
d_N = 0.489335770373359
## (s|s)
#print(s_N * s_N * overlap_ss(A,B,alpha_bra,alpha_ket)) # YUP
## (p|s)
#print(p_N * s_N * overlap_ps_block(A,B,alpha_bra,alpha_ket)) # YUP
## (p|p)
#print(p_N * p_N * overlap_pp_block(A,B,alpha_bra,alpha_ket)) # YUP
## (d|s)
#print(d_N * s_N * overlap_ds_block(A,B,alpha_bra,alpha_ket)) # YUP
## (d|p)
#print(d_N * p_N * overlap_dp_block(A,B,alpha_bra,alpha_ket).reshape(6,3)) # YUP
## (d|d)
#print(d_N * d_N * overlap_dd_block(A,B,alpha_bra,alpha_ket))
#print('hard coded')
#print(overlap_ps_block(A,B,alpha_bra,alpha_ket))
#print('hard coded')
#print(overlap_pp_block(A,B,alpha_bra,alpha_ket))
#print('hard coded')
#print(overlap_ds_block(A,B,alpha_bra,alpha_ket))
#overlap_dp_block(A,B,alpha_bra,alpha_ket)
#dd_block = overlap_dd_block(A,B,alpha_bra,alpha_ket)
#print(dd_block * 0.489335770373359)
#for i in range(1000):
# overlap_pp_block(A,B,alpha_bra,alpha_ket)
@jax.jit
def overlap_ps_block(A, B, alpha_bra, alpha_ket):
oot_alpha_bra = 1 / (2 * alpha_bra)
return oot_alpha_bra * jax.jacrev(overlap_ss,0)(A,B,alpha_bra,alpha_ket)
@jax.jit
def overlap_sp_block(A, B, alpha_bra, alpha_ket): # not really needed is it?
oot_alpha_bra = 1 / (2 * alpha_bra)
return oot_alpha_bra * jax.jacrev(overlap_ss,1)(A,B,alpha_bra,alpha_ket)
@jax.jit
def overlap_pp_block(A, B, alpha_bra, alpha_ket):
# We are promoting the ket, so the factor is the ket exponent
oot_alpha_ket = 1 / (2 * alpha_ket)
    # No second term: a_i is 0, since we are promoting the ket and there's no angular momentum in the ket yet.
return oot_alpha_ket * (jax.jacfwd(overlap_ps_block, 1)(A,B,alpha_bra,alpha_ket))
#@jax.jit
#def overlap_ds_block(A,B,alpha_bra,alpha_ket):
# # We are promoting the bra a second time, factor is bra exponent
# oot_alpha_bra = 1 / (2 * alpha_bra)
# # # This is of shape (3,3) all dij combos symmetric matrix # Thus a_i factor has to be 3x3 identity, so that only
# return oot_alpha_bra * (jax.jacfwd(overlap_ps_block, 0)(A,B,alpha_bra,alpha_ket) + np.eye(3) * overlap_ss(A,B,alpha_bra,alpha_ket))
@jax.jit
def overlap_ds_block(A,B,alpha_bra,alpha_ket):
'''
Returns a 1x6 array:
(dxx,s) (dxy,s) (dxz,s) (dyy,s) (dyz,s) (dzz,s)
'''
# We are promoting the bra a second time, factor is bra exponent
oot_alpha_bra = 1 / (2 * alpha_bra)
    # The jacfwd term is a symmetric (3,3) array of all d_ij combinations, so the a_i factor must be the 3x3 identity:
    # the lower-angular-momentum overlap_ss term contributes only on the diagonal (dxx, dyy, dzz).
result = oot_alpha_bra * (jax.jacfwd(overlap_ps_block, 0)(A,B,alpha_bra,alpha_ket) + np.eye(3) * overlap_ss(A,B,alpha_bra,alpha_ket))
# This result is a 3x3 array containing all (dxx,s) (dxy,s) (dyx,s), only need upper or lower triangle
# Return upper triangle ((dxx, dxy, dxz, dyy, dyz, dzz) | s) as a vector
iu = np.triu_indices(3)
return result[iu]
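# Illustrative note: np.triu_indices(3) keeps entries (0,0),(0,1),(0,2),
# (1,1),(1,2),(2,2) of the symmetric 3x3 block, i.e. exactly the ordering
# (dxx, dxy, dxz, dyy, dyz, dzz) promised in the docstring.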
@jax.jit
def overlap_dp_block(A,B,alpha_bra,alpha_ket):
'''
Returns a 1x18 array:
(dxx,px) (dxx,py) (dxx,pz) (dxy,px) (dxy,py) (dxy,pz) (dxz,px) (dxz,py) (dxz,pz) (dyy,px) (dyy,py) (dyy,pz) (dyz,px) (dyz,py) (dyz,pz) (dzz,px) (dzz,py) (dzz,pz)
If called directly, should reshape into a 6x3 block!
(dxx,px) (dxx,py) (dxx,pz)
(dxy,px) (dxy,py) (dxy,pz)
(dxz,px) (dxz,py) (dxz,pz)
(dyy,px) (dyy,py) (dyy,pz)
(dyz,px) (dyz,py) (dyz,pz)
(dzz,px) (dzz,py) (dzz,pz)
'''
oot_alpha_ket = 1 / (2 * alpha_ket) # use ket, since we are promoting ket from s-->p
    # This is an 18x1 array of (d|p) functions. (overlap_pp_block could likely be used here instead.)
return np.ravel(oot_alpha_ket * jax.jacfwd(overlap_ds_block, 1)(A,B,alpha_bra,alpha_ket))
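# Usage sketch (toy exponents): recover the 6x3 layout documented above.
# dp = overlap_dp_block(A, B, 0.5, 0.4).reshape(6, 3)  # rows: d components; cols: px, py, pz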
@jax.jit
def overlap_dd_block(A,B,alpha_bra,alpha_ket):
'''
Returns a 6x6 array:
(dxx,dxx) (dxx,dxy) (dxx,dxz) (dxx,dyy) (dxx,dyz) (dxx,dzz)
(dxy,dxx) (dxy,dxy) (dxy,dxz) (dxy,dyy) (dxy,dyz) (dxy,dzz)
(dxz,dxx) (dxz,dxy) (dxz,dxz) (dxz,dyy) (dxz,dyz) (dxz,dzz)
(dyy,dxx) (dyy,dxy) (dyy,dxz) (dyy,dyy) (dyy,dyz) (dyy,dzz)
(dyz,dxx) (dyz,dxy) (dyz,dxz) (dyz,dyy) (dyz,dyz) (dyz,dzz)
(dzz,dxx) (dzz,dxy) (dzz,dxz) (dzz,dyy) (dzz,dyz) (dzz,dzz)
'''
oot_alpha_ket = 1 / (2 * alpha_ket) # use ket, since we are promoting ket from p-->d
    # jacfwd (first) term, an 18x3 array:                  a_i coeffs:    second term:
# (dxx,px) --> (dxx,dxx) (dxx, dxy), (dxx, dxz) 1, 0, 0 (dxx|s) (dxx|s) (dxx|s)
# (dxx,py) --> (dxx,dyx) (dxx, dyy), (dxx, dyz) 0, 1, 0 (dxx|s) (dxx|s) (dxx|s)
# (dxx,pz) --> (dxx,dzx) (dxx, dzy), (dxx, dzz) 0, 0, 1 (dxx|s) (dxx|s) (dxx|s)
# (dxy,px) --> (dxy,dxx) (dxy, dxy), (dxy, dxz) 1, 0, 0 (dxy|s) (dxy|s) (dxy|s)
# (dxy,py) --> (dxy,dyx) (dxy, dyy), (dxy, dyz) 0, 1, 0 (dxy|s) (dxy|s) (dxy|s)
# (dxy,pz) --> (dxy,dzx) (dxy, dzy), (dxy, dzz) 0, 0, 1 (dxy|s) (dxy|s) (dxy|s)
# .... ...
# (dzz,px) --> (dzz,dxx) (dzz, dxy), (dzz, dxz) 1, 0, 0 (dzz|s) (dzz|s) (dzz|s)
# (dzz,py) --> (dzz,dyx) (dzz, dyy), (dzz, dyz) 0, 1, 0 (dzz|s) (dzz|s) (dzz|s)
# (dzz,pz) --> (dzz,dzx) (dzz, dzy), (dzz, dzz) 0, 0, 1 (dzz|s) (dzz|s) (dzz|s)
first_term = jax.jacfwd(overlap_dp_block, 1)(A,B,alpha_bra,alpha_ket)
factor = np.tile(np.eye(3),(6,1))
tmp_second_term = overlap_ds_block(A,B,alpha_bra,alpha_ket)
second_term = factor * np.repeat(tmp_second_term, 9).reshape(18,3)
result = oot_alpha_ket * (first_term + second_term)
# result is of same signature as jacfwd (first) term above
# It contains duplicates in each 3x3 sub-array (upper and lower triangle are equal)
# reshape and grab out just upper triangle as a vector, reshape into matrix
iu1,iu2 = np.triu_indices(3)
result = result.reshape(6,3,3)[:,iu1,iu2].reshape(6,6)
return result
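# Sanity sketch (toy exponents): the (d|d) block is 6x6 and, for equal bra and
# ket exponents, should come out symmetric up to floating-point noise, e.g.
# dd = overlap_dd_block(A, B, 0.5, 0.5); np.allclose(dd, dd.T)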
| [
"[email protected]"
]
| |
fe5cd719aeb4318becc6e6168757a6ea2f21449c | 44d1b566b7790e260d1e64246a31fd1919faed79 | /Vuelos.py | 611c4f07cceeed6ef57294e23ed5a3ed829ea77b | []
| no_license | wildaz/VuelaFacil | 841729851b3f120b96e752f730c67402d488f26a | bb90da54393a9ac658c3fcde71a5f4b6a463cc8c | refs/heads/master | 2023-08-05T12:24:05.173659 | 2021-09-09T01:33:51 | 2021-09-09T01:33:51 | 400,677,749 | 0 | 3 | null | 2021-09-09T01:33:52 | 2021-08-28T01:55:26 | null | UTF-8 | Python | false | false | 217 | py | class Reg_Vuelos:
Aviones=0
sillas=0
trayectos=""
    def __init__(self, Aviones, sillas, trayectos):
        self.Aviones = Aviones
        self.sillas = sillas
        self.trayectos = trayectos
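# Usage sketch with illustrative values: registering two planes of 180 seats
# on a single route.
# vuelo = Reg_Vuelos(2, 180, "BOG-MDE")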
| [
"[email protected]"
]
| |
b3953b62fa3db6523934640dd6efa14908a3bbea | c5744c2fda48ae6a79c155c641fe98021a0cb7f3 | /PP4E/System/setenv.py | a03d7c04233f64c0efbc11ad24b5bc1eaace0f37 | []
| no_license | skinkie/Scripts | e0fd3d3f767612ade111f28bc7af3e1b25fc2947 | 80a1ba71ddf9a0c5ff33866832cb5c42aca0c0b1 | refs/heads/master | 2021-05-31T16:57:21.100919 | 2016-05-23T09:58:59 | 2016-05-23T09:58:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | #!/usr/bin/env python
# eg. 3-3
import os
print('setenv...', end=' ')
print(os.environ['USER'])
os.environ['USER'] = 'Brian'
os.system('python echoenv.py')
os.environ['USER'] = 'Arthur'
os.system('python echoenv.py')
os.environ['USER'] = input('?')
print(os.popen('python3.5 echoenv.py').read()) | [
"[email protected]"
]
| |
794b0f3fabf741ae7f69f0b347bca4f1e8e0e5ca | df7d65014edd57612adbb12a2c37b074ca0479e0 | /poo/instancias.py | 331002d9490d2713f562b069a79a7e2fcaa1a9da | []
| no_license | palomaYPR/OOP-and-Algorithms-with-Python | 94380e18f71edcaaa8813eb914098263f609eb4e | 90f83aa9f2262cce9971b1df092ff3eba4fc8ec3 | refs/heads/main | 2023-02-07T14:17:49.716211 | 2020-12-26T23:19:37 | 2020-12-26T23:19:37 | 323,775,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | class Coordenadas:
def __init__(self, x, y):
self.x = x
self.y = y
def distancia(self, otra_coordenada):
x_diff = (self.x - otra_coordenada.x)**2
y_diff = (self.y - otra_coordenada.y)**2
return (x_diff + y_diff)**0.5
if __name__ == '__main__':
coord_1 = Coordenadas(3, 30)
coord_2 = Coordenadas(4, 8)
print(coord_1.distancia(coord_2))
    # Lets us check whether one of the coordinates is an instance of Coordenadas
print(isinstance(coord_2, Coordenadas)) | [
"[email protected]"
]
| |
205b0401f53dfb46f0c7e497fb932518c4ceafd5 | dc3113b6247324e664860928763c2eaedec6ec7b | /djitter/djitter/settings.py | 7f022325ebb6a0ccc9e86520806e14a8c94a3252 | []
| no_license | adwanAK/django-djitter | 49da757e556a34661219ad7d6c23a07629da1db0 | 23f85c5cd0f176229b6b2af933d5c46605f37bda | refs/heads/master | 2020-04-01T14:59:37.403860 | 2018-10-16T16:15:12 | 2018-10-16T16:15:12 | 153,316,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,538 | py | """
Django settings for djitter project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm70&w8cwaz^k1-4u@fotd9oq(gs3lf_fu$v5$f_e=qum=yy8#p'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'djeeterprofile.apps.DjeeterprofileConfig',
'djeet.apps.DjeetConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djitter.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,''), os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djitter.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': os.environ['NAME'],
'USER': os.environ['USER'],
'PASSWORD': os.environ['PASSWORD'],
'PORT': '3306'
}
}
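# The MySQL connection above is configured entirely from the environment: a
# deployment must export NAME, USER and PASSWORD (the keys read through
# os.environ). No HOST is given, so Django falls back to localhost.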
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static') #from djitter tutorial
| [
"[email protected]"
]
| |
fb602658c47b01a30ff2c7ae2b51eec8b1d10712 | faf2b052e7f8cd79467ad34fbc173f3bf4b1a21d | /test/test_ws_equipment_list_result.py | 7230f7e81388c99a49ad58906a36598acb75af45 | []
| no_license | atbe/MSU-Scholar-Api-Client-Python | 5d39577ce07ab285f0df9ee58a1ed7ff8ab08d2a | 31b263e5ad848fc6593c4662fbf2828ab9e2594c | refs/heads/master | 2021-05-04T00:51:46.720474 | 2018-03-12T23:52:17 | 2018-03-12T23:52:17 | 120,354,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,853 | py | # coding: utf-8
"""
PURE API 510
This is the Pure Web Service. Listed below are all available endpoints, along with a short description.<br/>In order to use the Pure Web Service, you must enter an API key. These are generated in the Administrator tab of Pure, and issues with a given set of available endpoints.<br/>To enter your API key and begin your use, press the Authorize button to at the top of the page. You are then presented with two options for entering the API key: the first option is to use the API key in query format, and the second option is to use the API key in a header.<br/> For further documentation, see <a href=\"documentation/Default.htm\">API Documentation</a>.<br/>A new version of the API is released with each major version of Pure, and remains available for one year. This version is no longer available in Pure 5.14<br/>The old web service is deprecated, but still available <a href=\"../../../doc/\">here</a>, and it will no longer be available in Pure 5.13
OpenAPI spec version: 510
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import msu_scholars_api
from msu_scholars_api.rest import ApiException
from msu_scholars_api.models.ws_equipment_list_result import WSEquipmentListResult
class TestWSEquipmentListResult(unittest.TestCase):
""" WSEquipmentListResult unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testWSEquipmentListResult(self):
"""
Test WSEquipmentListResult
"""
# FIXME: construct object with mandatory attributes with example values
#model = msu_scholars_api.models.ws_equipment_list_result.WSEquipmentListResult()
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
53cfd776131f3422dda00ac67dccaa0ac17829ce | 4c8cd4fff6fb734b127b03275a13b45822cef1df | /src/top/views.py | c260314a79c830de595a3b70a8f4fc4a3dfcf134 | []
| no_license | hiro13255/onlineSlone | fdfcd6ffed88d5067eeed018fd3cbc4da619531c | 42dae344b8735a39c44b30c4b685c33333b45f68 | refs/heads/master | 2023-06-15T12:26:40.843206 | 2021-07-12T03:03:50 | 2021-07-12T03:03:50 | 363,617,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,222 | py | import environ
import logging
from django.shortcuts import render
from django.http import HttpResponse
from django.core.mail import send_mail
logger = logging.getLogger(__name__)
env = environ.Env()
env.read_env('.env')
# Create your views here.
def index(request):
logger.info('Function call index')
return render(request, 'top/index.html')
def join(request):
logger.info('Function call join')
if request.POST["email"] == "":
logger.info('email empty!')
return render(request, 'top/fail.html')
else:
subject = "エンジニア集会場の参加リンクの送付"
        message = "エンジニア集会場にご参加して頂きありがとうございます。以下のURLからご参加ください。\n" + env('SEND_MESSAGE_LINK') + "\n参加後は「自己紹介」チャンネルにて簡単に自己紹介をお願いします!\n\n今後とも宜しくお願いいたします。"
        from_email = env('EMAIL_HOST_USER')  # sender
        recipient_list = [request.POST["email"]]  # recipient list
send_mail(subject, message, from_email, recipient_list)
logger.info('Send Mail OK!')
return render(request, 'top/success.html')
| [
"[email protected]"
]
| |
5f465d48674d60080950304420497ee045db7783 | 0aa972be2cf4a3c76d9e72d9f90eed5171de312c | /venv/bin/conch | b4375d25cdde38c95d4db6eff3acbe92a4c702c9 | [
"MIT"
]
| permissive | MATTTYMA/chemist_crawler | 335784b2815061b328b23f0c5c307baa6eddbd33 | 28fdf1e93c38cddc5f8b16e7098a60e2a51b1a7a | refs/heads/master | 2021-10-26T10:24:15.793086 | 2019-04-12T02:16:00 | 2019-04-12T02:16:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | #!/Users/mattewma/Developer/Python/chemist_crawler/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'Twisted==18.9.0','console_scripts','conch'
__requires__ = 'Twisted==18.9.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('Twisted==18.9.0', 'console_scripts', 'conch')()
)
| [
"[email protected]"
]
| ||
d8613587821bcdfec7eb5aa4ed92301b19e65ea2 | e9898a7428f318d6537affc515d86517c9e39800 | /shopping_paradise/users/urls.py | 455e9e38abbc800010351eb6cadab50eaf4316b8 | []
| no_license | mihaivalentistoica/shoping-paradise-sda | 2ba463f31857359fae8bba767b0c8a0917bc6bfd | 477aae6a78d7f8d1b5da697b08a1f77ca007934d | refs/heads/master | 2022-12-24T03:52:06.492464 | 2020-10-04T08:52:09 | 2020-10-04T08:52:09 | 298,762,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | from django.urls import path
from . import views
urlpatterns = [
path('login/', views.UserLoginView.as_view(), name="user-login"),
path('logout/', views.UserLogoutViews.as_view(), name="user-logout"),
path('password_reset/', views.UserPasswordResetFormView.as_view(), name="user-password-reset"),
path('password_reset/done', views.UserPasswordResetDoneView.as_view(), name="user-password-reset-done"),
path('password_reset/complete', views.UserResetPasswordComplet.as_view(), name="user-password_reset_complete"),
path('reset/<uidb64>/<token>/', views.UserPasswordResetConfirmView.as_view(), name='user-password_reset_confirm'),
]
| [
"[email protected]"
]
| |
2ac0d5a5ddce621330c25fccc3359641c3461d91 | fa42ae42e12948b17989d93c3d3c11ea3b520170 | /users/views.py | 9cae081f8f998126092232ccc251852aeb564209 | []
| no_license | jarookiee/Django_learning_log | b9f6f3e7da53a289f018c7af20c59c82bd4346da | a33d6fb1504263b74e4dc6b54d68a9df01869591 | refs/heads/master | 2023-08-10T16:12:34.061581 | 2021-03-14T15:17:02 | 2021-03-14T15:17:02 | 160,336,482 | 0 | 0 | null | 2021-09-22T19:47:53 | 2018-12-04T09:59:50 | Python | UTF-8 | Python | false | false | 1,120 | py | from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.contrib.auth import login, logout, authenticate
from django.contrib.auth.forms import UserCreationForm
def logout_view(request):
"""Log the user out."""
logout(request)
return HttpResponseRedirect(reverse('learning_logs:index'))
def register(request):
"""Register a new user."""
if request.method != 'POST':
# Display blank registration form.
form = UserCreationForm()
else:
# Process completed form.
form = UserCreationForm(data=request.POST)
if form.is_valid():
new_user = form.save()
# Log the user in, and then redirect to home page.
authenticated_user = authenticate(username=new_user.username,
password=request.POST['password1'])
login(request, authenticated_user)
return HttpResponseRedirect(reverse('learning_logs:index'))
context = {'form': form}
return render(request, 'users/register.html', context)
| [
"[email protected]"
]
| |
2c01c22fde0bafa7b2cf2ea3083d8131e8b95ac9 | 42b94a81ab09a41dc0b4d4607405a8af1fbd61f2 | /send_keys_from_image.py | 729601c3dadd3f38644179b37378c4a4c8b178e9 | []
| no_license | solominh/Python-SendKeysFromImage | e88ca725c5402751850e745c8b4ce7b0fa80e035 | 40b2a8157d49a59a7a6e192e3ed8daf58d437650 | refs/heads/master | 2021-01-12T01:18:21.297572 | 2017-01-09T09:04:28 | 2017-01-09T09:04:28 | 78,368,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,195 | py |
import keyboard
import pyautogui
import cv2
from get_image_region import draw_region
from subprocess import call
def main():
# Take screenshot
screenshot_path = './screenshot.png'
pyautogui.screenshot(screenshot_path)
# Draw image region
image = cv2.imread(screenshot_path)
ref_points = draw_region(image)
print(ref_points)
# Sanity check
if not ref_points:
print('Region not selected')
return False
# Save cropped image
cropped_image = image[ref_points['topleft'][1]:ref_points['bottomright'][1],
ref_points['topleft'][0]:ref_points['bottomright'][0]]
cv2.imwrite(screenshot_path, cropped_image)
# Convert image to text
text_ouput_path = './text_from_image'
call(["tesseract", screenshot_path, text_ouput_path])
return True
def send_keys():
with open('./text_from_image.txt') as f:
first_time = True
for line in f:
cleaned_line = line.strip()
if first_time:
first_time = False
else:
cleaned_line = ' ' + cleaned_line
print(cleaned_line)
keyboard.write(cleaned_line)
| [
"[email protected]"
]
| |
425f7a07c2c56758032421f3933a362bb9efea4a | 25276b0b8cb9fbecd3074e2d0928c4d8a913c023 | /papermario/diff_settings.py | 8c451ad9c1e364f5738b026efd8cf282a12d7acc | []
| no_license | ethteck/PaperMarioDecompilation | 25447120625852e4e1da572f6ec80e7c8e8251f0 | d62e76cef49db24b0a211da6f09287ea2093eb7a | refs/heads/master | 2022-12-06T17:32:22.007613 | 2020-08-15T06:28:22 | 2020-08-15T06:28:22 | 258,404,731 | 11 | 8 | null | 2020-08-15T06:25:44 | 2020-04-24T04:24:08 | Assembly | UTF-8 | Python | false | false | 217 | py | #!/usr/bin/env python3
def apply(config, args):
config['baseimg'] = '../baserom.z64'
config['myimg'] = 'papermario.z64'
config['mapfile'] = 'build/papermario.map'
config['source_directories'] = ['.']
| [
"[email protected]"
]
| |
5edebc4c6027c3117dc8ccb537693d29b68c04b9 | afc8d5a9b1c2dd476ea59a7211b455732806fdfd | /Configurations/EFT/ggH/Full2018/nAODv7_Full2018v7/plot.py | 568328f17254972378fbcc30b53eb168bb744218 | []
| no_license | latinos/PlotsConfigurations | 6d88a5ad828dde4a7f45c68765081ed182fcda21 | 02417839021e2112e740607b0fb78e09b58c930f | refs/heads/master | 2023-08-18T20:39:31.954943 | 2023-08-18T09:23:34 | 2023-08-18T09:23:34 | 39,819,875 | 10 | 63 | null | 2023-08-10T14:08:04 | 2015-07-28T07:36:50 | Python | UTF-8 | Python | false | false | 16,841 | py | # plot configuration
# groupPlot = {}
#
# Groups of samples to improve the plots.
# If not defined, normal plots is used
#
groupPlot['top'] = {
'nameHR' : 'tW and t#bar{t}',
'isSignal' : 0,
'color': 400, # kYellow
'samples' : ['top']
}
groupPlot['WW'] = {
'nameHR' : 'WW',
'isSignal' : 0,
'color': 851, # kAzure -9
'samples' : ['WW', 'ggWW', 'WWewk']
}
groupPlot['Fake'] = {
'nameHR' : 'Non-prompt',
'isSignal' : 0,
'color': 921, # kGray + 1
'samples' : ['Fake_em', 'Fake_me']
}
groupPlot['DY'] = {
'nameHR' : "DY",
'isSignal' : 0,
'color': 418, # kGreen+2
'samples' : ['DY','Dyemb']
}
groupPlot['VVV'] = {
'nameHR' : 'VVV',
'isSignal' : 0,
'color': 857, # kAzure -3
'samples' : ['VVV']
}
groupPlot['VZ'] = {
'nameHR' : "VZ",
'isSignal' : 0,
'color' : 617, # kViolet + 1
'samples' : ['VZ', 'WZ', 'ZZ']
}
groupPlot['Vg'] = {
'nameHR' : "V#gamma",
'isSignal' : 0,
'color' : 810, # kOrange + 10
'samples' : ['Vg', 'Wg']
}
groupPlot['VgS'] = {
'nameHR' : "V#gamma*",
'isSignal' : 0,
'color' : 409, # kGreen - 9
'samples' : ['VgS_L','VgS_H']
}
'''
groupPlot['Higgs'] = {
'nameHR' : 'Higgs SM',
'isSignal' : 0,
'color': 632, # kRed
#'samples' : ['H_htt', 'H_hww', 'ZH_hww', 'ggZH_hww', 'WH_hww', 'qqH_hww', 'ggH_hww','bbH_hww','ttH_hww','ZH_htt', 'ggZH_htt', 'WH_htt', 'qqH_htt', 'ggH_htt','bbH_htt','ttH_htt' ]
'samples' : ['H_htt', 'ZH_hww', 'ggZH_hww', 'WH_hww', 'qqH_hww', 'ggH_hww','ttH_hww']
}
'''
groupPlot['H0PM'] = {
'nameHR' : 'SM H',
'color' : 851,
'isSignal' : 1,
'isData' : 0,
'scale' : 1,
'samples' : ['H0PM','VBF_H0PM','WH_H0PM','ZH_H0PM']
}
'''
groupPlot['H0M'] = {
'nameHR' : 'H 0^{-}',
'color' : 617,
'isSignal' : 1,
'isData' : 0,
'scale' : 1,
'samples' : ['H0M']
}
groupPlot['H0PH'] = {
'nameHR' : 'H 0+',
'color' : 632,
'isSignal' : 1,
'isData' : 0,
'scale' : 1,
'samples' : ['H0PH']
}
groupPlot['H0L1'] = {
'nameHR' : 'H #Lambda1',
'color' : 409,
'isSignal' : 1,
'isData' : 0,
'scale' : 1,
'samples' : ['H0L1']
}
'''
'''
groupPlot['VBF_H0PM'] = {
'nameHR' : 'VBF h',
'color' : 851,
'isSignal' : 1,
'isData' : 0,
'scale' : 1,
'samples' : ['VBF_H0PM']
}
groupPlot['VBF_H0M'] = {
'nameHR' : 'VBF H0^{-}',
'color' : 617,
'isSignal' : 1,
'isData' : 0,
'scale' : 1,
'samples' : ['VBF_H0M']
}
groupPlot['VBF_H0PH'] = {
'nameHR' : 'VBF H0+',
'color' : 632,
'isSignal' : 1,
'isData' : 0,
'scale' : 1,
'samples' : ['VBF_H0PH']
}
groupPlot['VBF_H0L1'] = {
'nameHR' : 'VBF H#Lambda1',
'color' : 409,
'isSignal' : 1,
'isData' : 0,
'scale' : 1,
'samples' : ['VBF_H0L1']
}
'''
#ggF
plot['H0PM'] = {
'nameHR' : 'ggH h ',
'color': 3,
'isSignal' : 1,
'isData' : 0,
'scale' : 1,
}
plot['H0M'] = {
'nameHR' : 'ggH 0^{-}',
'color' : 4,
'isSignal' : 1,
'isData' : 0,
'scale' : 1,
}
plot['H0PH'] = {
'nameHR' : 'ggH h+ ',
'color': 5,
'isSignal' : 1,
'isData' : 0,
'scale' : 1,
}
plot['H0L1'] = {
'nameHR' : 'ggH #Lambda1',
'color' : 6,
'isSignal' : 1,
'isData' : 0,
'scale' : 1,
}
# VBF
plot['VBF_H0PM'] = {
'nameHR' : 'VBF h ',
'color': 3,
'isSignal' : 1,
'isData' : 0,
'scale' : 1,
}
plot['VBF_H0M'] = {
'nameHR' : 'VBF 0^{-}',
'color' : 4,
'isSignal' : 1,
'isData' : 0,
'scale' : 1,
}
plot['VBF_H0PH'] = {
'nameHR' : 'VBF h+ ',
'color': 5,
'isSignal' : 1,
'isData' : 0,
'scale' : 1,
}
plot['VBF_H0L1'] = {
'nameHR' : 'VBF #Lambda1',
'color' : 6,
'isSignal' : 1,
'isData' : 0,
'scale' : 1,
}
# VH
plot['WH_H0PM'] = {
'nameHR' : 'WH h ',
'color': 3,
'isSignal' : 1,
'isData' : 0,
'scale' : 1,
}
plot['WH_H0M'] = {
'nameHR' : 'WH 0^{-}',
'color' : 4,
'isSignal' : 1,
'isData' : 0,
'scale' : 1,
}
plot['WH_H0PH'] = {
'nameHR' : 'WH h+ ',
'color': 5,
'isSignal' : 1,
'isData' : 0,
'scale' : 1,
}
plot['WH_H0L1'] = {
'nameHR' : 'WH #Lambda1',
'color' : 6,
'isSignal' : 1,
'isData' : 0,
'scale' : 1,
}
plot['ZH_H0PM'] = {
'nameHR' : 'ZH h ',
'color': 5,
'isSignal' : 1,
'isData' : 0,
'scale' : 1,
}
plot['ZH_H0M'] = {
'nameHR' : 'ZH 0^{-}',
'color' : 6,
'isSignal' : 1,
'isData' : 0,
'scale' : 1,
}
plot['ZH_H0PH'] = {
'nameHR' : 'ZH h+ ',
'color': 5,
'isSignal' : 1,
'isData' : 0,
'scale' : 1,
}
plot['ZH_H0L1'] = {
'nameHR' : 'ZH #Lambda1',
'color' : 6,
'isSignal' : 1,
'isData' : 0,
'scale' : 1,
}
#plot = {}
# keys here must match keys in samples.py
#
'''
plot['ggH_hww_0PH'] = {
'nameHR' : 'ggH0PH',
'color' : 999,
'isSignal' : 1,
'isData' : 0,
'scale' : 1
}
plot['qqH_hww_0PH'] = {
'nameHR' : 'qqH0PH',
'color' : 617,
'isSignal' : 1,
'isData' : 0,
'scale' : 1
}
plot['ggH_hww_ALT'] = {
'nameHR' : 'ggH_ALT',
'color' : 851,
'isSignal' : 1,
'isData' : 0,
'scale' : 1
}
plot['qqH_hww_ALT'] = {
'nameHR' : 'qqH_ALT',
'color' : 617,
'isSignal' : 1,
'isData' : 0,
'scale' : 1
}
'''
plot['DY'] = {
'color': 418, # kGreen+2
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0,
}
plot['Dyemb'] = {
'color': 418, # kGreen+2
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0,
}
plot['Fake_em'] = {
'color': 921, # kGray + 1
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['Fake_me'] = {
'color': 921, # kGray + 1
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['top'] = {
'nameHR' : 'tW and t#bar{t}',
'color': 400, # kYellow
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0,
#'cuts' : {
#'hww2l2v_13TeV_of0j' : 0.94 ,
#'hww2l2v_13TeV_top_of0j' : 0.94 ,
#'hww2l2v_13TeV_dytt_of0j' : 0.94 ,
#'hww2l2v_13TeV_em_0j' : 0.94 ,
#'hww2l2v_13TeV_me_0j' : 0.94 ,
##
#'hww2l2v_13TeV_of1j' : 0.86 ,
#'hww2l2v_13TeV_top_of1j' : 0.86 ,
#'hww2l2v_13TeV_dytt_of1j' : 0.86 ,
#'hww2l2v_13TeV_em_1j' : 0.86 ,
#'hww2l2v_13TeV_me_1j' : 0.86 ,
#},
}
plot['WW'] = {
'color': 851, # kAzure -9
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0 # ele/mu trigger efficiency datadriven
}
plot['ggWW'] = {
'color': 850, # kAzure -10
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['WWewk'] = {
'color': 851, # kAzure -9
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0 # ele/mu trigger efficiency datadriven
}
plot['Vg'] = {
'color': 859, # kAzure -1
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['VgS_L'] = {
'color' : 617, # kViolet + 1
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['VgS_H'] = {
'color' : 617, # kViolet + 1
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['VZ'] = {
'color': 858, # kAzure -2
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['VVV'] = {
'color': 857, # kAzure -3
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
'''
# Htautau
plot['H_htt'] = {
'nameHR' : 'Htt',
'color': 632+4, # kRed+4
'isSignal' : 0,
'isData' : 0,
'scale' : 1 #
}
#plot['ZH_htt'] = {
# 'nameHR' : 'ZHtt',
# 'color': 632+3, # kRed+3
# 'isSignal' : 1,
# 'isData' : 0,
# 'scale' : 1 #
# }
#
#plot['bbH_htt'] = {
# 'nameHR' : 'bbHtt',
# 'color': 632-1, # kRed-1
# 'isSignal' : 1,
# 'isData' : 0,
# 'scale' : 1 #
# }
#
#plot['ttH_htt'] = {
# 'nameHR' : 'bbHtt',
# 'color': 632-2, # kRed-1
# 'isSignal' : 1,
# 'isData' : 0,
# 'scale' : 1 #
# }
#
#
#plot['ggZH_htt'] = {
# 'nameHR' : 'ggZHtt',
# 'color': 632+4, # kRed+4
# 'isSignal' : 1,
# 'isData' : 0,
# 'scale' : 1 #
# }
#
#plot['WH_htt'] = {
# 'nameHR' : 'WHtt',
# 'color': 632+2, # kRed+2
# 'isSignal' : 1,
# 'isData' : 0,
# 'scale' : 1 #
# }
#
#
#plot['qqH_htt'] = {
# 'nameHR' : 'qqHtt',
# 'color': 632+1, # kRed+1
# 'isSignal' : 1,
# 'isData' : 0,
# 'scale' : 1 #
# }
#
#
#plot['ggH_htt'] = {
# 'nameHR' : 'ggHtt',
# 'color': 632, # kRed
# 'isSignal' : 1,
# 'isData' : 0,
# 'scale' : 1 #
# }
# HWW
#plot['H_hww'] = {
# 'nameHR' : 'Hww',
# 'color': 632, # kRed
# 'isSignal' : 1,
# 'isData' : 0,
# 'scale' : 1 #
# }
plot['ZH_hww'] = {
'nameHR' : 'ZH',
'color': 632+3, # kRed+3
'isSignal' : 0,
'isData' : 0,
'scale' : 1 #
}
plot['ggZH_hww'] = {
'nameHR' : 'ggZH',
'color': 632+4, # kRed+4
'isSignal' : 0,
'isData' : 0,
'scale' : 1 #
}
plot['WH_hww'] = {
'nameHR' : 'WH',
'color': 632+2, # kRed+2
'isSignal' : 0,
'isData' : 0,
'scale' : 1 #
}
plot['qqH_hww'] = {
'nameHR' : 'qqH',
'color': 632+1, # kRed+1
'isSignal' : 0,
'isData' : 0,
'scale' : 1 #
}
plot['ggH_hww'] = {
'nameHR' : 'ggH',
'color': 632, # kRed
'isSignal' : 0,
'isData' : 0,
'scale' : 1 #
}
#plot['bbH_hww'] = {
# 'nameHR' : 'bbH',
# 'color': 632+5, # kRed+5
# 'isSignal' : 1,
# 'isData' : 0,
# 'scale' : 1 #
# }
plot['ttH_hww'] = {
'nameHR' : 'ttH',
'color': 632+6, # kRed+6
'isSignal' : 0,
'isData' : 0,
'scale' : 1 #
}
'''
# data
plot['DATA'] = {
'nameHR' : 'Data',
'color': 1 ,
'isSignal' : 0,
'isData' : 1 ,
'isBlind' : 0
}
# additional options
legend['lumi'] = 'L = 59.74/fb'
legend['sqrt'] = '#sqrt{s} = 13 TeV'
#comboPlot['SMvsALT'] = {
# 'groups' : ['H0PM','H0PH','H0M','H0L1']
#}
| [
"[email protected]"
]
| |
705ff0311f1e83b1691af0fa9a0639bced211d37 | 6e365df9ff647336bb5a8b72aa4b4bc8748a74b3 | /week8/day5/CS231n-Python3-master/assignment1/cs231n/classifiers/k_nearest_neighbor.py | 4c83d90fc2c9448a8abba71788ea63f8b52649b2 | []
| no_license | DongIk-Jang/programmers_dev_course | 4cf03b926e556018956fb77e553ec9fb26b0f30d | 07a002c23853bac457ebb25ded908d17420fff6f | refs/heads/master | 2023-06-09T07:00:05.748284 | 2021-07-05T09:16:09 | 2021-07-05T09:16:09 | 359,708,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,422 | py | import numpy as np
class KNearestNeighbor(object):
""" a kNN classifier with L2 distance """
def __init__(self):
pass
def train(self, X, y):
"""
Train the classifier. For k-nearest neighbors this is just
memorizing the training data.
Inputs:
- X: A numpy array of shape (num_train, D) containing the training data
consisting of num_train samples each of dimension D.
- y: A numpy array of shape (N,) containing the training labels, where
y[i] is the label for X[i].
"""
self.X_train = X
self.y_train = y
def predict(self, X, k=1, num_loops=0):
"""
Predict labels for test data using this classifier.
Inputs:
- X: A numpy array of shape (num_test, D) containing test data consisting
of num_test samples each of dimension D.
- k: The number of nearest neighbors that vote for the predicted labels.
- num_loops: Determines which implementation to use to compute distances
between training points and testing points.
Returns:
- y: A numpy array of shape (num_test,) containing predicted labels for the
test data, where y[i] is the predicted label for the test point X[i].
"""
if num_loops == 0:
dists = self.compute_distances_no_loops(X)
elif num_loops == 1:
dists = self.compute_distances_one_loop(X)
elif num_loops == 2:
dists = self.compute_distances_two_loops(X)
else:
raise ValueError('Invalid value %d for num_loops' % num_loops)
return self.predict_labels(dists, k=k)
def compute_distances_two_loops(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using a nested loop over both the training data and the
test data.
Inputs:
- X: A numpy array of shape (num_test, D) containing test data.
Returns:
- dists: A numpy array of shape (num_test, num_train) where dists[i, j]
is the Euclidean distance between the ith test point and the jth training
point.
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
for i in range(num_test):
for j in range(num_train):
#####################################################################
# TODO: #
# Compute the l2 distance between the ith test point and the jth #
# training point, and store the result in dists[i, j]. You should #
# not use a loop over dimension. #
#####################################################################
# dists[i,j] = np.sqrt(np.sum(np.square(self.X_train[j, :] - X[i, :])))
dists[i,j] = np.linalg.norm(self.X_train[j,:]-X[i,:])
#####################################################################
# END OF YOUR CODE #
#####################################################################
return dists
def compute_distances_one_loop(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using a single loop over the test data.
Input / Output: Same as compute_distances_two_loops
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
for i in range(num_test):
#######################################################################
# TODO: #
# Compute the l2 distance between the ith test point and all training #
# points, and store the result in dists[i, :]. #
#######################################################################
# dists[i] = np.sqrt(np.sum(np.square(X[i] - self.X_train), axis=1))
dists[i, :] = np.linalg.norm(self.X_train - X[i,:], axis = 1)
#######################################################################
# END OF YOUR CODE #
#######################################################################
return dists
def compute_distances_no_loops(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using no explicit loops.
Input / Output: Same as compute_distances_two_loops
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
#########################################################################
# TODO: #
# Compute the l2 distance between all test points and all training #
# points without using any explicit loops, and store the result in #
# dists. #
# #
# You should implement this function using only basic array operations; #
# in particular you should not use functions from scipy. #
# #
# HINT: Try to formulate the l2 distance using matrix multiplication #
# and two broadcast sums. #
#########################################################################
te = np.square(X).sum(axis=1)
tr = np.square(self.X_train).sum(axis=1)
te = te.T.reshape(te.shape[0], 1)
tr = tr.reshape(1, tr.shape[0])
M = -2 * np.dot(X, self.X_train.T)
# print('te shape:', te.shape)
# print('tr shape:', tr.shape)
# print('M shape:', M.shape)
dists = np.sqrt(te + tr + M)
#########################################################################
# END OF YOUR CODE #
#########################################################################
return dists
def predict_labels(self, dists, k=1):
"""
Given a matrix of distances between test points and training points,
predict a label for each test point.
Inputs:
- dists: A numpy array of shape (num_test, num_train) where dists[i, j]
gives the distance betwen the ith test point and the jth training point.
Returns:
- y: A numpy array of shape (num_test,) containing predicted labels for the
test data, where y[i] is the predicted label for the test point X[i].
"""
num_test = dists.shape[0]
y_pred = np.zeros(num_test)
for i in range(num_test):
# A list of length k storing the labels of the k nearest neighbors to
# the ith test point.
closest_y = []
#########################################################################
# TODO: #
# Use the distance matrix to find the k nearest neighbors of the ith #
# testing point, and use self.y_train to find the labels of these #
# neighbors. Store these labels in closest_y. #
# Hint: Look up the function numpy.argsort. #
#########################################################################
# index = np.argpartition(dists[i, :], -(self.X_train.shape[0] - k))[:k]
# index = np.argpartition(dists[i, :], k)[:k]
index = np.argsort(dists[i, :])[:k]
# print('index:', index)
label = list(self.y_train[index])
# print('label:', label)
#########################################################################
# TODO: #
# Now that you have found the labels of the k nearest neighbors, you #
# need to find the most common label in the list closest_y of labels. #
# Store this label in y_pred[i]. Break ties by choosing the smaller #
# label. #
#########################################################################
from collections import Counter
y_pred[i] = int(Counter(label).most_common()[0][0])
# print('pred:', y_pred[i])
#########################################################################
# END OF YOUR CODE #
#########################################################################
return y_pred
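if __name__ == '__main__':
    # Illustrative smoke test (not part of the assignment): two well-separated
    # clusters, so a 3-nearest-neighbor vote should recover the cluster labels.
    knn = KNearestNeighbor()
    knn.train(np.array([[0., 0.], [0., 1.], [10., 10.], [10., 11.]]),
              np.array([0, 0, 1, 1]))
    print(knn.predict(np.array([[0., 0.5], [10., 10.5]]), k=3, num_loops=0))
    # expected output: [0. 1.]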
| [
"[email protected]"
]
| |
a5eec233d2b08c4926a8a84437e4236890b3e12a | 2e8272bad04f2fb1d5c7dbafbcceca0afa02d53a | /parser.py | cc6652816c0742a5d3b9611d780271b2425ddff4 | []
| no_license | lowweb/ostiePY | e138642efe8c7c5544f9588feaeee8481f701c99 | fe7c72c820e293c7fa0966804af86c11be22979f | refs/heads/master | 2021-02-06T14:08:41.997316 | 2020-02-29T06:52:17 | 2020-02-29T06:52:17 | 243,919,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 715 | py | import csv
from insert_music import insert_music
array_music = []
unic_nconst = set()
with open('data/name.basics.tsv', newline='') as tsvfile:
reader = csv.DictReader(tsvfile, dialect='excel-tab')
countrow = 0
for row in reader:
prof = row['primaryProfession'].strip(" ").split(",")
if 'composer' in prof or 'soundtrack' in prof:
if row['nconst'] not in unic_nconst:
insert_row = [int(row['nconst'][2:]), row['nconst'], row['primaryName'], row['primaryProfession'],row['knownForTitles'] ]
array_music.append(insert_row)
unic_nconst.add(row['nconst'])
countrow += 1
print(countrow)
insert_music(array_music)
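# Hypothetical sketch of the insert_music dependency (its real definition
# lives in insert_music.py and is not shown here); assuming a SQLite backend,
# it could look roughly like:
#   import sqlite3
#   def insert_music(rows):
#       with sqlite3.connect('data/music.db') as conn:
#           conn.executemany('INSERT INTO music VALUES (?, ?, ?, ?, ?)', rows)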
| [
"[email protected]"
]
| |
607f0c745c7df74bf1cbfc3ebac73ac5b92debb3 | 8d03310627f1f625eddda8f4a3e680892872e0ec | /batemaneq/__init__.py | 09ee7bcfa329b2d98875fd9beb5ea50bbdbf1f40 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
]
| permissive | Rolleroo/batemaneq | 4da15e4bff32484d27ea9dc2b3338edc4956b0df | bd8c24d1f77ccb166b3210d81d9468f7789813ad | refs/heads/master | 2021-02-05T12:43:40.639427 | 2020-02-23T20:47:48 | 2020-02-23T20:47:48 | 243,781,711 | 1 | 0 | BSD-2-Clause | 2020-02-28T14:31:36 | 2020-02-28T14:31:36 | null | UTF-8 | Python | false | false | 356 | py | # -*- coding: utf-8 -*-
"""
batemaneq provides a Python package for evaluating the Bateman equation
"""
from __future__ import absolute_import
from ._release import __version__
from .bateman import bateman_parent, bateman_full
from ._bateman_double import bateman_parent as bateman_parent_arr
from ._bateman_double import bateman_full as bateman_full_arr
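# Usage sketch (signatures assumed from the imports above): for a decay chain
# with decay constants lmbd, starting from a pure parent,
#   bateman_parent([2.0, 3.0, 5.0], 0.5)
# would give the fraction of each species at t = 0.5.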
| [
"[email protected]"
]
| |
31f063a4f2c10039d18c2b50fa5313f550fd0ea6 | 11964177956aa6d5d26f8838af794b33b7887bf3 | /Secret_Santa/wsgi.py | 1f6fd0559de7eb7453e1a41bf756c5cf0d9585a1 | []
| no_license | Tyler-Irving/Secret-Santa | fa134a27ca6803aa20591a778f2f00c7f480c778 | 49f026cb005fdf8d6b42b32501c4892f34fa0fe0 | refs/heads/master | 2020-09-14T18:02:09.871886 | 2019-11-21T15:51:35 | 2019-11-21T15:51:35 | 223,208,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for Secret_Santa project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Secret_Santa.settings')
application = get_wsgi_application()
| [
"[email protected]"
]
|