Dataset schema:

| column | dtype | length / classes |
| --- | --- | --- |
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 5–283 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0–41 |
| license_type | string | 2 classes |
| repo_name | string | length 7–96 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 58 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 12.7k–662M, nullable (⌀) |
| star_events_count | int64 | 0–35.5k |
| fork_events_count | int64 | 0–20.6k |
| gha_license_id | string | 11 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 43 classes |
| src_encoding | string | 9 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 7–5.88M |
| extension | string | 30 classes |
| content | string | length 7–5.88M |
| authors | sequence | length 1 |
| author | string | length 0–73 |
e07fa43a83bd2adf094cd9ebac21dd89d40d6130 | bebaaeb46b31d6781f9d3ecad54a55cbbc7213fb | /cw03/cw6.py | 977a1e77748b6b043928a73c03427b59b8c7a63e | [] | no_license | D3XXXt3R/PGiM | ce9681441d833f45cefee94565c1dd5fef1e9650 | 8eeecc3543be27788b760a6717a729b46883d055 | refs/heads/master | 2021-09-03T05:21:09.403913 | 2018-01-05T23:05:06 | 2018-01-05T23:05:06 | 106,942,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | import os
import cv2
img1 = cv2.imread(os.path.join(os.path.dirname(os.path.abspath(".")), "images", "eagle.jpg"))
img2 = cv2.imread(os.path.join(os.path.dirname(os.path.abspath(".")), "images", "image.jpg"))
result = cv2.divide(img1, img2)
cv2.imshow("img1", img1)
cv2.imshow("img2", img2)
cv2.imshow("cv2.add img1 and img2", result)
cv2.waitKey(0)
cv2.destroyAllWindows() | [
"[email protected]"
] | |
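A quick aside on the OpenCV script above: `cv2.imread` returns `None` instead of raising when a path is wrong, and `cv2.divide` needs both inputs to have the same size and type. A minimal hedged sketch of the same flow with those failure modes made explicit (the `images/eagle.jpg` and `images/image.jpg` paths are the ones the script assumes):

```python
import os
import cv2

base = os.path.join(os.path.dirname(os.path.abspath(".")), "images")
img1 = cv2.imread(os.path.join(base, "eagle.jpg"))
img2 = cv2.imread(os.path.join(base, "image.jpg"))

# imread signals a bad path by returning None, not by raising.
if img1 is None or img2 is None:
    raise FileNotFoundError("could not load one of the input images")
if img1.shape != img2.shape:
    raise ValueError("cv2.divide needs images of identical shape")

result = cv2.divide(img1, img2)  # per-element division
cv2.imshow("cv2.divide img1 and img2", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
```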
7c2844d70a96688b97f5507202d86e90e17c7f8b | d3aab787246667a80373843ed165387277d590d4 | /sdk/python/tests/integration/feature_repos/integration_test_repo_config.py | e4ff667764809f0a6222d8b25b4153235cf9980f | [
"Apache-2.0"
] | permissive | woop/feast-test | 0302d30f8a6c0fb9b0e7e6d7d11c071a353a5ac2 | 0938060be08eff0eb94400384329a07fd17bd543 | refs/heads/master | 2022-03-09T20:41:50.933324 | 2022-03-05T06:03:38 | 2022-03-05T06:03:38 | 248,950,704 | 0 | 1 | Apache-2.0 | 2020-03-21T11:07:54 | 2020-03-21T10:12:35 | Java | UTF-8 | Python | false | false | 1,089 | py | from dataclasses import dataclass
from typing import Dict, Type, Union
from tests.integration.feature_repos.universal.data_source_creator import (
DataSourceCreator,
)
from tests.integration.feature_repos.universal.data_sources.file import (
FileDataSourceCreator,
)
@dataclass(frozen=True)
class IntegrationTestRepoConfig:
"""
This class should hold all possible parameters that may need to be varied by individual tests.
"""
provider: str = "local"
online_store: Union[str, Dict] = "sqlite"
offline_store_creator: Type[DataSourceCreator] = FileDataSourceCreator
full_feature_names: bool = True
infer_features: bool = False
python_feature_server: bool = False
def __repr__(self) -> str:
return "-".join(
[
f"Provider: {self.provider}",
f"{self.offline_store_creator.__name__.split('.')[-1].rstrip('DataSourceCreator')}",
self.online_store
if isinstance(self.online_store, str)
else self.online_store["type"],
]
)
| [
"[email protected]"
] | |
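A small sketch of the `frozen=True` dataclass pattern the class above relies on; `DemoRepoConfig` is a hypothetical stand-in so the snippet runs without the feast test package:

```python
from dataclasses import dataclass, FrozenInstanceError

@dataclass(frozen=True)
class DemoRepoConfig:
    provider: str = "local"
    online_store: str = "sqlite"

config = DemoRepoConfig(online_store="redis")
try:
    config.provider = "gcp"  # frozen=True generates a __setattr__ that refuses this
except FrozenInstanceError:
    print("config objects are immutable once constructed")
```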
94096acaeabb4aaab31c981ee3428e6996c89c0c | 8c7935dff5256f607823a25372960ca126a02661 | /FPL_wildcard_team_selector/__init__.py | 2e8523740904b59e9ef3f29c76e389fc3c1f45e9 | [
"MIT"
] | permissive | abdul-gendy/FPL_wildcard_team_selector | 13fd4375e3d88b9c9831ae731e6f630ad7bb7cee | e8dce9b655c44c665bb681d772ef0c215816ccd8 | refs/heads/main | 2023-02-09T19:44:14.968850 | 2021-01-09T15:50:19 | 2021-01-09T15:50:19 | 316,372,344 | 0 | 0 | MIT | 2021-01-09T15:46:20 | 2020-11-27T01:31:55 | Python | UTF-8 | Python | false | false | 82 | py | from .initiate_team_selection_pipeline import play_wildcard, generate_player_stats | [
"[email protected]"
] | |
0ace437a85db6e96650951783193984ca95813b5 | c90081303077c4b61eea8fd3e358b3e4fdc9fe3e | /python/setup.py | d46141714a638ff7c5165fbbacb30938c7deec32 | [] | no_license | deepankarm/jinad-clients | 215bb8b678c9c0535f12043432d0a9caf9ddb90b | dfdf80613b37cbaaae47083e7f97527d57cf7642 | refs/heads/main | 2023-03-03T19:55:19.855216 | 2021-02-08T19:43:57 | 2021-02-08T19:43:57 | 337,189,609 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 978 | py | """
JinaD (Daemon)
REST interface for managing distributed Jina # noqa: E501
The version of the OpenAPI document: 0.9.32
Generated by: https://openapi-generator.tech
"""
from setuptools import setup, find_packages # noqa: H301
NAME = "openapi-client"
VERSION = "1.0.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = [
"urllib3 >= 1.25.3",
"python-dateutil",
]
setup(
name=NAME,
version=VERSION,
description="JinaD (Daemon)",
author="OpenAPI Generator community",
author_email="[email protected]",
url="",
keywords=["OpenAPI", "OpenAPI-Generator", "JinaD (Daemon)"],
python_requires=">=3.6",
install_requires=REQUIRES,
packages=find_packages(exclude=["test", "tests"]),
include_package_data=True,
long_description="""\
REST interface for managing distributed Jina # noqa: E501
"""
)
| [
"[email protected]"
] | |
169d7cc14dc8f7dc5cef3961d36301b8c238a443 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/stdlib-big-1150.py | f0c360d921b9d495733e208728892b4243b4c859 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,992 | py | # ChocoPy library functions
def int_to_str(x: int) -> str:
digits:[str] = None
result:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str2(x: int, x2: int) -> str:
digits:[str] = None
digits2:[str] = None
result:str = ""
result2:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str3(x: int, x2: int, x3: int) -> str:
digits:[str] = None
digits2:[str] = None
digits3:[str] = None
result:str = ""
result2:str = ""
result3:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str4(x: int, x2: int, x3: int, x4: int) -> str:
digits:[str] = None
digits2:[str] = None
digits3:[str] = None
digits4:[str] = None
result:str = ""
result2:str = ""
result3:str = ""
result4:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str5(x: int, x2: int, x3: int, x4: int, x5: int) -> str:
digits:[str] = None
digits2:[str] = None
digits3:[str] = None
digits4:[str] = None
digits5:[str] = None
result:str = ""
result2:str = ""
result3:str = ""
result4:str = ""
result5:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def str_to_int(x: str) -> int:
result:int = 0
digit:int = 0
char:str = ""
sign:int = 1
first_char:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
        elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int2(x: str, x2: str) -> int:
result:int = 0
result2:int = 0
digit:int = 0
digit2:int = 0
char:str = ""
char2:str = ""
sign:int = 1
sign2:int = 1
first_char:bool = True
first_char2:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int3(x: str, x2: str, x3: str) -> int:
result:int = 0
result2:int = 0
result3:int = 0
digit:int = 0
digit2:int = 0
digit3:int = 0
char:str = ""
char2:str = ""
char3:str = ""
sign:int = 1
sign2:int = 1
sign3:int = 1
first_char:bool = True
first_char2:bool = True
first_char3:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int4(x: str, x2: str, x3: str, x4: str) -> int:
result:int = 0
result2:int = 0
result3:int = 0
result4:int = 0
digit:int = 0
digit2:int = 0
digit3:int = 0
digit4:int = 0
char:str = ""
char2:str = ""
char3:str = ""
char4:str = ""
sign:int = 1
sign2:int = 1
sign3:int = 1
sign4:int = 1
first_char:bool = True
first_char2:bool = True
first_char3:bool = True
first_char4:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int5(x: str, x2: str, x3: str, x4: str, x5: str) -> int:
result:int = 0
result2:int = 0
result3:int = 0
result4:int = 0
result5:int = 0
digit:int = 0
digit2:int = 0
digit3:int = 0
digit4:int = 0
digit5:int = 0
char:str = ""
char2:str = ""
char3:str = ""
char4:str = ""
char5:str = ""
sign:int = 1
sign2:int = 1
sign3:int = 1
sign4:int = 1
sign5:int = 1
first_char:bool = True
first_char2:bool = True
first_char3:bool = True
first_char4:bool = True
first_char5:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
# Input parameters
c:int = 42
c2:int = 42
c3:int = 42
c4:int = 42
c5:int = 42
n:int = 10
n2:int = 10
n3:int = 10
n4:int = 10
n5:int = 10
# Run [-nc, nc] with step size c
s:str = ""
s2:str = ""
s3:str = ""
s4:str = ""
s5:str = ""
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
i = -n * c
# Crunch
while i <= n * c:
s = int_to_str(i)
print(s)
i = str_to_int(s) + c
| [
"[email protected]"
] | |
ffc1f5e25662f03a42ca4d8b62463a5ac471ce17 | 613ef791c47bad3bccc6c840147548ad0ae48351 | /18term1/COMP9021/Assignment_1/readtest.py | e6d7b3d954a5022983aa541120b2028c1f93e53c | [] | no_license | andygaojw/COMP-COURSE | d02b3beef045f7a18cfc0636c54045f00f4091c3 | f923f7aedaaea24dc894396533f79dfd298d13a3 | refs/heads/master | 2022-12-10T11:12:55.651087 | 2020-09-12T17:48:43 | 2020-09-12T17:48:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | import sys
filename = input("")
array = []
try:
with open(filename) as file:
line = file.readline()
while line:
line = line.strip('\n')
line = list(line)
while ' ' in line:
line.remove(' ')
array.append(line)
line = file.readline()
except FileNotFoundError:
print("Can't find file!")
sys.exit()
for i in range(len(array)):
for j in range(len(array[i])):
array[i][j] = int(array[i][j])
print(array)
| [
"[email protected]"
] | |
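For the assignment script above, a hedged illustration of the input it expects (the file name is hypothetical): a text file with one single-digit number per cell, cells separated by spaces, one row per line.

```
$ printf '1 2 3\n4 5 6\n' > grid.txt
$ python readtest.py
grid.txt
[[1, 2, 3], [4, 5, 6]]
```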
1d588f39142ab87ca6ee36bc0076d85cc7529753 | f89d54420d4b0d86753afcf7d8db248cabaa4868 | /venv/bin/django-admin.py | 769730120a72404d2d010fd90c9f9b0fd5b4f303 | [] | no_license | k121995/Eric | a58a247399588d3956cfeac25654f1c4f5f471d2 | 20f27a5f2ecd0249e2e13e245c5ce0a6ce168e03 | refs/heads/master | 2021-04-07T10:55:32.264499 | 2020-03-20T04:41:07 | 2020-03-20T04:41:07 | 248,669,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | #!/home/user/Desktop/new/venv/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"[email protected]"
] | |
0977cd18f95a2dfb2553b1988fb052ae585579fd | c23d14c870ba0e3ecd739447018d890b80523fbb | /pyforchange/egg/resources/extensions.py | b8488b314b8cf4dff3a4fd01b830b422eb59166d | [
"MIT",
"CC-BY-4.0"
] | permissive | PythonForChange/pyforchange | 9e1cfb286bca2720c9cf4f2ff345aa70d09ce87e | 2cc5afef227ac68147e291e447c57924586a0b12 | refs/heads/master | 2023-06-22T10:21:42.507358 | 2021-07-22T20:59:10 | 2021-07-22T20:59:10 | 374,409,188 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,197 | py | import os
from pyforchange.egg.resources.modules import install
# BEWARE: There is an import in line ~51
class Lang:
def __init__(self,name: str):
self.name=name
self.extension="."+self.name
self.root="/usr/bin/python "+name+self.extension
def write(self,T: str,name: str):
f=open(name+self.extension,"w")
f.write(T)
f.close()
def append(self,T: str,name: str):
f=open(name+self.extension,"a")
f.write(T)
f.close()
def read(self, name: str):
f=open(name+self.extension,"r")
text=f.read()
f.close()
return text
    def execute(self, name: str):
        # Build the command from the file actually being executed,
        # not from the language name stored in self.root.
        command = "/usr/bin/python " + name + self.extension
        try:
            os.system(command)
        except OSError:
            print("Execute error in: " + command)
def delete(self,name: str):
os.remove(name+self.extension)
def getLines(self,name: str):
h=open(name+self.extension,"r")
lines=h.readlines()
h.close()
return lines
def writeLines(self,lines,name: str):
self.write("",name)
for i in lines:
self.append(i,name)
# Extensions
py=Lang("py")
txt=Lang("txt")
nqa=Lang("nqa")
pfcf=Lang("pfcf")
html=Lang("html")
class Document():
def __init__(self, name: str, output: str ="index"):
install("bs4")
# BEWARE: There is an import here
from bs4 import BeautifulSoup
self.name=name
doc=open(self.name+".html").read()
self.soup=BeautifulSoup(doc,features="html.parser")
self.output=output
lines=html.getLines(self.name)
html.writeLines(lines, self.output)
def write(self, text: str, id: str):
lines=html.getLines(self.output)
for i in range(0,len(lines)):
l=lines[i]
words=l.split()
if words[0]=="$egg" and id==words[1]:
lines[i]=text+"\n"
html.writeLines(lines,self.output)
return lines
def addTag(self,tag: str, content: str, id: str):
return self.write(makeTag(self,tag, content),id)
def makeTag(document,tag: str, content: str):
new=document.soup.new_tag(tag)
new.string=content
return str(new)
| [
"[email protected]"
] | |
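A hedged usage sketch for the `Lang` helpers above (the file name `demo` is hypothetical, and `execute` assumes the hard-coded `/usr/bin/python` interpreter path):

```python
from pyforchange.egg.resources.extensions import py

py.write("print('hello from pyforchange')", "demo")  # creates demo.py
py.append("\nprint('appended line')", "demo")
print(py.read("demo"))  # read the file back
py.execute("demo")      # runs /usr/bin/python demo.py
py.delete("demo")       # removes demo.py
```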
704f69454fb77d18fbc3f25613f593f8968ae830 | 4c5b7f7f56105616344a3d5b59c60fe4f2756b01 | /user/views.py | d6724afdda6732bfd5fd189f15aa7bb41fd531db | [
"MIT"
] | permissive | KrishnaVeer7712/India-National-Safety-Management-Web | 2b79e1c19808864001c9b9cc7af01a6567219a7b | bfc60664b243b71c66265aa89fc4da8886b3d511 | refs/heads/master | 2023-01-09T03:31:27.556100 | 2020-11-12T06:29:23 | 2020-11-12T06:29:23 | 311,727,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,096 | py | from django.shortcuts import render,redirect
from django.contrib import messages
from django.contrib.auth import authenticate,login
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm
from .forms import UserRegisterForm
from django.core.mail import send_mail
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
from django.template import Context
from django.http import HttpResponse
##################################################################
####################index#######################################
def index(request):
return render(request, 'user/index.html',{'title':'index'})
def read_file(request):
    f = open('D:/Sem Projects/Sem 7 projects/Capstone/India National Safety Management/India National Safety Management/text.txt', 'r')
file_content = f.read()
f.close()
return HttpResponse(file_content, content_type="text/plain")
########################################################################
########### register here #####################################
def register(request):
if request.method == 'POST':
        form = UserRegisterForm(request.POST)
if form.is_valid():
username = request.POST.get('username')
#########################mail####################################
htmly = get_template('user/Email.html')
d = { 'username': username }
subject, from_email, to = 'hello', '[email protected]', '[email protected]'
html_content = htmly.render(d)
msg = EmailMultiAlternatives(subject, html_content, from_email, [to])
msg.attach_alternative(html_content, "text/html")
try:
msg.send()
except:
print("error in sending mail")
##################################################################
form.save()
username = form.cleaned_data.get('username')
messages.success(request, f'Your account has been created! You are now able to log in')
return redirect('login')
else:
form = UserRegisterForm()
return render(request, 'user/register.html', {'form': form,'title':'reqister here'})
###################################################################################
################login forms###################################################
def Login(request):
if request.method == 'POST':
#AuthenticationForm_can_also_be_used__
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(request, username=username, password=password)
if user is not None:
form = login(request,user)
            messages.success(request, f'Welcome {username}!!')
return redirect('index')
else:
            messages.info(request, 'Account does not exist, please sign up')
form = AuthenticationForm()
return render(request, 'user/login.html', {'form':form,'title':'log in'})
| [
"[email protected]"
] | |
84006f624864e501e1d1e5bfd61e533585689919 | b08d42933ac06045905d7c005ca9c114ed3aecc0 | /src/coefSubset/evaluate/ranks/thirtyPercent/rank_1s4y_D.py | 65f83a88199c4d9bcd5ca63bf3413fa8d2c6358d | [] | no_license | TanemuraKiyoto/PPI-native-detection-via-LR | d148d53f5eb60a4dda5318b371a3048e3f662725 | 897e7188b0da94e87126a4acc0c9a6ff44a64574 | refs/heads/master | 2022-12-05T11:59:01.014309 | 2020-08-10T00:41:17 | 2020-08-10T00:41:17 | 225,272,083 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,392 | py | # 9 July 2019
# Kiyoto Aramis Tanemura
# Several metrics are used to assess the performance of the trained RF model, notably native ranking. This script returns a ranking of the native protein-protein complex among a decoy set. For convenience, I will define as a function and will call in a general performance assessment script.
# Modified 11 July 2019 by Kiyoto Aramis Tanemura. To parallelize the process, I will replace the for loop for the testFileList to a multiprocessing pool.
# Modified 9 September 2019 by Kiyoto Aramis Tanemura. I will use the function to perform the calculation on one CSV file only. Thus instead of a function to import in other scripts, they will be individual jobs parallelized as individual jobs in the queue.
import os
import pandas as pd
import numpy as np
import pickle
os.chdir('/mnt/scratch/tanemur1/')
# Read the model and trainFile
testFile = '1s4y.csv'
identifier = 'D'
coefFrac = 0.3
testFilePath = '/mnt/scratch/tanemur1/CASF-PPI/nonb_descriptors/complete/'
modelPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/thirtyPercent/'
outputPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/evaluate/thirtyPercent/ranks/'
pdbID = testFile[:4]
with open(modelPath + 'model' + identifier + '.pkl', 'rb') as f:
clf = pickle.load(f)
result = pd.DataFrame()
scoreList = []
df1 = pd.read_csv(testFilePath + testFile)
dropList = ['Unnamed: 0', 'Unnamed: 0.1', 'ref']
df1 = df1.drop(dropList, axis = 1)
df1 = df1.set_index('Pair_name')
df1 = pd.DataFrame(df1.values.T, columns = df1.index, index = df1.columns)
df1.fillna(0.0, inplace = True)
#df1 = df1.reindex(sorted(df1.columns), axis = 1)
# Keep coefficients within the given fraction when ordered by decreasing order of coefficient magnitude
coefs = pd.read_csv('/mnt/home/tanemur1/6May2019/2019-11-11/results/medianCoefs.csv', index_col = 0, header = None, names = ['coefficients'])
coefs['absVal'] = np.abs(coefs['coefficients'])
coefs.sort_values(by = 'absVal', ascending = False, inplace = True)
coefs = coefs[:int(14028 * coefFrac + 0.5)]
keepList = list(coefs.index)
del coefs
df1 = df1[keepList]
df1 = df1.reindex(sorted(df1.columns), axis = 1)
with open(modelPath + 'standardScaler' + identifier + '.pkl', 'rb') as g:
scaler = pickle.load(g)
for i in range(len(df1)):
# subtract from one row each row of the dataframe, then remove the trivial row[[i]] - row[[i]]. Also some input files have 'class' column. This is erroneous and is removed.
df2 = pd.DataFrame(df1.iloc[[i]].values - df1.values, index = df1.index, columns = df1.columns)
df2 = df2.drop(df1.iloc[[i]].index[0], axis = 0)
    # Standardize the input DF using the standard scaler fitted on the training data.
df2 = scaler.transform(df2)
# Predict class of each comparison descriptor and sum the classes to obtain score. Higher score corresponds to more native-like complex
predictions = clf.predict(df2)
score = sum(predictions)
scoreList.append(score)
# Make a new DataFrame to store the score and corresponding descriptorID. Add rank as column. Note: lower rank corresponds to more native-like complex
result = pd.DataFrame(data = {'score': scoreList}, index = df1.index.tolist()).sort_values(by = 'score', ascending = False)
result['rank'] = range(1, len(result) + 1)
with open(outputPath + pdbID + identifier + '.csv', 'w') as h:
result.to_csv(h)
| [
"[email protected]"
] | |
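The scoring idea above (classify every pairwise difference of descriptor rows, then sum the predicted classes per candidate) can be sketched on toy data; everything here is illustrative, random features and a stand-in classifier, not the trained CASF-PPI model:

```python
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
X = pd.DataFrame(rng.normal(size=(5, 3)), index=[f"decoy{i}" for i in range(5)])

# Stand-in "classifier": votes 1 when the mean feature difference is positive.
def predict(diffs):
    return (diffs.mean(axis=1) > 0).astype(int)

scores = {}
for name in X.index:
    diffs = X.loc[[name]].values - X.values                  # this row minus every row
    diffs = np.delete(diffs, X.index.get_loc(name), axis=0)  # drop the trivial self-pair
    scores[name] = int(predict(pd.DataFrame(diffs)).sum())

# Higher score = more "native-like" under the stand-in model.
ranking = pd.Series(scores).sort_values(ascending=False)
print(ranking)
```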
5151493f43a0b90174efde9a14fa9b6bcf09ea17 | dd42972062dee3d4f59bf185173efa90d3f89e36 | /tests/data_asset/test_expectation_decorators.py | 427216e0c854e19f8ffd1a63ac8f2ffb461749b4 | [
"Apache-2.0"
] | permissive | SwishAnalytics/great_expectations | 7d63692826e904e05fd7a8b6c38284c8e1d451b4 | 31f3dc1d858f6c0235e7bd27f2af6c9bdf7fa775 | refs/heads/develop | 2022-04-19T12:31:15.304005 | 2020-04-18T00:16:21 | 2020-04-18T00:16:21 | 250,349,030 | 0 | 0 | Apache-2.0 | 2020-03-26T19:28:56 | 2020-03-26T19:10:48 | null | UTF-8 | Python | false | false | 18,695 | py | from __future__ import division
import pytest
from great_expectations.core import ExpectationKwargs, ExpectationConfiguration, ExpectationValidationResult
from great_expectations.data_asset import DataAsset
from great_expectations.dataset import PandasDataset, MetaPandasDataset
class ExpectationOnlyDataAsset(DataAsset):
@DataAsset.expectation([])
def no_op_expectation(self, result_format=None, include_config=True, catch_exceptions=None, meta=None):
return {"success": True}
@DataAsset.expectation(['value'])
def no_op_value_expectation(self, value=None,
result_format=None, include_config=True, catch_exceptions=None, meta=None):
return {"success": True}
@DataAsset.expectation([])
def exception_expectation(self,
result_format=None, include_config=True, catch_exceptions=None, meta=None):
raise ValueError("Gotcha!")
def test_expectation_decorator_build_config():
eds = ExpectationOnlyDataAsset()
eds.no_op_expectation()
eds.no_op_value_expectation('a')
config = eds.get_expectation_suite()
assert ExpectationConfiguration(
expectation_type='no_op_expectation',
kwargs={}
) == config.expectations[0]
assert ExpectationConfiguration(
expectation_type='no_op_value_expectation',
kwargs=ExpectationKwargs({
'value': 'a'
})
) == config.expectations[1]
def test_expectation_decorator_include_config():
eds = ExpectationOnlyDataAsset()
out = eds.no_op_value_expectation('a', include_config=True)
assert ExpectationConfiguration(
expectation_type='no_op_value_expectation',
kwargs={
'value': 'a',
'result_format': 'BASIC'
}
) == out.expectation_config
def test_expectation_decorator_meta():
metadata = {'meta_key': 'meta_value'}
eds = ExpectationOnlyDataAsset()
out = eds.no_op_value_expectation('a', meta=metadata)
config = eds.get_expectation_suite()
assert ExpectationValidationResult(
success=True,
meta=metadata,
expectation_config=config.expectations[0]
) == out
assert ExpectationConfiguration(
expectation_type='no_op_value_expectation',
kwargs={'value': 'a'},
meta=metadata
) == config.expectations[0]
def test_expectation_decorator_catch_exceptions():
eds = ExpectationOnlyDataAsset()
# Confirm that we would raise an error without catching exceptions
with pytest.raises(ValueError):
eds.exception_expectation(catch_exceptions=False)
# Catch exceptions and validate results
out = eds.exception_expectation(catch_exceptions=True)
assert out.exception_info['raised_exception'] is True
# Check only the first and last line of the traceback, since formatting can be platform dependent.
assert 'Traceback (most recent call last):' == out.exception_info['exception_traceback'].split('\n')[0]
assert 'ValueError: Gotcha!' == out.exception_info['exception_traceback'].split('\n')[-2]
def test_pandas_column_map_decorator_partial_exception_counts():
df = PandasDataset({'a': [0, 1, 2, 3, 4]})
out = df.expect_column_values_to_be_between('a', 3, 4,
result_format={'result_format': 'COMPLETE', 'partial_unexpected_count': 1})
assert 1 == len(out.result['partial_unexpected_counts'])
assert 3 == len(out.result['unexpected_list'])
def test_column_map_expectation_decorator():
# Create a new CustomPandasDataset to
# (1) demonstrate that custom subclassing works, and
# (2) Test expectation business logic without dependencies on any other functions.
class CustomPandasDataset(PandasDataset):
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_odd(self, column):
return column.map(lambda x: x % 2)
@MetaPandasDataset.column_map_expectation
def expectation_that_crashes_on_sixes(self, column):
return column.map(lambda x: (x-6)/0 != "duck")
df = CustomPandasDataset({
'all_odd': [1, 3, 5, 5, 5, 7, 9, 9, 9, 11],
'mostly_odd': [1, 3, 5, 7, 9, 2, 4, 1, 3, 5],
'all_even': [2, 4, 4, 6, 6, 6, 8, 8, 8, 8],
'odd_missing': [1, 3, 5, None, None, None, None, 1, 3, None],
'mixed_missing': [1, 3, 5, None, None, 2, 4, 1, 3, None],
'all_missing': [None, None, None, None, None, None, None, None, None, None]
})
df.set_default_expectation_argument("result_format", "COMPLETE")
df.set_default_expectation_argument("include_config", False)
assert df.expect_column_values_to_be_odd("all_odd") == ExpectationValidationResult(
result={'element_count': 10,
'missing_count': 0,
'missing_percent': 0.0,
'partial_unexpected_counts': [],
'partial_unexpected_index_list': [],
'partial_unexpected_list': [],
'unexpected_count': 0,
'unexpected_index_list': [],
'unexpected_list': [],
'unexpected_percent': 0.0,
'unexpected_percent_nonmissing': 0.0},
success=True
)
assert df.expect_column_values_to_be_odd("all_missing") == ExpectationValidationResult(
result={'element_count': 10,
'missing_count': 10,
'missing_percent': 100.0,
'partial_unexpected_counts': [],
'partial_unexpected_index_list': [],
'partial_unexpected_list': [],
'unexpected_count': 0,
'unexpected_index_list': [],
'unexpected_list': [],
'unexpected_percent': 0.0,
'unexpected_percent_nonmissing': None},
success=True
)
assert df.expect_column_values_to_be_odd("odd_missing") == ExpectationValidationResult(
result={'element_count': 10,
'missing_count': 5,
'missing_percent': 50.0,
'partial_unexpected_counts': [],
'partial_unexpected_index_list': [],
'partial_unexpected_list': [],
'unexpected_count': 0,
'unexpected_index_list': [],
'unexpected_list': [],
'unexpected_percent': 0.0,
'unexpected_percent_nonmissing': 0.0},
success=True
)
assert df.expect_column_values_to_be_odd("mixed_missing") == ExpectationValidationResult(
result={'element_count': 10,
'missing_count': 3,
'missing_percent': 30.0,
'partial_unexpected_counts': [{'value': 2., 'count': 1}, {'value': 4., 'count': 1}],
'partial_unexpected_index_list': [5, 6],
'partial_unexpected_list': [2., 4.],
'unexpected_count': 2,
'unexpected_index_list': [5, 6],
'unexpected_list': [2, 4],
'unexpected_percent': 20.0,
'unexpected_percent_nonmissing': (2/7 * 100)},
success=False
)
assert df.expect_column_values_to_be_odd("mostly_odd") == ExpectationValidationResult(
result={'element_count': 10,
'missing_count': 0,
'missing_percent': 0,
'partial_unexpected_counts': [{'value': 2., 'count': 1}, {'value': 4., 'count': 1}],
'partial_unexpected_index_list': [5, 6],
'partial_unexpected_list': [2., 4.],
'unexpected_count': 2,
'unexpected_index_list': [5, 6],
'unexpected_list': [2, 4],
'unexpected_percent': 20.0,
'unexpected_percent_nonmissing': 20.0},
success=False
)
assert df.expect_column_values_to_be_odd("mostly_odd", mostly=.6) == ExpectationValidationResult(
result={'element_count': 10,
'missing_count': 0,
'missing_percent': 0,
'partial_unexpected_counts': [{'value': 2., 'count': 1}, {'value': 4., 'count': 1}],
'partial_unexpected_index_list': [5, 6],
'partial_unexpected_list': [2., 4.],
'unexpected_count': 2,
'unexpected_index_list': [5, 6],
'unexpected_list': [2, 4],
'unexpected_percent': 20.0,
'unexpected_percent_nonmissing': 20.0},
success=True
)
assert df.expect_column_values_to_be_odd("mostly_odd", result_format="BOOLEAN_ONLY") == \
ExpectationValidationResult(success=False)
df.default_expectation_args["result_format"] = "BOOLEAN_ONLY"
assert df.expect_column_values_to_be_odd("mostly_odd") == ExpectationValidationResult(success=False)
df.default_expectation_args["result_format"] = "BASIC"
assert df.expect_column_values_to_be_odd("mostly_odd", include_config=True) == ExpectationValidationResult(
expectation_config=ExpectationConfiguration(
expectation_type="expect_column_values_to_be_odd",
kwargs={
"column": "mostly_odd",
"result_format": "BASIC"
}
),
result={
'element_count': 10,
'missing_count': 0,
'missing_percent': 0,
'partial_unexpected_list': [2, 4],
'unexpected_count': 2,
'unexpected_percent': 20.0,
'unexpected_percent_nonmissing': 20.0},
success=False
)
def test_column_aggregate_expectation_decorator():
# Create a new CustomPandasDataset to
# (1) demonstrate that custom subclassing works, and
# (2) Test expectation business logic without dependencies on any other functions.
class CustomPandasDataset(PandasDataset):
@PandasDataset.column_aggregate_expectation
def expect_column_median_to_be_odd(self, column):
median = self.get_column_median(column)
return {"success": median % 2, "result": {"observed_value": median}}
df = CustomPandasDataset({
'all_odd': [1, 3, 5, 7, 9],
'all_even': [2, 4, 6, 8, 10],
'odd_missing': [1, 3, 5, None, None],
'mixed_missing': [1, 2, None, None, 6],
'mixed_missing_2': [1, 3, None, None, 6],
'all_missing': [None, None, None, None, None, ],
})
df.set_default_expectation_argument("result_format", "COMPLETE")
df.set_default_expectation_argument("include_config", False)
assert df.expect_column_median_to_be_odd("all_odd") == ExpectationValidationResult(
result={'observed_value': 5, 'element_count': 5, 'missing_count': 0, 'missing_percent': 0},
success=True
)
assert df.expect_column_median_to_be_odd("all_even") == ExpectationValidationResult(
result={'observed_value': 6, 'element_count': 5, 'missing_count': 0, 'missing_percent': 0},
success=False
)
assert df.expect_column_median_to_be_odd(
"all_even", result_format="SUMMARY") == ExpectationValidationResult(
result={'observed_value': 6, 'element_count': 5, 'missing_count': 0, 'missing_percent': 0},
success=False
)
assert df.expect_column_median_to_be_odd(
"all_even", result_format="BOOLEAN_ONLY") == ExpectationValidationResult(success=False)
df.default_expectation_args["result_format"] = "BOOLEAN_ONLY"
assert df.expect_column_median_to_be_odd("all_even") == ExpectationValidationResult(success=False)
assert df.expect_column_median_to_be_odd("all_even", result_format="BASIC") == ExpectationValidationResult(
result={'observed_value': 6, 'element_count': 5, 'missing_count': 0, 'missing_percent': 0},
success=False
)
def test_column_pair_map_expectation_decorator():
# Create a new CustomPandasDataset to
# (1) Demonstrate that custom subclassing works, and
# (2) Test expectation business logic without dependencies on any other functions.
class CustomPandasDataset(PandasDataset):
@PandasDataset.column_pair_map_expectation
def expect_column_pair_values_to_be_different(self,
column_A,
column_B,
keep_missing="either",
output_format=None, include_config=True, catch_exceptions=None
):
return column_A != column_B
df = CustomPandasDataset({
'all_odd': [1, 3, 5, 7, 9],
'all_even': [2, 4, 6, 8, 10],
'odd_missing': [1, 3, 5, None, None],
'mixed_missing': [1, 2, None, None, 6],
'mixed_missing_2': [1, 3, None, None, 6],
'all_missing': [None, None, None, None, None, ],
})
df.set_default_expectation_argument("result_format", "COMPLETE")
df.set_default_expectation_argument("include_config", False)
assert df.expect_column_pair_values_to_be_different(
"all_odd", "all_even") == ExpectationValidationResult(
success=True,
result={
"element_count": 5,
"missing_count": 0,
"unexpected_count": 0,
"missing_percent": 0.0,
"unexpected_percent": 0.0,
"unexpected_percent_nonmissing": 0.0,
"unexpected_list": [],
"unexpected_index_list": [],
"partial_unexpected_list": [],
"partial_unexpected_index_list": [],
"partial_unexpected_counts": [],
}
)
assert df.expect_column_pair_values_to_be_different(
"all_odd",
"all_even",
ignore_row_if="both_values_are_missing",
) == ExpectationValidationResult(
success=True,
result={
"element_count": 5,
"missing_count": 0,
"unexpected_count": 0,
"missing_percent": 0.0,
"unexpected_percent": 0.0,
"unexpected_percent_nonmissing": 0.0,
"unexpected_list": [],
"unexpected_index_list": [],
"partial_unexpected_list": [],
"partial_unexpected_index_list": [],
"partial_unexpected_counts": [],
}
)
assert df.expect_column_pair_values_to_be_different("all_odd", "odd_missing") == ExpectationValidationResult(
success=False,
result={
"element_count": 5,
"missing_count": 0,
"unexpected_count": 3,
"missing_percent": 0.0,
"unexpected_percent": 60.0,
"unexpected_percent_nonmissing": 60.0,
"unexpected_list": [(1, 1.), (3, 3.), (5, 5.)],
"unexpected_index_list": [0, 1, 2],
"partial_unexpected_list": [(1, 1.), (3, 3.), (5, 5.)],
"partial_unexpected_index_list": [0, 1, 2],
"partial_unexpected_counts": [
{'count': 1, 'value': (1, 1.)},
{'count': 1, 'value': (3, 3.)},
{'count': 1, 'value': (5, 5.)}
]
}
)
assert df.expect_column_pair_values_to_be_different(
"all_odd", "odd_missing", ignore_row_if="both_values_are_missing") == ExpectationValidationResult(
success=False,
result={
"element_count": 5,
"missing_count": 0,
"unexpected_count": 3,
"missing_percent": 0.0,
"unexpected_percent": 60.0,
"unexpected_percent_nonmissing": 60.0,
"unexpected_list": [(1, 1.), (3, 3.), (5, 5.)],
"unexpected_index_list": [0, 1, 2],
"partial_unexpected_list": [(1, 1.), (3, 3.), (5, 5.)],
"partial_unexpected_index_list": [0, 1, 2],
"partial_unexpected_counts": [
{'count': 1, 'value': (1, 1.)},
{'count': 1, 'value': (3, 3.)},
{'count': 1, 'value': (5, 5.)}
]
}
)
assert df.expect_column_pair_values_to_be_different(
"all_odd",
"odd_missing",
ignore_row_if="either_value_is_missing"
) == ExpectationValidationResult(
success=False,
result={
"element_count": 5,
"missing_count": 2,
"unexpected_count": 3,
"missing_percent": 40.0,
"unexpected_percent": 60.0,
"unexpected_percent_nonmissing": 100.0,
"unexpected_list": [(1, 1.), (3, 3.), (5, 5.)],
"unexpected_index_list": [0, 1, 2],
"partial_unexpected_list": [(1, 1.), (3, 3.), (5, 5.)],
"partial_unexpected_index_list": [0, 1, 2],
"partial_unexpected_counts": [
{'count': 1, 'value': (1, 1.)},
{'count': 1, 'value': (3, 3.)},
{'count': 1, 'value': (5, 5.)}
]
}
)
with pytest.raises(ValueError):
df.expect_column_pair_values_to_be_different(
"all_odd",
"odd_missing",
ignore_row_if="blahblahblah"
)
# Test SUMMARY, BASIC, and BOOLEAN_ONLY output_formats
assert df.expect_column_pair_values_to_be_different(
"all_odd",
"all_even",
result_format="SUMMARY"
) == ExpectationValidationResult(
success=True,
result={
"element_count": 5,
"missing_count": 0,
"unexpected_count": 0,
"missing_percent": 0.0,
"unexpected_percent": 0.0,
"unexpected_percent_nonmissing": 0.0,
"partial_unexpected_list": [],
"partial_unexpected_index_list": [],
"partial_unexpected_counts": [],
}
)
assert df.expect_column_pair_values_to_be_different(
"all_odd",
"all_even",
result_format="BASIC"
) == ExpectationValidationResult(
success=True,
result={
"element_count": 5,
"missing_count": 0,
"unexpected_count": 0,
"missing_percent": 0.0,
"unexpected_percent": 0.0,
"unexpected_percent_nonmissing": 0.0,
"partial_unexpected_list": [],
}
)
assert df.expect_column_pair_values_to_be_different(
"all_odd",
"all_even",
result_format="BOOLEAN_ONLY"
) == ExpectationValidationResult(success=True)
| [
"[email protected]"
] | |
8b086ad20d35c4dda3e0b0be54da794780585885 | c437ee09fa856327b66c78c0419ebf1848ad58fb | /PageBotNano-010-Template/pagebotnano_010/toolbox/__init__.py | db64f7c78b5c8ee4f74eb2d58d61dce227badce1 | [
"MIT"
] | permissive | juandelperal/PageBotNano | 6fea81d027bfe55d1888f55c66b2bcf17bb47ff5 | 7f0d82755d6eb6962f206e5dd0d08c40c0947bde | refs/heads/master | 2022-11-12T05:14:53.019506 | 2020-07-01T11:17:13 | 2020-07-01T11:17:13 | 275,766,295 | 0 | 0 | NOASSERTION | 2020-06-29T07:50:27 | 2020-06-29T07:50:26 | null | UTF-8 | Python | false | false | 3,056 | py | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
#
# P A G E B O T N A N O
#
# Copyright (c) 2020+ Buro Petr van Blokland + Claudia Mens
# www.pagebot.io
# Licensed under MIT conditions
#
# Supporting DrawBot, www.drawbot.com
# -----------------------------------------------------------------------------
#
# This source makes it possible to import other sources
# from this diretory/folder
#
import sys
if __name__ == "__main__":
sys.path.insert(0, "../..") # So we can import pagebotnano without installing.
from pagebotnano_010.constants import PADDING
def extensionOf(path):
"""Answer the extension of path. Answer None of there is no extension.
>>> extensionOf('../../images/myImage.jpg')
'jpg'
>>> extensionOf('aFile.PDF') # Answer a lowercase
'pdf'
>>> extensionOf('aFile') is None # No extension
True
>>> extensionOf('../../aFile') is None # No extension on file name
True
"""
parts = path.split('/')[-1].split('.')
if len(parts) > 1:
return parts[-1].lower()
return None
def fileNameOf(path):
"""Answer the file name part of the path.
>>> fileNameOf('../../aFile.pdf')
'aFile.pdf'
>>> fileNameOf('../../') is None # No file name
True
"""
return path.split('/')[-1] or None
# M E A S U R E S
def mm(mm):
"""Convert from millimeter values to rounded points
>>> mm(210)
595
>>> mm(297)
842
"""
return int(round(mm * 72 * 0.039370)) # Approximated 1" = 25.400051mm
def cm(cm):
"""Convert from millimeter values to rounded points
>>> cm(21)
595
>>> cm(29.7)
842
"""
return int(round(cm * 72 * 0.039370 * 10)) # Approximated 1" = 25.400051mm
def makePadding(padding):
"""Check on the various ways that padding can be defined.
>>> makePadding((10, 20, 30, 40))
(10, 20, 30, 40)
>>> makePadding((10, 20))
(10, 20, 10, 20)
>>> makePadding(50)
(50, 50, 50, 50)
"""
if isinstance(padding, (list, tuple)):
if len(padding) == 2:
pt, pr = pb, pl = padding
elif len(padding) == 4:
pt, pr, pb, pl = padding
        else: # Wrong number of values is an error
            raise ValueError('makePadding: Not the right kind of padding "%s"' % str(padding))
    elif padding is None or isinstance(padding, (int, float)):
        pt = pr = pb = pl = padding or PADDING
    else: # Any other type is an error
        raise ValueError('makePadding: Not the right kind of padding "%s"' % str(padding))
if pt is None:
pt = PADDING
if pr is None:
pr = PADDING
if pb is None:
pb = PADDING
if pl is None:
pl = PADDING
return pt, pr, pb, pl
if __name__ == "__main__":
# Running this document will execute all >>> comments as test of this source.
import doctest
doctest.testmod()[0] | [
"[email protected]"
] | |
4f834e8074c6a53607cf96517f86c1115d219d69 | c0f72c5ed3bbb9288f8796a612a675942a1f7c85 | /mqtt_processor/pub.py | cad383b3ddec288fd2b7e7f651dc605e2cde1a4b | [
"MIT"
] | permissive | yourequiremorecitygas/process-server | 542378a805158dd528b357dd7def90b6b3117515 | d111f20d8a6a67b7ea6ef675671295b736924cd5 | refs/heads/master | 2020-05-01T18:03:06.902916 | 2019-06-11T15:58:00 | 2019-06-11T15:58:00 | 177,615,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | import paho.mqtt.client as mqtt
mqtt = mqtt.Client("python_pub") #Mqtt Client 오브젝트 생성
mqtt.connect("52.194.252.52", 1883) #MQTT 서버에 연결
mqtt.publish("topic", "AT") #토픽과 메세지 발행
mqtt.publish("topic", "BT")
mqtt.loop(2) | [
"[email protected]"
] | |
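A matching subscriber sketch for the publisher above (same broker address and topic; this uses paho-mqtt's 1.x callback style, matching the publisher, and is a plausible counterpart rather than code from this repo):

```python
import paho.mqtt.client as mqtt

def on_message(client, userdata, msg):
    # Called once for every publish received on a subscribed topic.
    print(msg.topic, msg.payload.decode("utf-8"))

client = mqtt.Client("python_sub")
client.on_message = on_message
client.connect("52.194.252.52", 1883)
client.subscribe("topic")
client.loop_forever()  # block and dispatch callbacks
```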
6834331ec3c28cd177a9637e46c13aca13bbefe1 | 23e737a4d6855b92681406b87967127faab61db1 | /Python Programs/Exp_17.py | 6918f026d5e81eb7074179df47cd7404a9228d77 | [] | no_license | KARTHIK-KG/COD-Lab | 2558c061fec192a5e18403f1c470abd08bec62dc | cefb603d6440ffbb380cfc454a652c28e86759cd | refs/heads/main | 2023-08-06T19:56:57.178697 | 2021-10-05T04:10:28 | 2021-10-05T04:10:28 | 389,884,272 | 11 | 3 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | # Python program to find the gcd of two numbers
# Step 1:Get 2 integers a and b
# Step 2:Apply Euclid's algorithm: if b is 0 the gcd is a, otherwise recurse on (b, a % b)
# Step 3:The recursion stops when the remainder reaches 0
# Step 4:Print the GCD of the two numbers
# Step 5:End of the program
def hcf(a,b):
if(b==0):
return a
else:
return hcf(b,a%b)
a=60
b=48
print("The gcd of 60 and 48 is : ",end="")
print(hcf(60,48))
# Sample values : a = 60, b = 48
# Output :
# The gcd of 60 and 48 is : 12
| [
"[email protected]"
] | |
75415f8c142d19940438175484ca95e2698259e8 | ec7b3664e31c0bbd995d75b16854c3712a81ce0b | /HandTrackingModule.py | ee63840507ff8f710c7b8cf87509cde576f912aa | [] | no_license | nokckanh/ControlVolume | c7bd0de0a8cd1c7db767644417256bb6bf3474be | 32eb62b964a512f5b8a7dd58a19013ccbd355415 | refs/heads/master | 2023-05-09T23:56:02.952468 | 2021-06-03T09:23:51 | 2021-06-03T09:23:51 | 373,450,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,018 | py | import cv2
import mediapipe as mp
import time
import math
class handDetector() :
def __init__(self, mode = False, maxHands = 2,detectionCon=0.5, trackCon=0.5):
self.mode = mode
self.maxHands = maxHands
self.detectionCon = detectionCon
self.trackCon = trackCon
self.mpHands = mp.solutions.hands
self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.detectionCon, self.trackCon)
self.mpDraw = mp.solutions.drawing_utils
self.tipIds = [4, 8, 12 ,16, 20]
def findHands(self, img, draw=True):
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
self.results = self.hands.process(imgRGB)
if self.results.multi_hand_landmarks:
for handLms in self.results.multi_hand_landmarks:
if draw:
self.mpDraw.draw_landmarks(img, handLms,self.mpHands.HAND_CONNECTIONS)
return img
def findPositon(self, img , handNo = 0 , draw = True):
xlist = []
ylist = []
bbox = []
self.lmList = []
if self.results.multi_hand_landmarks:
myHands = self.results.multi_hand_landmarks[handNo]
for id, lm in enumerate(myHands.landmark):
h , w ,c = img.shape
cx , cy = int(lm.x * w ), int(lm.y * h)
xlist.append(cx)
ylist.append(cy)
self.lmList.append([id, cx, cy])
if draw:
cv2.circle(img,(cx,cy),5, (255,0,255) , cv2.FILLED)
xmin , xmax = min(xlist) , max(xlist)
ymin , ymax = min(ylist) , max(ylist)
bbox = xmin,ymin,xmax,ymax
if draw:
cv2.rectangle(img, (bbox[0]-20, bbox[1]-20), (bbox[2]+20, bbox[3]+20), (0,255,0),2)
return self.lmList ,bbox
def fingersUp(self):
fingers = []
        # Thumb (compare x-coordinates, since the thumb folds sideways)
if self.lmList[self.tipIds[0]][1] > self.lmList[self.tipIds[0] - 1][1]:
fingers.append(0)
else :
fingers.append(1)
# 4 fingers
for id in range(1, 5) :
if self.lmList[self.tipIds[id]][2] < self.lmList[self.tipIds[id] - 2][2]:
fingers.append(1)
else:
fingers.append(0)
return fingers
def findDistance(self, p1,p2,img,draw=True):
x1, y1 = self.lmList[p1][1], self.lmList[p1][2]
x2, y2 = self.lmList[p2][1], self.lmList[p2][2]
cx , cy = (x1 + x2) // 2, (y1 + y2) // 2
if draw :
cv2.circle(img,(x1,y1),15,(255,0,255), cv2.FILLED )
cv2.circle(img,(x2,y2),15,(255,0,255), cv2.FILLED )
cv2.line(img, (x1,y1), (x2,y2), (255,0,255), 3)
cv2.circle(img,(cx,cy),15,(255,0,255), cv2.FILLED )
length = math.hypot(x2 - x1 , y2-y1)
return length, img, [x1, y1, x2 ,y2, cx ,cy]
| [
"[email protected]"
] | |
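A hedged driver sketch for the module above (webcam index 0 is an assumption; note the module spells its position method `findPositon`):

```python
import cv2
from HandTrackingModule import handDetector

cap = cv2.VideoCapture(0)
detector = handDetector(detectionCon=0.7)
while True:
    ok, img = cap.read()
    if not ok:
        break
    img = detector.findHands(img)
    lmList, bbox = detector.findPositon(img, draw=False)
    if lmList:
        # Distance between thumb tip (id 4) and index fingertip (id 8).
        length, img, _ = detector.findDistance(4, 8, img)
        print(int(length))
    cv2.imshow("hands", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
```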
c278d54c536e37d74902544d465054438ef73832 | 0571b10dfdd9d5727a858aa0b91f4e77a1bb486f | /test1.py | 48b42f5310e1ad8c71d452784f2c67cd5ada6c2f | [
"MIT"
] | permissive | dcampanini/xgboost_classification | dddf6a04a6e0635d8c454c44191cbfbde58ac576 | 45a7ab760c5d5be1b6bcf14d0a8a39673071da64 | refs/heads/master | 2023-08-29T19:48:56.430208 | 2021-06-27T19:08:03 | 2021-06-27T19:08:03 | 377,159,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,586 | py | #%%
import pandas as pd
import numpy as np
from numpy import loadtxt
import time
import pathlib
import pickle
from datetime import datetime
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import plot_confusion_matrix
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
#%%
def extract_features(num_pnm_data,fecha_macs,pnm_data):
    #%% build the array for the training set
    field_num=3+4*num_pnm_data # = date + mac + class + num_metrics*num_pnm_data
    set1= pd.DataFrame(0, index=range(len(fecha_macs)), columns=range(field_num))
    # %% compute metrics and assemble the training dataset
    for i in range(len(fecha_macs)):
        #i=3
        # for a specific mac and date, extract all available data
        # per hour; ideally this would be 24 rows
        mac_data=pnm_data.loc[(pnm_data['MAC_ADDRESS']==fecha_macs.iloc[i][1])
                        & (pnm_data['DATE_FH']==fecha_macs.iloc[i][0])]
        #% label
        if mac_data['ESTADO'].iloc[0]=='CERRADO':
            set1.iloc[i,-1]=1
        else:
            set1.iloc[i,-1]=0
        # % compute mean and var
        mean=mac_data.iloc[:,3:16].mean()
        var=mac_data.iloc[:,3:16].var()
        # %% compute weighted moving average (wma)
        weights=np.flip(np.arange(1,len(mac_data)+1))
        wma=mac_data.iloc[:,3:16].apply(lambda x: np.dot(x,weights)/sum(weights))
        #%% store date and mac_address
        set1.iloc[i,0:2]=mac_data.iloc[0,[0,2]]
        #%% store mean, var, wma, mean-wma in set1
        set1.iloc[i,2:2+len(mean)]=mean
        set1.iloc[i,2+len(mean):2+2*len(mean)]=var
        set1.iloc[i,2+2*len(mean):2+3*len(mean)]=wma
        set1.iloc[i,2+3*len(mean):2+4*len(mean)]=mean-wma
    # return the array with the computed features
    return set1
#%% =============================================================================
start = time.time()
print('Cargando datos')
reclama1=pd.read_csv('train/mac_reclama_dia_abril.csv',low_memory=False)
reclama2=pd.read_csv('train/mac_reclama_dia_mayo.csv',low_memory=False)
reclama3=pd.read_csv('train/mac_reclama_dia_junio.csv',low_memory=False)
noreclama1=pd.read_csv('train/mac_no_reclama_label_abril.csv',low_memory=False)
noreclama2=pd.read_csv('train/mac_no_reclama_label_mayo.csv',low_memory=False)
noreclama3=pd.read_csv('train/mac_no_reclama_label_junio.csv',low_memory=False)
df_train=[reclama2,noreclama2]
df_test=[reclama1,reclama3,noreclama1,noreclama3]
train=pd.concat(df_train)
test=pd.concat(df_test)
#%%
#sub1=df2
train1=train.sample(1000)
test1=test.sample(1000)
#%% drop some columns
train1=train1.drop(['FECHA_AFECTACION_00'], inplace=False, axis=1)
test1=test1.drop(['FECHA_AFECTACION_00'], inplace=False, axis=1)
#pnm_data=train1
#%% extraer dataframe con mac_address unicas
#macs=sub1_ft.loc[:,['MAC_ADDRESS']].drop_duplicates().sort_values(by=['MAC_ADDRESS'])
#%% extraer dataframe con (fecha,mac_address)
fecha_macs_train=train1.loc[:,['DATE_FH',
'MAC_ADDRESS']].drop_duplicates().sort_values(by=['MAC_ADDRESS',
'DATE_FH'])
fecha_macs_test=test1.loc[:,['DATE_FH',
'MAC_ADDRESS']].drop_duplicates().sort_values(by=['MAC_ADDRESS',
'DATE_FH'])
# %%
print('Starting feature engineering')
num_pnm_data=13
train1=extract_features(num_pnm_data,fecha_macs_train,train1)
test1=extract_features(num_pnm_data,fecha_macs_test,test1)
print('Finished feature engineering')
#%%
set2=train1.dropna()
set3=test1.dropna()
#%% =============================================================================
# Train the model
print('Starting model training')
x_train=set2.iloc[:,2:54]
y_train=set2.iloc[:,54]
x_test=set3.iloc[:,2:54]
y_test=set3.iloc[:,54]
#%% fit model on training data
model = XGBClassifier()
model.fit(x_train, y_train,verbose=True)
print('Finished model training')
#%% make predictions for test data
y_pred = model.predict(x_test)
y_pred2=np.zeros(x_test.shape[0])
probas=model.predict_proba(x_test)
#%%
for i in range(len(probas)) :
if probas[:,1][i] >0.9:
y_pred2[i]=1
else:
y_pred2[i]=0
# confusion matrix
cm=confusion_matrix(y_test, y_pred2)
tn, fp, fn, tp=confusion_matrix(y_test, y_pred2).ravel()
print('tp=',tp,'fp=',fp,'tn=',tn,'fn=',fn)
#%%
predictions = [round(value) for value in y_pred]
#%% evaluate predictions
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
print('Finished model evaluation')
# %%
end = time.time()
elapsed = (end - start)
print('Time in seconds',elapsed,'Time in minutes',elapsed/60)
#%% =============================================================================
#%% save results and model
#make a directory
today = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
dir=pathlib.Path.cwd().joinpath('results'+today)
pathlib.Path(dir).mkdir(parents=True, exist_ok=True)
#%% save real labels and predicted
y_test_np=y_test.to_numpy()
output=pd.DataFrame({'real':y_test,'pred':y_pred})
output.to_csv(dir.joinpath('results.csv'))
# save accuracy
np.savetxt(dir.joinpath('accuracy.txt'),np.resize(np.array(accuracy),(1,1)))
#%% save model
model.save_model(dir.joinpath(today+'.model'))
pickle.dump(model, open(dir.joinpath(today+'.pickle.dat'), "wb"))
# %% confusion matrix
plot_confusion_matrix(model, x_test, y_test,
display_labels=['reclama', 'no reclama'],
cmap=plt.cm.Blues)
plt.savefig(dir.joinpath('confusion_matrix.jpg'),dpi=300)
#plt.show()
| [
"[email protected]"
] | |
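A short sketch of reloading the artifacts the script saves for later inference; the directory and CSV names here are assumptions (use whatever timestamped folder the run actually produced):

```python
import pickle
import pandas as pd

# Hypothetical paths: substitute the timestamped directory from your run.
with open("results2021-06-01_12:00:00/2021-06-01_12:00:00.pickle.dat", "rb") as f:
    model = pickle.load(f)

new_data = pd.read_csv("new_features.csv")  # hypothetical file with the same layout
new_x = new_data.iloc[:, 2:54]              # same 52 feature columns as training
probas = model.predict_proba(new_x)[:, 1]
flags = (probas > 0.9).astype(int)          # same 0.9 threshold as the evaluation above
```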
d2fccd29e2c63b4b1fda9a9ccb9c8d23211b6530 | fbbb93e9c8b07ff509d44088bd1ba8c31ccb9745 | /app/main/views.py | 7731ed9f3d68c200530957c602449cf9afaa04d3 | [] | no_license | Muia23/pitchyy | ceda760988d0091b37e0b045b61e2c053b993695 | a54e8c6086b52d5b0f82dbb722b3412fa392fd64 | refs/heads/master | 2022-11-29T14:33:31.499248 | 2020-07-19T09:17:13 | 2020-07-19T09:17:13 | 281,632,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 759 | py | from flask import render_template,request,redirect,url_for
from . import main
from flask_login import login_required
@main.route('/')
def index():
'''
View root page function returning index page
'''
title = 'Soi | Where you express yourself'
return render_template('index.html', title = title)
@main.route('/comment')
@login_required
def comment():
    '''
    View function returning the comment page
    '''
    title = 'Soi | Where you express yourself'
    return render_template('index.html', title = title)

@main.route('/posting')
@login_required
def posting():
    '''
    View function returning the posting page
    '''
    title = 'Soi | Where you express yourself'
    return render_template('index.html', title = title) | [
"[email protected]"
] | |
3e0ff04db8d41c0617cd8e5d9a4521b068448bce | c70748b3b90734e13de3125fe305510cd3345c1d | /44-isMatch.py | 211833a97dbb4267b62e48f692cd356ead42d862 | [] | no_license | sunshinewxz/leetcode | 9fca9a005644b06a5d8da230aeec4c216ac0e436 | d2e0fb4a55003d5c230fb8b2e13ac8b224b47a75 | refs/heads/master | 2021-07-19T08:15:22.159485 | 2019-01-22T14:04:25 | 2019-01-22T14:04:25 | 118,546,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | class Solution(object):
def isMatch(self, s, p):
"""
:type s: str
:type p: str
:rtype: bool
"""
        # Greedy two-pointer scan with backtracking to the last '*'.
        s_p = 0      # index into s
        p_p = 0      # index into p
        last_p = -1  # position of the most recent '*' in p
        last_s = -1  # position in s when that '*' was recorded
        while s_p < len(s):
            # Direct match, or '?' matches any single character.
            if p_p < len(p) and (s[s_p] == p[p_p] or p[p_p] == '?'):
                s_p += 1
                p_p += 1
            # Record the '*' and first try matching zero characters.
            elif p_p < len(p) and p[p_p] == '*':
                last_s = s_p
                last_p = p_p
                p_p += 1
            # Sitting just past a '*': let it absorb one more character.
            elif p_p > 0 and p[p_p-1] == '*':
                s_p += 1
            # Mismatch elsewhere: backtrack to the last '*' and widen it by one.
            elif last_p > -1:
                p_p = last_p
                s_p = last_s + 1
            else:
                return False
        # s is fully consumed; any leftover pattern must be all '*'.
        while p_p < len(p):
            if p[p_p] == '*':
                p_p += 1
            else:
                return False
        return True
| [
"[email protected]"
] | |
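A quick check of the matcher above on the classic wildcard cases:

```python
m = Solution()
assert m.isMatch("adceb", "*a*b") is True    # '*' backtracks and widens until 'b' lines up
assert m.isMatch("acdcb", "a*c?b") is False  # backtracking exhausts every '*' width
assert m.isMatch("", "*") is True            # trailing stars may match the empty string
```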
d306754b0cdcc2e74315ec7c4b729095348e64ad | ffaeaf54e891c3dcca735347f27f1980f66b7a41 | /python/6.net/1.UDP/3.udp_thread.py | 61ab82af0f1f72b2aa7363beb316ed17c62e1c54 | [
"Apache-2.0"
] | permissive | dunitian/BaseCode | 9804e3d8ff1cb6d4d8cca96978b20d168072e8bf | 4855ef4c6dd7c95d7239d2048832d8acfe26e084 | refs/heads/master | 2020-04-13T09:51:02.465773 | 2018-12-24T13:26:32 | 2018-12-24T13:26:32 | 137,184,193 | 0 | 0 | Apache-2.0 | 2018-06-13T08:13:38 | 2018-06-13T08:13:38 | null | UTF-8 | Python | false | false | 970 | py | from socket import socket, AF_INET, SOCK_DGRAM
from multiprocessing.dummy import Pool as ThreadPool
def send_msg(udp_socket):
while True:
msg = input("输入需要发送的消息:\n")
udp_socket.sendto(msg.encode("utf-8"), ("192.168.36.235", 8080))
def recv_msg(udp_socket):
while True:
data, info = udp_socket.recvfrom(1024)
print(f"[来自{info[0]}:{info[1]}的消息]:\n{data.decode('utf-8')}")
def main():
    # Create a UDP socket
    with socket(AF_INET, SOCK_DGRAM) as udp_socket:
        # Bind the local port
        udp_socket.bind(('', 5400))
        # Create a thread pool
        pool = ThreadPool()
        # Receive messages
        pool.apply_async(recv_msg, args=(udp_socket, ))
        # Send messages
        pool.apply_async(send_msg, args=(udp_socket, ))
        pool.close() # no more tasks will be added
        pool.join() # wait for the pool tasks to finish
pool.join() # 等待线程池执行完毕
print("over")
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
df692854b59b9541f96b9a2daaff92351946e434 | 1e00035f67485df7544d5b195bef4b765db3cdc2 | /lidc_nodule_setup.py | 9af14a10e04438a9bf19405c79a3f0b7ce36d7cb | [] | no_license | sinecode/ct-scan-ml | 314cad2f645145868452d302321c6398fb52b0c1 | c8e1d87aef7633565eb5968f092285224005cf98 | refs/heads/master | 2023-02-28T11:37:45.640393 | 2021-02-09T18:46:19 | 2021-02-09T18:46:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,902 | py | import argparse
from pathlib import Path
import xml.etree.ElementTree as ET
from statistics import median
import numpy as np
import pandas as pd
import tensorflow as tf
from tqdm import tqdm
from utils import read_dcm, extract_patch, pad_to_shape, volume_to_example
from config import (
LIDC_SMALL_NEG_TFRECORD,
LIDC_SMALL_POS_TFRECORD,
LIDC_BIG_NEG_TFRECORD,
LIDC_BIG_POS_TFRECORD,
LIDC_SMALL_UNLABELED_TFRECORD,
LIDC_BIG_UNLABELED_TFRECORD,
SMALL_PATCH_SHAPE,
BIG_PATCH_SHAPE,
)
def read_lidc_size_report(csv_file):
"Read the CSV file obtained from http://www.via.cornell.edu/lidc/"
df = pd.read_csv(csv_file, dtype={"case": str, "scan": str})
df["noduleIDs"] = (
df[["nodIDs", "Unnamed: 10", "Unnamed: 11", "Unnamed: 12"]]
.fillna("")
.values.tolist()
)
df["noduleIDs"] = df["noduleIDs"].apply(lambda x: [e for e in x if e])
df = df.drop(
columns=[
"volume",
"eq. diam.",
"nodIDs",
"Unnamed: 8",
"Unnamed: 10",
"Unnamed: 11",
"Unnamed: 12",
"Unnamed: 13",
"Unnamed: 14",
"Unnamed: 15",
]
).rename(
columns={
"x loc.": "xloc",
"y loc.": "yloc",
"slice no.": "zloc",
"noduleIDs": "ids",
}
)
df = df[
df.ids.apply(len) >= 3
] # keep the scan with at least 3 evaluations
return df
def get_malignancies(xml_file, nodule_ids):
"Return a list of the assigned malignancies extracted from the XML"
tree = ET.parse(xml_file)
root = tree.getroot()
prefix = "{http://www.nih.gov}"
malignancies = []
for reading_session in root.findall(f"{prefix}readingSession"):
for nodule in reading_session.findall(f"{prefix}unblindedReadNodule"):
nodule_id = nodule.findall(f"{prefix}noduleID")[0].text
if nodule_id in nodule_ids:
malignancy = int(
nodule.findall(f"*/{prefix}malignancy")[0].text
)
if malignancy > 0:
malignancies.append(malignancy)
return malignancies
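# The lookups above assume annotation XML shaped roughly like the sketch
# below (inferred from the tag names used; namespace http://www.nih.gov):
#   <readingSession>
#     <unblindedReadNodule>
#       <noduleID>Nodule 001</noduleID>
#       <characteristics><malignancy>4</malignancy></characteristics>
#     </unblindedReadNodule>
#   </readingSession>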
def main():
parser = argparse.ArgumentParser(
description="Extract the 3D patches containing the nodules and store them in TFRecord files.",
)
parser.add_argument(
"data_dir",
help="Directory containing all the DCM files downloaded from https://wiki.cancerimagingarchive.net/display/Public/LIDC-IDRI",
)
parser.add_argument(
"csv_file",
help="CSV file obtained from http://www.via.cornell.edu/lidc",
)
args = parser.parse_args()
data_dir = Path(args.data_dir)
nodules_df = read_lidc_size_report(args.csv_file)
assert (
len(nodules_df.index) == 1387
), f"The input CSV {args.csv_file} has not the expected size."
with tf.io.TFRecordWriter(
LIDC_SMALL_NEG_TFRECORD
) as small_neg_writer, tf.io.TFRecordWriter(
LIDC_SMALL_POS_TFRECORD
) as small_pos_writer, tf.io.TFRecordWriter(
LIDC_BIG_NEG_TFRECORD
) as big_neg_writer, tf.io.TFRecordWriter(
LIDC_BIG_POS_TFRECORD
) as big_pos_writer, tf.io.TFRecordWriter(
LIDC_SMALL_UNLABELED_TFRECORD
) as small_unlabeled_writer, tf.io.TFRecordWriter(
LIDC_BIG_UNLABELED_TFRECORD
) as big_unlabeled_writer:
for row in tqdm(nodules_df.itertuples(), total=len(nodules_df.index)):
case = row.case
scan_id = row.scan
dcm_dir_glob = list(
data_dir.glob(f"LIDC-IDRI-{case}/*/{scan_id}.*/")
)
if len(dcm_dir_glob) == 0:
print(
f"WARNING ({scan_id=} {case=}): "
"Scan not found. Skipping this scan ..."
)
continue
if len(dcm_dir_glob) > 1:
print(
f"WARNING ({scan_id=} {case=}): "
"Found multiple scans with same ids. Skipping this scan ..."
)
continue
dcm_dir = dcm_dir_glob[0]
scan = read_dcm(dcm_dir, reverse_z=True)
xml_files = list(dcm_dir.glob("*.xml"))
if len(xml_files) == 0:
print(
f"WARNING ({scan_id=} {case=}): "
"Can't find a XML file. Skipping this scan ..."
)
continue
elif len(xml_files) > 1:
print(
f"WARNING ({scan_id=} {case=}): "
"Found multiple XML files. Skipping this scan ..."
)
continue
xml_file = xml_files[0]
nodule_ids = row.ids
malignancies = get_malignancies(xml_file, nodule_ids)
median_malignancy = median(malignancies)
if median_malignancy < 3:
big_writer = big_neg_writer
small_writer = small_neg_writer
elif median_malignancy > 3:
big_writer = big_pos_writer
small_writer = small_pos_writer
else:
# if the malignancies median is 3 then write the patch
# as unlabeled
big_writer = big_unlabeled_writer
small_writer = small_unlabeled_writer
big_patch = extract_patch(
scan,
(row.zloc, row.yloc, row.xloc),
BIG_PATCH_SHAPE[:-1],
)
big_patch = pad_to_shape(big_patch, BIG_PATCH_SHAPE[:-1])
big_patch = np.expand_dims(big_patch, axis=-1)
if not big_patch.any():
print(
f"WARNING ({scan_id=} {case=}): "
"Patch contains only zeros. Skipping this patch ..."
)
continue
assert (
big_patch.shape == BIG_PATCH_SHAPE
), f"Wrong shape for scan {scan_id} in case {case}."
big_patch = big_patch.astype(np.float32)
big_example = volume_to_example(big_patch)
big_writer.write(big_example.SerializeToString())
small_patch = extract_patch(
scan,
(row.zloc, row.yloc, row.xloc),
SMALL_PATCH_SHAPE[:-1],
)
small_patch = pad_to_shape(small_patch, SMALL_PATCH_SHAPE[:-1])
small_patch = np.expand_dims(small_patch, axis=-1)
assert (
small_patch.shape == SMALL_PATCH_SHAPE
), f"Wrong shape for scan {scan_id} in case {case}."
small_patch = small_patch.astype(np.float32)
small_example = volume_to_example(small_patch)
small_writer.write(small_example.SerializeToString())
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
cfbce743baa0c1fe1079ee32d823653ed757fea6 | a6fe8aeaa30a22b65d98a2bb360b6d761a2e17fc | /venv/lib/python2.7/site-packages/kubernetes/client/models/v1alpha1_volume_attachment_source.py | c7f33ff1de036a2409698e7567f67c9fccabbe75 | [
"MIT"
] | permissive | 784134748/kubernetes-install | 54a2a8e83e2f47f2064270649725899282b7b244 | 5df59632c2619632e422948b667fb68eab9ff5be | refs/heads/master | 2022-12-15T13:52:43.486633 | 2019-03-27T13:01:06 | 2019-03-27T13:01:06 | 176,937,818 | 0 | 0 | MIT | 2022-05-25T01:56:18 | 2019-03-21T12:13:41 | Python | UTF-8 | Python | false | false | 3,526 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1alpha1VolumeAttachmentSource(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'persistent_volume_name': 'str'
}
attribute_map = {
'persistent_volume_name': 'persistentVolumeName'
}
def __init__(self, persistent_volume_name=None):
"""
V1alpha1VolumeAttachmentSource - a model defined in Swagger
"""
self._persistent_volume_name = None
self.discriminator = None
if persistent_volume_name is not None:
self.persistent_volume_name = persistent_volume_name
@property
def persistent_volume_name(self):
"""
Gets the persistent_volume_name of this V1alpha1VolumeAttachmentSource.
Name of the persistent volume to attach.
:return: The persistent_volume_name of this V1alpha1VolumeAttachmentSource.
:rtype: str
"""
return self._persistent_volume_name
@persistent_volume_name.setter
def persistent_volume_name(self, persistent_volume_name):
"""
Sets the persistent_volume_name of this V1alpha1VolumeAttachmentSource.
Name of the persistent volume to attach.
:param persistent_volume_name: The persistent_volume_name of this V1alpha1VolumeAttachmentSource.
:type: str
"""
self._persistent_volume_name = persistent_volume_name
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1alpha1VolumeAttachmentSource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
] | |
e737df015e691b0a8ec14605360b790ddc906336 | 4cd9c222387b3057c2eaa621ab42c499b3c78790 | /Week2/guess_my_number.py | d0fe9ff944f528025112b9d085ab57c54de1a51a | [] | no_license | Alhern/MITx-6.00.1x | 269c7d23fa730be0b07d22403882b04fe593443e | 3f5ff85f6c09abd763a4d750ffa32358354e6b99 | refs/heads/master | 2020-12-02T09:10:17.096673 | 2017-07-09T20:39:00 | 2017-07-09T20:39:00 | 96,708,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | low = 0.0
high = 100
guess = int((high + low) / 2)  # keep the first guess an int, like later guesses
ans = ''
print('Please think of a number between 0 and 100!')
while ans != 'c':
ans = input('Is your secret number ' + str(guess) + '?' + '\nEnter "h" to indicate the guess is too high. Enter "l" to indicate the guess is too low. Enter "c" to indicate I guessed correctly. ')
if ans == 'h':
high = guess
elif ans == 'l':
low = guess
elif ans == 'c':
print("Game over. Your secret number was: " + str(guess))
break
else:
print('Sorry, I did not understand your input.')
guess = int((high + low) / 2) | [
"[email protected]"
] | |
c45abe95dc57a0148357504e44dbeff44b6b7f14 | c0df167bde5c7058523779064b24ec8d4ce7d85f | /users/models.py | 85b198c49b2dad468ff7e47a93d444118e882e2a | [] | no_license | dwierichs/groundstates | 04e6b16d611a7f4c6ab97e757aee8c297e389be9 | 1c3ac693199d76edc9ae1299fc5fc39c4aab4df6 | refs/heads/master | 2020-09-09T02:51:12.980918 | 2019-12-30T19:37:57 | 2019-12-30T19:37:57 | 221,323,279 | 0 | 0 | null | 2019-11-27T19:45:42 | 2019-11-12T22:21:32 | Python | UTF-8 | Python | false | false | 578 | py | from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse  # needed by Message.get_absolute_url below
class Message(models.Model):
title = models.CharField( max_length=100, verbose_name='title' )
content = models.TextField( verbose_name='content' )
date_gen = models.DateTimeField( auto_now_add=True, verbose_name='written' )
date_mod = models.DateTimeField( auto_now=True, verbose_name='modified' )
author = models.ForeignKey( User, on_delete=models.CASCADE, verbose_name='author' )
def get_absolute_url(self):
return reverse('message-detail', kwargs={'pk': self.pk})
| [
"[email protected]"
] | |
896687158a03e59ecd7d16125d796285503b235f | 2031771d8c226806a0b35c3579af990dd0747e64 | /pyobjc-framework-ExternalAccessory/setup.py | 62ec8784efd44a69d8b17b2c2194741ceda7124b | [
"MIT"
] | permissive | GreatFruitOmsk/pyobjc-mirror | a146b5363a5e39181f09761087fd854127c07c86 | 4f4cf0e4416ea67240633077e5665f5ed9724140 | refs/heads/master | 2018-12-22T12:38:52.382389 | 2018-11-12T09:54:18 | 2018-11-12T09:54:18 | 109,211,701 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,133 | py | '''
Wrappers for the "ExternalAccessory" framework on macOS.
These wrappers don't include documentation; please check Apple's documentation
for information on how to use this framework and PyObjC's documentation
for general tips and tricks regarding the translation between Python
and (Objective-)C frameworks.
'''
from pyobjc_setup import setup, Extension
import os
VERSION="5.1.1"
setup(
name='pyobjc-framework-ExternalAccessory',
description = "Wrappers for the framework ExternalAccessory on macOS",
min_os_level="10.13",
packages = [ "ExternalAccessory" ],
ext_modules = [
Extension("ExternalAccessory._ExternalAccessory",
[ "Modules/_ExternalAccessory.m" ],
extra_link_args=["-framework", "ExternalAccessory"],
depends=[
os.path.join('Modules', fn)
for fn in os.listdir('Modules')
if fn.startswith('_ExternalAccessory')
]
),
],
version=VERSION,
install_requires = [
'pyobjc-core>='+VERSION,
'pyobjc-framework-Cocoa>='+VERSION,
],
long_description=__doc__,
)
| [
"[email protected]"
] | |
1053a9aeaa1f8564887592e20ef6ae75b81993a0 | aec5e05795dafd41efccd569f89fbe69bdf6db6a | /If_and_circulation.py | 79470bece896f0caea5cb18cf4b22ee0b32b2bd1 | [
"MIT"
] | permissive | Becavalier/playground-python | 959dcc52891b9b234b4c62531744f617249c289e | 23139e8bab0f5c712d38f378364842262d17e520 | refs/heads/master | 2021-06-01T14:58:49.431239 | 2016-09-02T01:17:07 | 2016-09-02T01:17:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# If
age = 3
if age >= 18:
    print('adult')
elif age >= 6:
    print('teenager')
else:
    print('kid')
s = input('birth: ')
birth = int(s)
if birth < 2000:
    print('pre-00s (born before 2000)')
else:
    print('post-00s (born in 2000 or later)')
# Loops
names = ['Michael', 'Bob', 'Tracy']
for name in names:
    print(name)
sum = 0
for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:
    sum = sum + x
print(sum)
# Python built-in: range()
sum = 0
for x in range(101):
    sum = sum + x
print(sum)
# While
sum = 0
n = 99
while n > 0:
    sum = sum + n
    n = n - 2
print(sum)
| [
"[email protected]"
] | |
fef89ba2bf0b93db2e7c4e1a586f5f04ac27f5d6 | 3974cb24002f537507a87f80d57e7c0f9904645a | /main.py | ff34d4792fde7c8ab34035eabe7446f4270bac69 | [] | no_license | webargus/kivygenius | fc11ba51dbeb2e302b0125f678e3f68d8929e2c7 | e304f49d75449bea902f2f3edbcaee7c83bbd6e2 | refs/heads/master | 2020-06-25T13:55:41.394416 | 2018-12-14T02:18:33 | 2018-12-14T02:18:33 | 199,328,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,429 | py |
import kivy
kivy.require('1.1.3')
from kivy.core.audio import SoundLoader
from random import randint
from kivy.clock import Clock
from kivy.app import App
from kivy.uix.gridlayout import GridLayout
class GeniusSounds:
sounds = [SoundLoader.load('sound_0.wav'), SoundLoader.load('sound_1.wav'), SoundLoader.load('sound_2.wav'),
SoundLoader.load('sound_3.wav'), SoundLoader.load('error.wav')]
def __init__(self):
for sound in self.sounds:
sound.seek(0)
sound.volume = 1
class GeniusSequence:
def __init__(self):
self.sequence = []
self.ix = 0
def seed(self):
self.sequence.append(randint(0, 3))
def clear(self):
del self.sequence[:]
self.ix = 0
def reset(self):
self.ix = 0
def is_last_seed(self):
return self.ix == len(self.sequence)
def next(self):
if self.is_last_seed():
return
self.ix += 1
def get(self):
if self.is_last_seed():
return -1
return self.sequence[self.ix]
def match(self, seed):
if self.is_last_seed():
return False
return self.sequence[self.ix] == int(seed)
class GeniusGridLayout(GridLayout):
def __init__(self, **kwargs):
super(GeniusGridLayout, self).__init__(**kwargs)
self.seq = GeniusSequence()
self.isRepeating = False
def btnReleased(self, btn_id):
        # discard player clicks while the game is repeating the sequence
        if self.isRepeating:
            return
print("btn_id=", btn_id)
# advance sequence index to next button id if player got it right; add a new id if
# player reached the end of the sequence; end game if user got it wrong
if self.seq.match(btn_id):
GeniusSounds.sounds[int(btn_id)].play()
self.seq.next()
if self.seq.is_last_seed():
Clock.schedule_once(self.appendTouch, 2)
else:
GeniusSounds.sounds[-1].play()
print("Game Over!")
def startGame(self):
self.seq.clear()
# add first color button challenge to player
self.appendTouch()
# append another button press to the sequence and replay the whole sequence for the player
def appendTouch(self, *dt):
# generate new button random id between 0 and 3 and add it to the sequence
self.seq.seed()
# replay the sequence for the user
self.seq.reset()
self.isRepeating = True
Clock.schedule_interval(self.replaySequence, .5)
def replaySequence(self, *dt):
if self.seq.is_last_seed(): # no more seeds
Clock.unschedule(self.replaySequence)
# point sequence index back to first button
self.seq.reset()
# enable player clicks
self.isRepeating = False
else:
# get btn obj by id
btn_id = self.seq.get()
btn = self.ids['btn_' + str(btn_id)]
# toggle btn
if btn.state == 'normal':
GeniusSounds.sounds[btn_id].play()
btn.state = 'down'
else:
btn.state = 'normal'
self.seq.next()
class Genius(App):
def build(self):
return GeniusGridLayout()
def on_pause(self):
return True
if __name__ == '__main__':
Genius().run()
| [
"[email protected]"
] | |
fd907b6e8117b67e9f3b9091624fa8f6a1ec6b66 | 5c2a9ff7f70ffa87604214786538dbd5ed6bd7a3 | /029. Divide Two Integers.py | abf5a52d3b51c0609da9c26bb53857b4aa4fc5c7 | [
"MIT"
] | permissive | patrick-luo/Leet-Code | 470bb70db72772d2080d2b87f28a6ae44d35fdcf | 989ec20c1069ce93e1d0e9ae4a4dfc59b1b1622a | master | 2021-07-15T01:37:00.409882 | 2019-02-06T20:02:05 | 2019-02-06T20:02:05 | 130,917,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,675 | py | """This solution uses addition repetitively
to find the largest power n of 2 such that 2**n * b <= a.
Then you keep subtracting these multiples of b of
the form 2**i * b for smaller powers until the remainder
is smaller than b. Each step down to a smaller power
doubles the quotient accumulated so far (plus one
whenever a subtraction happens). If any powers are left
at the end, the quotient is doubled once for each."""
class Solution(object):
def divide(self, dividend, divisor):
"""
:type dividend: int
:type divisor: int
:rtype: int
"""
# e.g. a=100, b=3
a, b = dividend, divisor
        negative = (a > 0) != (b > 0)  # the result is negative iff the signs differ
a = a if a>0 else -a
b = b if b>0 else -b
mult = [b]
c = b
while c + c <= a:
c += c
mult.append(c)
# here len(mult)=6, then n=5, which is the largest n
# s.t. 2**n*b < a
# mult = [3,6,12,24,48,96]
        # Main step: walk the powers from largest to smallest,
        # keep subtracting 2**i * b from a whenever it fits,
        # and accumulate the corresponding 2**i into 'out'.
i = 0
out = 0
while a >= b:
i += 1
            out += out # doubling iteratively computes the 2**i factor
if a >= mult[-i]:
a -= mult[-i]
out += 1
        # If any powers of 2 are left over, finish doubling
        # 'out' once for each remaining power.
while i < len(mult):
out += out
i += 1
if negative:
out = -out
if out>2147483647 or out<-2147483648:
return 2147483647
else:
return out
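# Illustrative usage (hypothetical driver, not part of the LeetCode submission):
#   Solution().divide(100, 3)           # -> 33  (matches the traced comments above)
#   Solution().divide(7, -3)            # -> -2  (truncates toward zero)
#   Solution().divide(-2147483648, -1)  # -> 2147483647 (clamped to 32-bit max)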
| [
"[email protected]"
] | |
aba12166b4f1331a6a7bd342c544440999563e5d | 862869dce28929f24b8b354d60e6111968afd45d | /test.py | a74424bb5f9557b9ae0224752deb34ecd38d20d9 | [] | no_license | Sidney2408/SentimentAnalysis | 5a9893632019e52525b0b3ba7fd15d925161aac1 | 28aae96090e279c92503df41cfdf9faa741c25a8 | refs/heads/master | 2021-03-16T10:22:42.521107 | 2017-12-06T07:45:40 | 2017-12-06T07:45:40 | 111,277,068 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,157 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Nov 18 15:09:39 2017
@author: Sidney
"""
testDict = {'完美': {'B-positive': {'count': 1}, 'O': {'count': 1}}, '幸福': {'O': {"count": 0}}}
tagCount = {'B-positive':1,'O':2, 'B-negative':3}
parameter = "exy"
#max(testDict['a']["parameters"], key=testDict['a']["parameters"].get)
def buildParameters(dictionary, tagCount):
for key, value in dictionary.items():
parameters = {}
for subKey,subvalue in value.items():
parameters[subKey] = subvalue["count"]/tagCount[subKey]
dictionary[key]["parameters"] = parameters
return dictionary
#Backup function
"""
def nestedDictProcess(dictionary,key,subKey):
if key not in dictionary:
dictionary[key]={}
dictionary[key][subKey] = {"count":0}#The 0 is wrong
else:
if subKey not in dictionary[key]:
dictionary[key][subKey] = {"count":0}
else:
dictionary[key][subKey]["count"] = dictionary[key][subKey].get("count",0)+1 #Increment the count
def buildParameters(dictionary, tagCount):
for key, value in dictionary.items():
parameters = {}
for subKey,subvalue in value.items():
parameters[subKey] = subvalue["count"]/tagCount[subKey]
dictionary[key]["parameters"] = parameters
return dictionary
"""
#TODO: decide if you want to do {count, parameter} structure
def nestedDictProcess(dictionary,key,subKey):
if key not in dictionary:
dictionary[key]={}
dictionary[key]["count"] = {subKey:1}
else:
dictionary[key]["count"][subKey] = dictionary[key]["count"].get(subKey,0)+1 #Increment the count
def buildParameters(dictionary, tagCount):
for key, value in dictionary.items():
parameters = {}
for subKey,subvalue in value["count"].items():
parameters[subKey] = subvalue/tagCount[subKey]
dictionary[key]["parameters"] = parameters
return dictionary
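# Worked example for the {word: {"count": {tag: n}}} layout (illustrative):
#   counts = {}
#   nestedDictProcess(counts, 'good', 'B-positive')
#   nestedDictProcess(counts, 'good', 'B-positive')
#   nestedDictProcess(counts, 'good', 'O')
#   counts  -> {'good': {'count': {'B-positive': 2, 'O': 1}}}
#   buildParameters(counts, {'B-positive': 1, 'O': 2})
#   counts['good']['parameters']  -> {'B-positive': 2.0, 'O': 0.5}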
sentimentSets = ["START","STOP","O","B-positive","I-positive","B-neutral","I-neutral","B-negative","I-negative"]
fileName = "EN"
def detectAnomalies(fileDir):
with open('{0}\modifiedTrain.txt'.format(fileDir), 'r',encoding='utf-8') as modTrainSet:
trainSetString = modTrainSet.read()
trainSetLines = trainSetString.splitlines(True)
sentences = 0
index = 0
indices = ""
for i in trainSetLines:
index+= 1
data = i.split(" ")
if(len(data)==2):
word = data[0]
tag = data[1].rstrip('\n')
if(word == '' or tag not in sentimentSets):
print("Corrupted data detected: {0}".format(i))
indices= indices +"{0} {1}\n".format(i,index)
elif(i == '\n'):
sentences +=1
else:
print("Corrupted data detected: {0}".format(i))
indices= indices +"{0} {1}\n".format(i,index)
indices= "Sentences {0}".format(sentences)+indices
with open('{0}\\variables\errors.txt'.format(fileDir), 'w',encoding='utf-8') as outputFile:
outputFile.write(indices)
detectAnomalies("SG")
| [
"[email protected]"
] | |
fe2a24b471a9649d963cd55489d71e9442b01624 | cbc829f5787b770c9184b91ee470d058cc4cbe65 | /tree/173_二叉搜索树迭代器.py | 4cefd23be018f9de7418b1283f2d1b15bcd411b5 | [] | no_license | SilvesSun/learn-algorithm-in-python | 58815e7e85e767cbc4a9c21e36e7bdede4f32bef | 5ba3465ba9c85955eac188e1e3793a981de712e7 | refs/heads/master | 2022-09-19T05:10:26.783943 | 2022-09-10T04:56:43 | 2022-09-10T04:56:43 | 115,470,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,914 | py | # 实现一个二叉搜索树迭代器类BSTIterator ,表示一个按中序遍历二叉搜索树(BST)的迭代器:
#
#
#
#  [Translated from Chinese] Implement the BSTIterator class, representing an
#  iterator over the in-order traversal of a binary search tree (BST):
#  BSTIterator(TreeNode root) initializes an object of the BSTIterator class.
#  The root node of the BST is given as part of the constructor. The pointer
#  should be initialized to a number that does not exist in the BST and is
#  smaller than any element in the BST.
#  boolean hasNext() returns true if there is a number to the right of the
#  pointer in the traversal; otherwise returns false.
#  int next() moves the pointer one step to the right, then returns the
#  number at the pointer.
#
#
#  Note: since the pointer starts at a nonexistent number smaller than
#  everything in the BST, the first call to next() returns the smallest element.
#
#
#
#  You may assume that calls to next() are always valid; that is, whenever
#  next() is called, at least one next number exists in the in-order traversal.
#
#
#
#  Example:
#
#
# Input
# ["BSTIterator", "next", "next", "hasNext", "next", "hasNext", "next", "hasNext
# ", "next", "hasNext"]
# [[[7, 3, 15, null, null, 9, 20]], [], [], [], [], [], [], [], [], []]
# Output
# [null, 3, 7, true, 9, true, 15, true, 20, false]
#
# Explanation
# BSTIterator bSTIterator = new BSTIterator([7, 3, 15, null, null, 9, 20]);
# bSTIterator.next();    // returns 3
# bSTIterator.next();    // returns 7
# bSTIterator.hasNext(); // returns True
# bSTIterator.next();    // returns 9
# bSTIterator.hasNext(); // returns True
# bSTIterator.next();    // returns 15
# bSTIterator.hasNext(); // returns True
# bSTIterator.next();    // returns 20
# bSTIterator.hasNext(); // returns False
#
#
#
#
#  Constraints:
#
#
#  The number of nodes in the tree is in the range [1, 10^5].
#  0 <= Node.val <= 10^6
#  At most 10^5 calls will be made to hasNext and next.
#
#
#
#
#  Follow-up:
#
#
#  Could you design a solution where next() and hasNext() run in amortized
#  O(1) time and use O(h) memory, where h is the height of the tree?
#
#  Related Topics: Stack, Tree, Design, Binary Search Tree, Binary Tree, Iterator
# 👍 497 👎 0
# leetcode submit region begin(Prohibit modification and deletion)
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class BSTIterator:
def __init__(self, root: TreeNode):
self.idx = -1
self.arr = []
self.inorder(root)
def inorder(self, root):
if not root: return
self.inorder(root.left)
self.arr.append(root.val)
self.inorder(root.right)
def next(self) -> int:
self.idx += 1
v = self.arr[self.idx]
return v
def hasNext(self) -> bool:
return self.idx < len(self.arr) - 1
# Your BSTIterator object will be instantiated and called as such:
# obj = BSTIterator(root)
# param_1 = obj.next()
# param_2 = obj.hasNext()
# leetcode submit region end(Prohibit modification and deletion)
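# The follow-up asks for O(h) memory with amortized-O(1) next(). A common
# stack-based sketch of that variant (illustrative alternative, not part of
# the original submission; assumes the same TreeNode shape as above):
class BSTIteratorStack:
    def __init__(self, root):
        self.stack = []
        self._push_left(root)
    def _push_left(self, node):
        # descend along left children, stacking each node on the way down
        while node:
            self.stack.append(node)
            node = node.left
    def next(self) -> int:
        node = self.stack.pop()
        self._push_left(node.right)  # the successor lives in the right subtree
        return node.val
    def hasNext(self) -> bool:
        return len(self.stack) > 0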
| [
"[email protected]"
] | |
d0e08ebeff69ca3d2ee2546b4de3233781b110db | 7db2169805e5bb907bb2abb90516e2e25ac197f7 | /backgroundRemoval_video.py | 366019dd9361a18acea5004198b970f8b185d332 | [] | no_license | STARJAN-J/backgroundRemoval | ffc9d9ee328bd2e67d1955adf8615ecea3487e96 | d285d90c1f88e9b405c4f73cf0570bd6b4d1d81e | refs/heads/master | 2023-03-16T10:10:26.889664 | 2020-05-11T11:39:04 | 2020-05-11T11:39:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,569 | py | import cv2
import numpy as np
import sys
def resize(dst,img):
width = img.shape[1]
height = img.shape[0]
dim = (width, height)
resized = cv2.resize(dst, dim, interpolation = cv2.INTER_AREA)
return resized
video = cv2.VideoCapture(1)
oceanVideo = cv2.VideoCapture("ocean.mp4")
success, ref_img = video.read()
flag = 0
while(1):
success, img = video.read()
success2, bg = oceanVideo.read()
bg = resize(bg,ref_img)
if flag==0:
ref_img = img
# create a mask
diff1=cv2.subtract(img,ref_img)
diff2=cv2.subtract(ref_img,img)
diff = diff1+diff2
diff[abs(diff)<13.0]=0
gray = cv2.cvtColor(diff.astype(np.uint8), cv2.COLOR_BGR2GRAY)
gray[np.abs(gray) < 10] = 0
fgmask = gray.astype(np.uint8)
fgmask[fgmask>0]=255
#invert the mask
fgmask_inv = cv2.bitwise_not(fgmask)
#use the masks to extract the relevant parts from FG and BG
fgimg = cv2.bitwise_and(img,img,mask = fgmask)
bgimg = cv2.bitwise_and(bg,bg,mask = fgmask_inv)
#combine both the BG and the FG images
dst = cv2.add(bgimg,fgimg)
cv2.imshow('Background Removal',dst)
key = cv2.waitKey(5) & 0xFF
if ord('q') == key:
break
elif ord('d') == key:
flag = 1
print("Background Captured")
elif ord('r') == key:
flag = 0
print("Ready to Capture new Background")
cv2.destroyAllWindows()
video.release()
#return jpeg.tobytes() | [
"[email protected]"
] | |
72e86264e0ba09cd634e0fe560f491351c2e26a7 | 3da865a605e2513aadd3eace41b970fd7aad249a | /2001Mail/ia2001xpy/__init__.py | 39ab89eb9ca72cc737742b1e27df1a910be7fd79 | [
"Apache-2.0"
] | permissive | rafaelstyvie/2001mail | 1d5a1b0651139a24e466eb27b2394653850c07e5 | e3a777a885d62ec27d1f0ee56557a5d1b054f6ef | refs/heads/master | 2020-06-29T02:10:38.147025 | 2016-11-22T09:38:23 | 2016-11-22T09:38:23 | 74,457,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37 | py | """
ia2001x Cryptographic Tools.
"""
| [
"[email protected]"
] | |
e2a89d771342738284db5d3d94106bd5ef73154d | 2fa6b272e79ff938ddd38d96edc454d05d247baf | /project_body/srv/code/python/hazelcast/serialization/api.py | 8b5108587cbcee64fc838f0400f4c2f8799c8f1c | [
"Apache-2.0"
] | permissive | ArthurConan/FunFic | e1b4d5bcd23636c4fb6999e9e0437bb7c879e9d1 | ad8f7849a2aff995a5569b92dc1ed9d53e3b09d6 | refs/heads/master | 2021-01-21T06:52:28.212648 | 2017-05-28T21:57:13 | 2017-05-28T21:57:13 | 91,589,641 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,256 | py | """
User API and docs
"""
class ObjectDataOutput(object):
def write_from(self, buff, offset=None, length=None):
"""
Writes the content of the buffer to this output stream
:param buff: input buffer
        :param offset: offset in the buffer where the copy begins
        :param length: length of the data to be copied from the offset into the stream
"""
raise NotImplementedError()
def write_boolean(self, val):
"""
        Writes a boolean value to this output stream;
        a single byte value of 1 represents True, 0 represents False
:param val: the boolean to be written
"""
raise NotImplementedError()
def write_byte(self, val):
raise NotImplementedError()
def write_short(self, val):
raise NotImplementedError()
def write_char(self, val):
raise NotImplementedError()
def write_int(self, val):
raise NotImplementedError()
def write_long(self, val):
raise NotImplementedError()
def write_float(self, val):
raise NotImplementedError()
def write_double(self, val):
raise NotImplementedError()
def write_bytes(self, string):
raise NotImplementedError()
def write_chars(self, val):
raise NotImplementedError()
def write_utf(self, val):
raise NotImplementedError()
def write_byte_array(self, val):
raise NotImplementedError()
def write_boolean_array(self, val):
raise NotImplementedError()
def write_char_array(self, val):
raise NotImplementedError()
def write_int_array(self, val):
raise NotImplementedError()
def write_long_array(self, val):
raise NotImplementedError()
def write_double_array(self, val):
raise NotImplementedError()
def write_float_array(self, val):
raise NotImplementedError()
def write_short_array(self, val):
raise NotImplementedError()
def write_utf_array(self, val):
raise NotImplementedError()
def write_object(self, val):
raise NotImplementedError()
def write_data(self, val):
raise NotImplementedError()
def to_byte_array(self):
raise NotImplementedError()
def get_byte_order(self):
raise NotImplementedError()
class ObjectDataInput(object):
def read_into(self, buff, offset=None, length=None):
raise NotImplementedError()
def skip_bytes(self, count):
raise NotImplementedError()
def read_boolean(self):
raise NotImplementedError()
def read_byte(self):
raise NotImplementedError()
def read_unsigned_byte(self):
raise NotImplementedError()
def read_short(self):
raise NotImplementedError()
def read_unsigned_short(self):
raise NotImplementedError()
def read_int(self):
raise NotImplementedError()
def read_long(self):
raise NotImplementedError()
def read_float(self):
raise NotImplementedError()
def read_double(self):
raise NotImplementedError()
def read_utf(self):
raise NotImplementedError()
def read_byte_array(self):
raise NotImplementedError()
def read_boolean_array(self):
raise NotImplementedError()
def read_char_array(self):
raise NotImplementedError()
def read_int_array(self):
raise NotImplementedError()
def read_long_array(self):
raise NotImplementedError()
def read_double_array(self):
raise NotImplementedError()
def read_float_array(self):
raise NotImplementedError()
def read_short_array(self):
raise NotImplementedError()
def read_utf_array(self):
raise NotImplementedError()
def read_object(self):
raise NotImplementedError()
def read_data(self):
raise NotImplementedError()
def get_byte_order(self):
raise NotImplementedError()
class IdentifiedDataSerializable(object):
def write_data(self, object_data_output):
raise NotImplementedError("read_data must be implemented to serialize this IdentifiedDataSerializable")
def read_data(self, object_data_input):
raise NotImplementedError("read_data must be implemented to deserialize this IdentifiedDataSerializable")
def get_factory_id(self):
raise NotImplementedError("This method must return the factory ID for this IdentifiedDataSerializable")
def get_class_id(self):
raise NotImplementedError("This method must return the class ID for this IdentifiedDataSerializable")
class Portable(object):
def write_portable(self, writer):
raise NotImplementedError()
def read_portable(self, reader):
raise NotImplementedError()
def get_factory_id(self):
raise NotImplementedError()
def get_class_id(self):
raise NotImplementedError()
class StreamSerializer(object):
def write(self, out, obj):
raise NotImplementedError("write method must be implemented")
def read(self, inp):
raise NotImplementedError("write method must be implemented")
def get_type_id(self):
raise NotImplementedError("get_type_id must be implemented")
def destroy(self):
raise NotImplementedError()
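# A minimal illustrative implementation of the StreamSerializer interface
# above (a sketch, not part of the original module; the type id value is an
# arbitrary choice -- user type ids must simply be unique and positive):
#
#     class TupleSerializer(StreamSerializer):
#         def write(self, out, obj):
#             out.write_int(len(obj))
#             for item in obj:
#                 out.write_object(item)
#         def read(self, inp):
#             return tuple(inp.read_object() for _ in range(inp.read_int()))
#         def get_type_id(self):
#             return 10
#         def destroy(self):
#             pass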
class PortableReader(object):
def get_version(self):
raise NotImplementedError()
def has_field(self, field_name):
raise NotImplementedError()
def get_field_names(self):
raise NotImplementedError()
def get_field_type(self, field_name):
raise NotImplementedError()
def get_field_class_id(self, field_name):
raise NotImplementedError()
def read_int(self, field_name):
raise NotImplementedError()
def read_long(self, field_name):
raise NotImplementedError()
def read_utf(self, field_name):
raise NotImplementedError()
def read_boolean(self, field_name):
raise NotImplementedError()
def read_byte(self, field_name):
raise NotImplementedError()
def read_char(self, field_name):
raise NotImplementedError()
def read_double(self, field_name):
raise NotImplementedError()
def read_float(self, field_name):
raise NotImplementedError()
def read_short(self, field_name):
raise NotImplementedError()
def read_portable(self, field_name):
raise NotImplementedError()
def read_byte_array(self, field_name):
raise NotImplementedError()
def read_boolean_array(self, field_name):
raise NotImplementedError()
def read_char_array(self, field_name):
raise NotImplementedError()
def read_int_array(self, field_name):
raise NotImplementedError()
def read_long_array(self, field_name):
raise NotImplementedError()
def read_double_array(self, field_name):
raise NotImplementedError()
def read_float_array(self, field_name):
raise NotImplementedError()
def read_short_array(self, field_name):
raise NotImplementedError()
def read_utf_array(self, field_name):
raise NotImplementedError()
def read_portable_array(self, field_name):
raise NotImplementedError()
def get_raw_data_input(self):
raise NotImplementedError()
class PortableWriter(object):
def write_int(self, field_name, value):
raise NotImplementedError()
def write_long(self, field_name, value):
raise NotImplementedError()
def write_utf(self, field_name, value):
raise NotImplementedError()
def write_boolean(self, field_name, value):
raise NotImplementedError()
def write_byte(self, field_name, value):
raise NotImplementedError()
def write_char(self, field_name, value):
raise NotImplementedError()
def write_double(self, field_name, value):
raise NotImplementedError()
def write_float(self, field_name, value):
raise NotImplementedError()
def write_short(self, field_name, value):
raise NotImplementedError()
def write_portable(self, field_name, portable):
raise NotImplementedError()
def write_null_portable(self, field_name, factory_id, class_id):
raise NotImplementedError()
def write_byte_array(self, field_name, values):
raise NotImplementedError()
def write_boolean_array(self, field_name, values):
raise NotImplementedError()
def write_char_array(self, field_name, values):
raise NotImplementedError()
def write_int_array(self, field_name, values):
raise NotImplementedError()
def write_long_array(self, field_name, values):
raise NotImplementedError()
def write_double_array(self, field_name, values):
raise NotImplementedError()
def write_float_array(self, field_name, values):
raise NotImplementedError()
def write_short_array(self, field_name, values):
raise NotImplementedError()
def write_utf_array(self, field_name, values):
raise NotImplementedError()
def write_portable_array(self, field_name, values):
raise NotImplementedError()
def get_raw_data_output(self):
raise NotImplementedError()
| [
"[email protected]"
] | |
39371265fd7d53cd83ff9f02deb3c25e8b131682 | aeb8c63067662f97abbe1bc0cbbc02cff572230d | /pavi95.py | 0506cb70bdf838061bbeadeb3d6cf412385fcaff | [] | no_license | paviprajansai/pavi83.py | f1ba8551a3e4cdb111a5cc6dbd3319e756b2c3bf | 3ee907fff76b8d9ae5940726e4ecbaaf1a9bad6f | refs/heads/master | 2020-06-27T15:44:03.687416 | 2019-08-01T07:04:12 | 2019-08-01T07:04:12 | 199,990,323 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | p1,t1,r1=map(int,input().split())
s1 = (p1 * t1 * r1) // 100  # simple interest = principal * time * rate / 100 (integer result)
print(s1)
| [
"[email protected]"
] | |
1a77f6bb4cd19e4a29d917748a1c4dad514e0bf8 | 090203cff03e2831acdc17a15ea5bc217dcfd57f | /2015/src/day_06.py | b0b7562ea19ed666c1c9a37de4ab74af492d7038 | [
"MIT"
] | permissive | Stannislav/Advent-of-Code | 77847889a95bad45f5d6fa32dfbcf984136d19ae | cdc1ea0f23a532fe1eb909beb2a64112456613b0 | refs/heads/master | 2023-01-12T11:22:56.846890 | 2023-01-01T18:56:05 | 2023-01-01T18:56:05 | 212,850,049 | 4 | 0 | MIT | 2021-04-30T17:44:50 | 2019-10-04T15:43:38 | Python | UTF-8 | Python | false | false | 1,641 | py | #!/usr/bin/env python3
# Part 1: each light is on/off; count how many lights are lit at the end
field = [[0 for i in range(1000)] for j in range(1000)]
with open("../input/input_06.txt") as f:
for line in f:
l = line.rstrip().split()
if l[0] == "toggle":
sx, sy = map(int, l[1].split(','))
ex, ey = map(int, l[3].split(','))
for y in range(sy, ey + 1):
for x in range(sx, ex + 1):
field[y][x] = 1 - field[y][x]
else:
sx, sy = map(int, l[2].split(','))
ex, ey = map(int, l[4].split(','))
for y in range(sy, ey + 1):
for x in range(sx, ex + 1):
field[y][x] = 1 if l[1] == "on" else 0
cnt = 0
for i in range(1000):
cnt += field[i].count(1)
print(cnt)
# Part 2: lights have brightness; "toggle" adds 2, "on" adds 1,
# "off" subtracts 1 (never below 0); report total brightness
field = [[0 for i in range(1000)] for j in range(1000)]
with open("../input/input_06.txt") as f:
for line in f:
l = line.rstrip().split()
if l[0] == "toggle":
sx, sy = map(int, l[1].split(','))
ex, ey = map(int, l[3].split(','))
for y in range(sy, ey + 1):
for x in range(sx, ex + 1):
field[y][x] += 2
else:
sx, sy = map(int, l[2].split(','))
ex, ey = map(int, l[4].split(','))
for y in range(sy, ey + 1):
for x in range(sx, ex + 1):
if l[1] == "on":
field[y][x] += 1
else:
field[y][x] -= 1
if field[y][x] < 0:
field[y][x] = 0
cnt = 0
for i in range(1000):
cnt += sum(field[i])
print(cnt)
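# A vectorized sketch of part 2 (illustrative alternative; assumes numpy is
# available, which the original solution does not use):
#
#     import numpy as np
#     grid = np.zeros((1000, 1000), dtype=np.int64)
#     with open("../input/input_06.txt") as f:
#         for line in f:
#             l = line.split()
#             if l[0] == "toggle":
#                 delta, start, end = 2, l[1], l[3]
#             elif l[1] == "on":
#                 delta, start, end = 1, l[2], l[4]
#             else:
#                 delta, start, end = -1, l[2], l[4]
#             sx, sy = map(int, start.split(','))
#             ex, ey = map(int, end.split(','))
#             grid[sy:ey + 1, sx:ex + 1] += delta
#             np.maximum(grid, 0, out=grid)  # brightness floors at zero
#     print(int(grid.sum()))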
| [
"[email protected]"
] | |
f15516f5ce98ea2e188e13a9c510eba39b9141af | 5779d964d5ee42b586697a640ff0f977e0fa1e55 | /synclient/model/user_group.py | 8ae95ea7eb5b838a175c05d951d6c240f4354232 | [] | no_license | thomasyu888/synpy-sdk-client | 03db42c3c8411c8c1f8808e1145d7c2a8bcc3df1 | d1e19e26db5376c78c4ce0ff181ac3c4e0709cbb | refs/heads/main | 2023-02-28T09:33:12.386220 | 2021-02-02T15:09:59 | 2021-02-02T15:09:59 | 333,744,741 | 3 | 0 | null | 2021-01-30T12:10:50 | 2021-01-28T11:57:48 | Python | UTF-8 | Python | false | false | 7,476 | py | """
Platform Repository Service
Platform Repository Service - Sage Bionetworks Platform # noqa: E501
The version of the OpenAPI document: develop-SNAPSHOT
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from synclient.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class UserGroup(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'creation_date': (str,), # noqa: E501
'etag': (str,), # noqa: E501
'id': (str,), # noqa: E501
'is_individual': (bool,), # noqa: E501
'uri': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'creation_date': 'creationDate', # noqa: E501
'etag': 'etag', # noqa: E501
'id': 'id', # noqa: E501
'is_individual': 'isIndividual', # noqa: E501
'uri': 'uri', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""UserGroup - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
creation_date (str): The date this user or group was created.. [optional] # noqa: E501
etag (str): Synapse employs an Optimistic Concurrency Control (OCC) scheme to handle concurrent updates. Since the E-Tag changes every time an entity is updated it is used to detect when a client's current representation of an entity is out-of-date. . [optional] # noqa: E501
id (str): The id of either the user or group.. [optional] # noqa: E501
is_individual (bool): True if this is a user, false if it is a group. [optional] # noqa: E501
uri (str): Uri. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| [
"[email protected]"
] | |
6dc7a828dc8ee518b28381211c2f5c1c5ae541c9 | 8a9fa73357aa87ad5b14deb15668a411f487dbde | /getrawvideos.py | 7cb5c958d17c0646dfb1d37babfdf01fc0efb3bc | [
"MIT"
] | permissive | wanghaisheng/another-autoytb-copycat | 55980c8f1875b422db5c7b446c00a6ecc5062e49 | 91daa08d9e8eec164ce9160830927fbfecf9e68c | refs/heads/main | 2023-08-19T01:35:06.459038 | 2021-10-15T11:20:04 | 2021-10-15T11:20:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,761 | py | import requests
import json
import yt_dlp
import os
import subprocess
results =[]
session = requests.session()
def stringify(obj: dict) -> dict:
"""turn every value in the dictionary to a string"""
for k, v in obj.items():
if isinstance(v, dict):
# if value is a dictionary, stringifiy recursively
stringify(v)
continue
if not isinstance(v, str):
if isinstance(v, bool):
# False/True -> false/true
obj[k] = str(v).lower()
else:
obj[k] = str(v)
return obj
with open('cookies.json') as f:
cookie_list: list = json.load(f)
# create the cookie jar from the first cookie
cookie_jar = requests.utils.cookiejar_from_dict(stringify(cookie_list[0]))
# append the rest of the cookies
for cookie in cookie_list[1:]:
requests.utils.add_dict_to_cookiejar(cookie_jar, stringify(cookie))
session.cookies = cookie_jar
idlist=[]
for i in range(1,20):
payload = {'search_type': 'video', 'keyword': '阿瓦达索命咒','order':'pubdate','duration':'0','page':i}
r = session.get('http://api.bilibili.com/x/web-interface/search/type', params=payload)
# print(r.json().keys())
results.extend(r.json()["data"]["result"])
# result={"title":'阿瓦达索命咒 compilation'+str(i),
# "tag":item['tag'],
# "description":"automatically bot generate from internet.any copyright issue pls contact us\r\n"}
if not os.path.exists('harry'+os.sep+str(i)):
os.makedirs('harry'+os.sep+str(i))
os.chdir('harry'+os.sep+str(i))
for i,item in enumerate(r.json()["data"]["result"]):
ydl_opts = {'retries': 10}
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
            print('downloading----', item['arcurl'])
# ydl.download(item['arcurl'])
ydl.download([item['arcurl']])
with open('test.sh','w') as f:
        # double quotes let the shell expand $f; the inner single quotes are for ffmpeg
        f.write("for f in *.flv ; do echo \"file '$f'\" >> list.txt; done && ffmpeg -f concat -safe 0 -i list.txt -s 1280x720 -crf 24 stitched-video.mp4 && rm list.txt")
        f.write('\n')  # newline between the two commands (was '\r', which breaks sh)
        f.write("for f in *.mp4 ; do echo \"file '$f'\" >> list.txt; done && ffmpeg -f concat -safe 0 -i list.txt -s 1280x720 -crf 24 stitched-video.mp4 && rm list.txt")
subprocess.call(['sh', './test.sh'])
os.chdir('../../')
    # Video tags (translated): Harry Potter: Magic Awakened, Magic Awakened decks, Harry Potter tutorials, Killing Curse, Killing Curse decks, Voldemort decks, Avada Kedavra, Harry Potter instant-kill builds, Action-adventure game, Action game, Role-playing video game, Strategy video game, Video game culture
| [
"[email protected]"
] | |
a37803862dbe9a04b5150ba8f0352125c761e519 | 39c40326a457e463c96b5a822a0a9bd8b8b35848 | /Unit6/word_list.py | 64aa894ed3a4ee4014bd07dba35ec9eb1601d1a7 | [] | no_license | zhangbios/LearnBlack | 21d3ad5a52210da7b64d1adb3e13fbcd90e3b985 | 5fd11d62a0786c71403d05cc236ed11b8e861b21 | refs/heads/master | 2021-05-02T13:22:22.532178 | 2018-02-08T12:28:50 | 2018-02-08T12:28:50 | 120,758,485 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,736 | py | # encoding: utf-8
"""
"""
from burp import IBurpExtender
from burp import IContextMenuFactory
from javax.swing import JMenuItem
from java.util import List, ArrayList
from java.net import URL
import re
from datetime import datetime
from HTMLParser import HTMLParser
class TagStripper(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.page_text = []
def handle_data(self, data):
self.page_text.append(data)
def handle_comment(self, data):
self.handle_data(data)
def strip(self, html):
self.feed(html)
return " ".join(self.page_text)
class BurpExtender(IBurpExtender, IContextMenuFactory):
def registerExtenderCallbacks(self,callbacks):
self._callbacks = callbacks
self._helpers = callbacks.getHelpers()
self.context = None
self.hosts = set()
self.wordlist = set(["password"])
callbacks.setExtensionName("Bios wordlist")
callbacks.registerContextMenuFactory(self)
return
def createMenuItems(self, context_menu):
self.context = context_menu
menu_list = ArrayList()
menu_list.add(JMenuItem("Create Wordlist", actionPerformed=self.wordlist_menu))
return menu_list
    # menu action: build and print the wordlist
def wordlist_menu(self,event):
        # grab the HTTP traffic the user selected
http_traffic = self.context.getSelectedMessages()
for traffic in http_traffic:
http_service = traffic.getHttpService()
host = http_service.getHost()
self.hosts.add(host)
http_response = traffic.getResponse()
if http_response:
self.get_words(http_response)
self.display_wordlist()
return
def get_words(self, http_response):
headers,body = http_response.tostring().split('\r\n\r\n',1)
        if headers.lower().find("content-type: text") == -1:
            # not a text response, so there is nothing useful to mine
            return
tag_stripper = TagStripper()
page_text = tag_stripper.strip(body)
words = re.findall("[a-zA-Z]\w{2,}",page_text)
for word in words:
if len(word) <= 12:
self.wordlist.add(word.lower())
return
def mangle(self, word):
year = datetime.now().year
suffixes = ["", "1", "!", year]
mangled = []
for password in (word, word.capitalize()):
for suffix in suffixes:
mangled.append("{}{}".format(password,suffix))
return mangled
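    # e.g. mangle("secret") -> ['secret', 'secret1', 'secret!', 'secret<year>',
    # 'Secret', 'Secret1', 'Secret!', 'Secret<year>'], where <year> is the
    # current calendar year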
def display_wordlist(self):
print("#!comment: Bios Wordlist for site(s) {}",",".join(self.hosts))
for word in sorted(self.wordlist):
for password in self.mangle(word):
print(password)
return | [
"[email protected]"
] | |
65e04b9e520a893fe0b6a0db678f1bfe2d40bb3a | 182b3ea03761008ccf251d9591f910581370a8a6 | /tests/unit_tests/test_commands_assign_ta_to_course.py | b62548a1efb2dba1de991fdc8391a3b531151f9f | [] | no_license | JaiXiong/ta_web_app | 0d80640ade65112a936df18b73fe73a96e7f888c | 389ea7a6784b958b793b2ff48edaf4136cbb81b2 | refs/heads/master | 2021-10-08T16:59:13.119946 | 2018-12-13T22:13:43 | 2018-12-13T22:13:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,205 | py | from django.test import TestCase
from website.models import Course, Account
from ta_app.commands import Commands
class TestCommandsAssignTaToCourse(TestCase):
def setUp(self):
self.Course1 = Course.objects.create(name="CS-337",
section="004",
days_of_week="M/W/F",
start_time="11:00",
end_time="11:50")
self.Course1.save()
self.Course2 = Course.objects.create(name="CS-361",
section="003",
days_of_week="M/W",
start_time="11:00",
end_time="13:15")
self.Course2.save()
self.Account1 = Account.objects.create(user="Ross",
password="nonsense",
role="Supervisor")
self.Account1.save()
self.Account2 = Account.objects.create(user="Joe",
password="nonsense",
role="TA")
self.Account2.save()
self.co = Commands()
self.co.current_user = self.Account1
    # test that roles other than supervisor cannot assign TAs to courses
def test_CommandsAssignTaToCourse1(self):
self.co.current_user = Account()
response = self.co.assign_ta_to_course('Joe', 'CS-361')
self.assertEqual(response, "You do not have permissions to assign TAs to courses")
# test assigning a user not in the data base
def test_CommandsAssignTaToCourse2(self):
response = self.co.assign_ta_to_course('Gus', 'CS-361')
self.assertEqual(response, "This user is not present in the data base")
    # test assigning a TA to a course not in the data base
def test_CommandsAssignTaToCourse3(self):
response = self.co.assign_ta_to_course('Joe', 'CS-999')
self.assertEqual(response, "This course is not present in the data base")
# test assigning TA to course that conflicts with their schedule
def test_CommandsAssignTaToCourse4(self):
self.co.assign_ta_to_course('Joe', 'CS-337')
response = self.co.assign_ta_to_course('Joe', 'CS-361')
self.assertEqual(response, "This course conflicts with the TA's current schedule")
# test calling method without all valid parameters
def test_CommandsAssignTaToCourse5(self):
response = self.co.assign_instructor('Joe')
self.assertEqual(response, "Please input valid arguments for both fields to create assign "
"an instructor to a course")
    # test that the TA is added to the course with proper conditions
def test_CommandsAssignTaToCourse6(self):
response = self.co.assign_ta_to_course('Joe', 'CS-361')
self.assertEqual(response, 'Joe has been added to as a TA to CS-361')
assigned = Course.objects.filter(name='CS-361', tas=self.Account2).exists()
self.assertEqual(True, assigned)
| [
"[email protected]"
] | |
2faacdb507a4c8589c27ab84d6b3683d929c425e | ff451381a5d74e4bff58e9c2d5bf6e448a1d4637 | /main.py | 388d3310f87ad5b9dfc7670b3fc2e772a888015a | [] | no_license | Nowens17/web-caesar | 3d37e299ffe8b1eb561298ef6823fcb8c5259d1c | c8050651a7e308752700065b0c8dd5949ca05170 | refs/heads/master | 2021-04-15T09:15:38.324600 | 2018-03-25T14:07:14 | 2018-03-25T14:07:14 | 126,695,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,153 | py | from flask import Flask, request
from caesar import rotate_string
app = Flask(__name__)
app.config['DEBUG'] = True
form = """
<!DOCTYPE html>
<html>
<head>
<style>
form {{
background-color: #eee;
padding: 20px;
margin: 0 auto;
width: 540px;
font: 16px sans-serif;
border-radius: 10px;
}}
textarea {{
margin: 10px 0;
width: 540px;
height: 120px;
}}
</style>
</head>
<body>
<form action="/" method="post">
<label for="rotate">Rotate by:</label>
<input type="text" name="rot" value="0"/>
</br>
<textarea name="text">{0}</textarea>
</br>
<input type="submit">
</form>
</body>
</html>
"""
@app.route("/", methods=['POST'])
def encrypt():
rot_data = int(request.form['rot'])
text_data = request.form['text']
encrypted_string = rotate_string(text_data, rot_data)
return form.format(encrypted_string)
@app.route("/")
def index():
return form.format("")
app.run() | [
"[email protected]"
] | |
fc38d6f55c918759f81ddaf951a6d77536881d94 | dd449ad8388847779b265f49f2339c9681376c60 | /flask/c6_avatar/config.py | c04cb5719b0d47de5fe50590b3bf8103af2c1a85 | [] | no_license | whoji/training-ground | 478d76a8c274050eb910b28729ca1d1cdb47eae9 | b107cc47c4a04bb8868c410ab207bacab5a86e4c | refs/heads/master | 2020-05-16T16:13:26.788156 | 2019-12-04T01:56:01 | 2019-12-04T01:56:01 | 183,154,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
'''
    It is in general a good practice to set configuration from
environment variables, and provide a fallback value when the
environment does not define the variable.
'''
SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False | [
"[email protected]"
] | |
06766582c327781e0e4b313477641e0d7441f800 | b3ed082c0413fb8eef32094ba0535bee7517ee87 | /benchmarks/prepare-executables.py | ddac74d5dca04f6f152c44623045b50eb6bfaaab | [
"BSD-3-Clause"
] | permissive | sureyeaah/dex-lang | 60ec1472aba962cd0896fdb65350ceba8daffc54 | 8806d9be61fcc44fe52fa6d3e8339a8bd15f7aef | refs/heads/main | 2023-03-08T03:50:00.482085 | 2020-10-08T14:05:09 | 2020-10-09T15:32:49 | 318,612,484 | 0 | 0 | BSD-3-Clause | 2020-12-04T19:10:23 | 2020-12-04T19:10:22 | null | UTF-8 | Python | false | false | 7,552 | py | import sys
import os
import struct
from pathlib import Path
from itertools import product
RODINIA_ROOT = Path('rodinia') / 'rodinia'
PARBOIL_DATA_ROOT = Path('parboil') / 'data'
if not RODINIA_ROOT.exists():
print("Rodinia benchmark suite missing. Please download it and place it in a "
"`rodinia/rodinia` subdirectory.")
sys.exit(1)
if not PARBOIL_DATA_ROOT.exists():
print("Parboil benchmark suite missing. Please download the datasets and place them "
"in a `parboil/data` subdirectory.")
sys.exit(1)
EXE_ROOT = Path('exe')
RODINIA_EXE_ROOT = EXE_ROOT / 'rodinia'
PARBOIL_EXE_ROOT = EXE_ROOT / 'parboil'
# TODO: An easy way to do this would be to wrap the parameters in arrays, but
# that doesn't work when the return type depends on any of them.
PREVENT_PARAMETER_INLINING = True
PRINT_OUTPUTS = False
def prepare_rodinia_kmeans():
kmeans_data_path = RODINIA_ROOT / 'data' / 'kmeans'
kmeans_data_files = [
(kmeans_data_path / '100', 10),
(kmeans_data_path / '204800.txt', 8),
(kmeans_data_path / 'kdd_cup', 5),
]
kmeans_exe_path = RODINIA_EXE_ROOT / 'kmeans'
kmeans_exe_path.mkdir(parents=True, exist_ok=True)
for (df, k) in kmeans_data_files:
with open(df, 'r') as f:
lines = [l for l in f.read().split('\n') if l]
vals = [map(ensure_float, l.split()[1:]) for l in lines]
case_exe_path = kmeans_exe_path / (df.stem + '.dx')
with open(case_exe_path, 'w') as f:
emit_dex(f, 'rodinia', 'kmeans', [
('points', format_matrix(vals)),
('k', k),
('threshold', 0),
('max_iterations', 500),
])
print(f'Created {case_exe_path}')
def prepare_rodinia_hotspot():
data_path = RODINIA_ROOT / 'data' / 'hotspot'
data_files = [(data_path / f'temp_{size}', data_path / f'power_{size}', size)
for size in (64, 512, 1024)]
exe_path = RODINIA_EXE_ROOT / 'hotspot'
exe_path.mkdir(parents=True, exist_ok=True)
for (tf, pf, size) in data_files:
with open(tf, 'r') as f:
tvals = [l for l in f.read().split('\n') if l]
with open(pf, 'r') as f:
pvals = [l for l in f.read().split('\n') if l]
ts = list(chunk(tvals, size))
ps = list(chunk(pvals, size))
case_exe_path = exe_path / f'{size}.dx'
with open(case_exe_path, 'w') as f:
emit_dex(f, 'rodinia', 'hotspot', [
('numIterations', 360),
('T', format_matrix(ts)),
('P', format_matrix(ps))
])
print(f'Created {case_exe_path}')
def prepare_rodinia_backprop():
exe_path = RODINIA_EXE_ROOT / 'backprop'
exe_path.mkdir(parents=True, exist_ok=True)
exe_path_ad = RODINIA_EXE_ROOT / 'backprop-ad'
exe_path_ad.mkdir(parents=True, exist_ok=True)
in_features = [512, 123]
for inf, use_ad in product(in_features, (False, True)):
outf = 1
hidf = 16
case_exe_path = (exe_path_ad if use_ad else exe_path) / f'{inf}_{hidf}_{outf}.dx'
with open(case_exe_path, 'w') as f:
emit_dex(f, 'rodinia', 'backprop', [
('input', random_vec('in=>Float')),
('target', random_vec('out=>Float')),
('inputWeights', random_mat('{ b: Unit | w: in }=>hid=>Float')),
('hiddenWeights', random_mat('{ b: Unit | w: hid }=>out=>Float')),
('oldInputWeights', random_mat('{ b: Unit | w: in }=>hid=>Float')),
('oldHiddenWeights', random_mat('{ b: Unit | w: hid }=>out=>Float')),
], preamble=[
('in', f'Fin {inf}'),
('hid', f'Fin {hidf}'),
('out', f'Fin {outf}'),
])
print(f'Created {case_exe_path}')
def prepare_rodinia_pathfinder():
exe_path = RODINIA_EXE_ROOT / 'pathfinder'
exe_path.mkdir(parents=True, exist_ok=True)
world_sizes = [(100, 100000)]
for rows, cols in world_sizes:
case_exe_path = exe_path / f'{rows}_{cols}.dx'
with open(case_exe_path, 'w') as f:
emit_dex(f, 'rodinia', 'pathfinder', [
('world', random_mat(f'(Fin {rows})=>(Fin {cols})=>Int', gen='randInt')),
])
print(f'Created {case_exe_path}')
def prepare_parboil_mriq():
# NB: Run-time of this one shouldn't be data-dependent, so for now we just
# generate random inputs of sizes matching the standard dataset.
exe_path = PARBOIL_EXE_ROOT / 'mriq'
exe_path.mkdir(parents=True, exist_ok=True)
problem_sizes = [
(32768, 3072, 'small'),
(262144, 2048, 'large'),
]
for nx, nk, name in problem_sizes:
case_exe_path = exe_path / (name + '.dx')
with open(case_exe_path, 'w') as f:
emit_dex(f, 'parboil', 'mriq', [
*[(f'k{c}', random_vec(f'(Fin {nk})=>Float')) for c in ('x', 'y', 'z')],
*[(f'{c}', random_vec(f'(Fin {nx})=>Float')) for c in ('x', 'y', 'z')],
*[(f'{c}', random_vec(f'(Fin {nk})=>Float')) for c in ('r', 'i')],
])
print(f'Created {case_exe_path}')
def prepare_parboil_stencil():
exe_path = PARBOIL_EXE_ROOT / 'stencil'
exe_path.mkdir(parents=True, exist_ok=True)
problem_sizes = [
(128, 128, 32),
(512, 512, 64),
]
for x, y, z in problem_sizes:
case_exe_path = exe_path / f'{x}_{y}_{z}.dx'
with open(case_exe_path, 'w') as f:
emit_dex(f, 'parboil', 'stencil', [
('input', random_cube(f'(Fin {x})=>(Fin {y})=>(Fin {z})=>Float')),
])
print(f'Created {case_exe_path}')
def prepare_parboil_histogram():
df_root = PARBOIL_DATA_ROOT / 'histo'
exe_path = PARBOIL_EXE_ROOT / 'histogram'
exe_path.mkdir(parents=True, exist_ok=True)
data_files = [
(df_root / 'default' / 'input' / 'img.bin', 'default'),
(df_root / 'large' / 'input' / 'img.bin', 'large'),
]
for df, name in data_files:
with open(df, 'rb') as f:
stream = struct.iter_unpack('i', f.read())
img_width, = next(stream)
img_height, = next(stream)
hist_width, = next(stream)
hist_height, = next(stream)
hist_size = hist_width * hist_height
data = [str(v[0]) for v in stream]
assert len(data) == img_width * img_height
img = list(chunk(data, img_width))
assert len(img) == img_height and len(img[0]) == img_width
case_exe_path = exe_path / (name + '.dx')
with open(case_exe_path, 'w') as f:
emit_dex(f, 'parboil', 'histogram', [
('hist_size', hist_size),
('input', format_matrix(img)),
])
print(f'Created {case_exe_path}')
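# The helpers below emit Dex source expressions. The random_* helpers build
# deterministic pseudo-random arrays whose values are keyed by index, so
# regenerating a benchmark reproduces identical input data.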
def random_vec(ty, gen='rand'):
return f'((for i. {gen} (ixkey (newKey 0) i)) : ({ty}))'
def random_mat(ty, gen='rand'):
return f'((for i j. {gen} (ixkey (newKey 0) (i, j))) : ({ty}))'
def random_cube(ty, gen='rand'):
return f'((for i j k. {gen} (ixkey (newKey 0) (i, j, k))) : ({ty}))'
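# Yield successive s-sized chunks of l.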
def chunk(l, s):
for i in range(0, len(l), s):
yield l[i:i + s]
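# Write a self-contained Dex benchmark file: optional preamble bindings, the
# parameter bindings, an include of the kernel source, and a %bench directive
# that applies the kernel to the parameters.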
def emit_dex(f, suite, name, params, *, preamble=[]):
for n, v in preamble:
f.write(f'{n} = {v}\n')
for n, v in params:
f.write(f'{n} = {v}\n')
f.write('\n')
f.write(f'include "{suite}/{name}.dx"\n')
f.write('\n')
f.write(f'%bench "{name}"\n')
f.write(f'result = {name} {(" ".join(n for n, v in params))}\n')
if PRINT_OUTPUTS:
f.write('\n')
f.write('result\n')
def format_table(l, sep=','):
return '[' + sep.join(l) + ']'
def format_matrix(m):
return format_table((format_table(cols) for cols in m), sep=',\n ')
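# Ensure a numeric literal carries a decimal point so it is parsed as a Float rather than an Int.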
def ensure_float(s):
if '.' not in s:
return s + ".0"
return s
prepare_parboil_histogram()
prepare_parboil_stencil()
prepare_parboil_mriq()
prepare_rodinia_pathfinder()
prepare_rodinia_backprop()
# Verified outputs
prepare_rodinia_hotspot()
prepare_rodinia_kmeans()
| [
"[email protected]"
] | |
dcd389c1f2d0c0d6c5d4bc6e8bf9480fd938e5db | 6f361cb8e428af590240ebbab955ab2e33cc44d4 | /BasicFlask/url.py | 047527e8c0c034cc18bfbdac0cfd71b3684a060f | [] | no_license | bibaveaarti/FlaskDemo | ed9e3d9e78837eec654a3e2ebd5b1e366533e6c4 | f38cedd7ec75a7a719b821cd0dbd7c060caa7b04 | refs/heads/master | 2020-04-18T06:58:53.309837 | 2019-01-24T10:18:27 | 2019-01-24T10:18:27 | 167,344,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | from flask import Flask,render_template
app=Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
if __name__=="__main__":
app.run(debug=True)
| [
"bibaveaarti"
] | bibaveaarti |
70df9fc2ce7886f3f681eb0cad792ae2d1b85a03 | ede03f3c5232b5e7a85b27ac0f09fe7470d0f355 | /mylotto/apps.py | 77301f71f8ce87197c1a8c8ce0aa65f0ffa66a56 | [] | no_license | ahnbu/askcompany | 30d3c5ac4d2aee7d45c7210cb4d8a8dfbd2b99cf | 7b04c4255506cb768f850e73c8d975c2dd05d3e4 | refs/heads/master | 2020-04-01T08:51:19.524570 | 2018-11-14T06:59:42 | 2018-11-14T06:59:42 | 153,049,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | from django.apps import AppConfig
class MylottoConfig(AppConfig):
name = 'mylotto'
| [
"[email protected]"
] | |
f026bfb37d95f92aa95ec53e521af68a4165cd83 | 049cf15e75b7ae37f49f0338eef9d6a2a2ed14b4 | /epyk_server/system/cli/command_line_fncs.py | 2b979b4c8cea6c80abc097f809cdb2c972ce919c | [] | no_license | epykachu/epyk-engine | a8aec77cd4804ab84609e296b145d67dcc9429aa | 264e1c705e25dc94625b903c45c381d69db49f30 | refs/heads/master | 2020-11-24T08:46:31.488220 | 2019-12-14T19:01:08 | 2019-12-14T19:01:08 | 228,059,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | """
"""
import sys
import argparse
import pkg_resources
import shutil
import os
def main():
""""""
parser_map = {}
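  # parser_map should map each command name to a (parser_initializer, help_text) pair;
  # it is empty here, so no subcommands get registered yet.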
arg_parser = argparse.ArgumentParser(prog='epyk-engine')
subparser = arg_parser.add_subparsers(title='Commands', dest='command')
subparser.required = True
for func, parser_init in parser_map.items():
new_parser = subparser.add_parser(func, help=parser_init[1])
parser_init[0](new_parser)
args = arg_parser.parse_args(sys.argv[1:])
return args.func(args)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
d4dff6e1a4bbe4895ee775b9a4c5f27ba12a0d0d | 2a5c0c49319989a24f9c9f18530f109bc48a8df1 | /CursesEnded/FirstYear/Python(Algorithmics)/list1/ex2.py | e6d6b6e0e7460b934227c30b5f5b4f0fac782fe5 | [] | no_license | Kamil-IT/Studies | 0ada6dd92d7ecdbd0a3164c7c80080dd715ce8fc | d70343b2b7818ce303d816443bb15d21e03b42e0 | refs/heads/master | 2022-12-22T01:55:35.047783 | 2022-10-20T18:55:29 | 2022-10-20T18:55:29 | 217,039,987 | 0 | 1 | null | 2022-12-10T06:03:55 | 2019-10-23T11:24:33 | Jupyter Notebook | UTF-8 | Python | false | false | 476 | py | import numpy as np
def create_random_matrix_marks(stud_quantity, sub_quantity):
return np.random.choice(np.arange(2., 6., 0.5), size=(stud_quantity, sub_quantity))
rows = 4
columns = 4
matrix1 = create_random_matrix_marks(rows, columns)
matrix2 = create_random_matrix_marks(rows, columns)
symmetric_length = 0
for row in range(0, rows):
for col in range(0, columns):
symmetric_length += abs(matrix1[row, col] - matrix2[row, col])
print(symmetric_length) | [
"[email protected]"
] | |
6dbcf814a18717c624b7c80b81bffc433c263780 | 5aaaac7248081119b4691878e2f1756aa5aba481 | /blog_back/manage.py | eaaff1943d75074aa9ad796bd1e9e87b2e963f16 | [] | no_license | Heyday7/blog_back | 43602b28da28468bfde7bca0e4fac2be5e1fb374 | a214c0c7be7b32d16ccfd85df0d36e3d99eb13b1 | refs/heads/master | 2023-05-04T02:41:38.730643 | 2021-05-15T09:32:21 | 2021-05-15T09:32:21 | 358,890,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'blog_back.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
13953a0f2fd27e1d596a732d066f09bce61ff34a | ffd2c9831079767b8242ff06b53360079e6b3f16 | /predict.py | 0bf183a4d7d133bfeb1c7929e7f5a47acecf24af | [] | no_license | dlam4h/Unet_pytorch | 6084637d23f4c322e032b0cd885bb23fd812c697 | 1410ea8d091ce1b3fff23fed94ff7b75bb0e8b6f | refs/heads/master | 2020-03-25T17:08:06.174774 | 2018-08-08T05:38:05 | 2018-08-08T05:38:05 | 143,964,542 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,507 | py | # -*- coding: UTF-8 -*-
import torch
from model import Unet, Unet_bn
from torch.autograd import Variable
from PIL import Image
import torch.nn.functional as F
import numpy as np
import os
import glob
use_gpu = torch.cuda.is_available()
print(use_gpu)
if use_gpu:
torch.cuda.set_device(0)
model = Unet_bn()
model_weight = '2018-08-04 06-23-38 Unet_bn.pt'
if use_gpu:
model.cuda()
model.load_state_dict(torch.load(model_weight))
else:
model.load_state_dict(torch.load(model_weight,map_location='cpu'))
data_path = './test_data/test'
save_path = './test_data/test'
img_type = 'jpg'
img_list = glob.glob(data_path+"/*."+img_type)
for imgname in img_list:
midname = imgname.split('\\')[-1]
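    # Read the image and reshape it from HWC uint8 into a CHW float32 tensor with a leading batch dimension.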
img = Image.open(data_path+"/"+midname)
img = np.array(img,dtype=np.float32)
img = np.transpose(img,(2,0,1))
img = torch.from_numpy(img).unsqueeze(0)
if use_gpu:
img = img.cuda()
outputs = model(img)
outputs = F.sigmoid(outputs).squeeze(0).squeeze().cpu().detach().numpy()
else:
outputs = model(img)
outputs = F.sigmoid(outputs).squeeze(0).squeeze().data.numpy()
dlam = Image.fromarray((outputs * 255).astype(np.uint8))
# dlam.show()
# print(save_path+'/'+midname.split('.')[0]+'.tif')
dlam.save(save_path+'/'+midname.split('.')[0]+'.tif')
print(midname.split('.')[0]+'.tif')
# outputs = model(Variable(img,volatile=True))
# image = outputs.detach().numpy()
# a=Image.fromarray(int(image[0]))
# a.save('a.jpg')
| [
"l"
] | l |
cddbcc6aec5e14b2780fdf3276260697db1be33f | bee2bf79ecbeaaf39a2b0f39f5def5d98ff389aa | /tasks/zfs-deps-debian.py | 41aeb324a43d7d59853d8faa4fb7196b5120aa69 | [] | no_license | gunnarbeutner/zfsci | d50e16f63c3f608fe15f8bf378204fcf3d3807c8 | 184464ed6f354f4456ab2ce2ace471c24cfe6789 | refs/heads/master | 2020-06-04T12:27:58.974553 | 2011-06-05T10:24:08 | 2011-06-05T10:24:08 | 1,671,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | from joblib import Task, TaskResult
import os


class ZFSDepDebianTask(Task):
description = "ZFS dependencies installation"
stage = "test"
provides = ['zfs-builddeps']
def run(self):
if os.system("aptitude install -y git-core module-assistant uuid-dev zlib1g-dev gawk") != 0:
return TaskResult.FAILED
return TaskResult.PASSED
def should_run(self):
return (self.job.attributes['fs-type'] == 'zfs' and self.job.attributes['distribution'] == 'debian')
ZFSDepDebianTask.register()
| [
"[email protected]"
] | |
780b8a6d9743014a8c87552d2dfb7d9c1f5a4f51 | 68d75679f12c13a2b5f127d8e069fd85a3f943e5 | /tools/convert_model.py | 5b25e77d879abe2afcb56ac60eefc2d89bf151e5 | [] | no_license | kun1989/pth-easy | ba81b2fb25636cfea347646a83952f3986366bc7 | 4147868c395dc522bb5ed8360df1453e5c52a9fc | refs/heads/master | 2022-09-08T06:46:11.891439 | 2020-06-02T06:54:14 | 2020-06-02T06:54:14 | 249,013,163 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,277 | py | import argparse
import torch
from gluoncv.model_zoo import get_model as get_gluon_model
from pth_easy.pytorch_gluoncv_model_zoo import get_model as get_torch_model
import numpy as np
import mxnet as mx
import os
def parse_args():
parser = argparse.ArgumentParser(description='covert gluoncv to pytorch model.')
parser.add_argument('--network', type=str, default='resnet50_v1', help="network name")
parser.add_argument('--save-path', type=str, default='model', help='Path of the model')
args = parser.parse_args()
return args
def convert(name, save_path):
if not os.path.exists(save_path):
os.makedirs(save_path)
print('convert {}\n'.format(name))
gluon_net = get_gluon_model(name, pretrained=True)
gluon_params = gluon_net._collect_params_with_prefix()
gluon_params_keys = list(gluon_params.keys())
torch_net = get_torch_model(name)
torch_params = torch_net.state_dict()
torch_params_keys = list(torch_params.keys())
torch_params_keys = [key for key in torch_params_keys if not key.endswith("num_batches_tracked")]
assert (len(gluon_params_keys) >= len(torch_params_keys))
for i, (gl_key, th_key) in enumerate(zip(gluon_params_keys, torch_params_keys)):
        th_shape = torch_params[th_key].shape
        gl_shape = gluon_params[gl_key].shape
        assert th_shape == gl_shape
torch_params[th_key].data.copy_(torch.from_numpy(gluon_params[gl_key]._data[0].asnumpy()))
torch_net.eval()
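    # Sanity-check the conversion: feed the same random input through both networks and compare the outputs.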
img_data = np.random.randint(0, 255, (1, 3, 224, 224))
img_data = img_data.astype(np.float32) / 255.0
gl_out = gluon_net(mx.nd.array(img_data)).asnumpy()
th_out = torch_net(torch.from_numpy(img_data)).detach().numpy()
print('pytorch: min {}, max: {}, mean: {}'.format(th_out.min(), th_out.max(), th_out.mean()))
print('gluoncv: min {}, max: {}, mean: {}'.format(gl_out.min(), gl_out.max(), gl_out.mean()))
error = gl_out - th_out
print('error: min {}, max: {}, mean: {}'.format(error.min(), error.max(), error.mean()))
print('save {} model\n'.format(name))
torch.save(obj=torch_net.state_dict(), f="{}/{}.pth".format(save_path, name))
if __name__ == '__main__':
args = parse_args()
convert(args.network, args.save_path)
| [
"[email protected]"
] | |
9ab95a3976eb86e3e08c526e6650342d5cd746f1 | fc678a0a5ede80f593a29ea8f43911236ed1b862 | /247-StrobogrammaticNumberII.py | 9fad0fec8759836f4849e4c0a8a3e1ad655112ea | [] | no_license | dq-code/leetcode | 4be0b1b154f8467aa0c07e08b5e0b6bd93863e62 | 14dcf9029486283b5e4685d95ebfe9979ade03c3 | refs/heads/master | 2020-12-13T15:57:30.171516 | 2017-11-07T17:43:19 | 2017-11-07T17:43:19 | 35,846,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | class Solution(object):
def findStrobogrammatic(self, n):
"""
:type n: int
:rtype: List[str]
"""
res = ['0', '1', '8'] if n % 2 else ['']
while n > 1:
n -= 2
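            # Wrap each number with a mirror pair; '00' is skipped at the outermost layer (n < 2) to avoid leading zeros.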
res = [a + num + b for a, b in ['00', '11', '88', '69', '96'][n < 2:] for num in res]
return res
| [
"[email protected]"
] | |
d562a0fbe8ab34e4d043229a0cb3d20e19ae2375 | d358106b304757d13da40ff7be4df6293af06990 | /functions/face_index.py | 5f0d9d095a2d8086f41e735ccc355b3049351b98 | [
"Apache-2.0"
] | permissive | arapulido/face-recognition-serverless-app | 1693f5b48f8c72ff974c90a43b4116ad64f98f3a | e7ae37a9bf7618a3a15d277ce30af3fc6ac52e01 | refs/heads/master | 2023-02-06T06:26:53.149777 | 2022-05-06T13:19:23 | 2022-05-06T13:19:23 | 213,391,123 | 2 | 3 | Apache-2.0 | 2023-01-27T04:41:54 | 2019-10-07T13:30:02 | Python | UTF-8 | Python | false | false | 1,535 | py | import json
import logging
import os
from datadog_lambda.metric import lambda_metric
from datadog_lambda.wrapper import datadog_lambda_wrapper
import boto3
# Set logger
logger = logging.getLogger()
logger.setLevel(logging.INFO)
@datadog_lambda_wrapper
def handler(event, context):
params = json.loads(event['Records'][0]['Sns']['Message'])
if 'srcBucket' not in params or 'name' not in params:
logger.error("Validation failed. Missing parameters")
raise Exception("Missing parameters")
rekognition_client = boto3.client('rekognition')
sns_client = boto3.client('sns')
collection_id = os.environ['REKOGNITION_COLLECTION_ID']
data = rekognition_client.index_faces(
CollectionId=collection_id,
DetectionAttributes=[],
ExternalImageId=params['userId'],
Image={
'S3Object': {
'Bucket': params['srcBucket'],
'Name': params['name'],
}
}
)
params['faceId'] = data['FaceRecords'][0]['Face']['FaceId']
# Count an image indexed
lambda_metric(
"face_recognition.images_indexed",
1,
tags=['face_id:'+params['faceId'],
'bucket:'+params['srcBucket'],
'image_name:'+params['name'],
'user:'+params['userId']]
)
sns_client.publish(
TopicArn=os.environ['FACE_DETECTION_PERSIST_TOPIC'],
Message=json.dumps(params))
response = {
"statusCode": 200,
"body": json.dumps(data)
}
return response
| [
"[email protected]"
] | |
6134962ce9c8843beee87adc985dcc6ea8159d35 | 154b888942d26e754280463383caa5bb51d11a9a | /Python/PyYouTube/venv/Scripts/easy_install-script.py | e432e3646cb744ada3ffe9b25a9e1ed5ec305df0 | [] | no_license | learn-devcat/playground | 7a34c07ffdae9208a673a1d5a40f6323f07b6c23 | e3488472840af991e86233ab54de082e506ee860 | refs/heads/master | 2020-03-27T03:34:13.170818 | 2018-09-12T22:44:59 | 2018-09-12T22:44:59 | 145,872,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | #!"D:\OneDrive-RC\OneDrive\__GIT REPOS__\playground\Python\PyYouTube\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
| [
"[email protected]"
] | |
607677a8f624cc9c48ba2bd29abad65151a36ede | 9ac0866eee361bfee730ae87d2f413b5ad82e23a | /examples/register.py | f605f62516afc5c8603b2896b60fe51e14076501 | [
"BSD-2-Clause"
] | permissive | jeffmahler/lfd | e6cb2c309eec6276d51372eca0170ccb72c4a2b0 | ecc6b934db098c0b1af9946454917b6dc911cb74 | refs/heads/master | 2020-12-26T00:35:37.302745 | 2015-01-15T17:27:10 | 2015-01-15T17:27:10 | 27,498,626 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,775 | py | #!/usr/bin/env python
from __future__ import division
import numpy as np
from lfd.environment.simulation import StaticSimulation
from lfd.environment.simulation_object import BoxSimulationObject
from lfd.registration.registration import TpsRpmRegistrationFactory
from lfd.registration.plotting_openrave import registration_plot_cb
from lfd.demonstration.demonstration import Demonstration, SceneState
np.random.seed(0)
table_height = 0.77
table = BoxSimulationObject("table", [1, 0, table_height-.1], [.85, .85, .1], dynamic=False)
sim = StaticSimulation()
sim.add_objects([table])
sim.create_viewer()
def generate_cloud(x_center_pert=0, max_noise=0.02):
# generates 40 cm by 60 cm cloud with optional pertubation along the x-axis
grid = np.array(np.meshgrid(np.linspace(-.2,.2,21), np.linspace(-.3,.3,31))).T.reshape((-1,2))
grid = np.c_[grid, np.zeros(len(grid))] + np.array([.5, 0, table_height+max_noise])
cloud = grid + x_center_pert * np.c_[(0.3 - np.abs(grid[:,1]-0))/0.3, np.zeros((len(grid),2))] + (np.random.random((len(grid), 3)) - 0.5) * 2 * max_noise
return cloud
demos = {}
for x_center_pert in np.arange(-0.1, 0.6, 0.1):
demo_name = "demo_{}".format(x_center_pert)
demo_cloud = generate_cloud(x_center_pert=x_center_pert)
demo_scene_state = SceneState(demo_cloud, downsample_size=0.025)
demo = Demonstration(demo_name, demo_scene_state, None)
demos[demo_name] = demo
test_cloud = generate_cloud(x_center_pert=0.2)
test_scene_state = SceneState(test_cloud, downsample_size=0.025)
plot_cb = lambda i, i_em, x_nd, y_md, xtarg_nd, wt_n, f, corr_nm, rad: registration_plot_cb(sim, x_nd, y_md, f)
reg_factory = TpsRpmRegistrationFactory(demos)
regs = reg_factory.batch_register(test_scene_state, callback=plot_cb)
| [
"[email protected]"
] | |
de51256b47b7914d5af0f62a042ec0c6296fb9e1 | 7af013dcf89b7f6c3aff4fbfdcd3281e481b103b | /Variables/swimming.py | 9e3e462a848d71ec65bcba7180436e54356152ee | [] | no_license | Rodrigoh2702/Minecraft-Python | 13925bbfba09cbd748b913e42110eef5dbdcb4e9 | 582c57ee21b16691cfd7d1070bba3b9a9d90626c | refs/heads/master | 2020-04-29T13:45:32.190096 | 2019-03-19T23:05:05 | 2019-03-19T23:05:05 | 176,177,349 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | from mcpi.minecraft import Minecraft
mc=Minecraft.create()
pos=mc.player.getTilePos()
x=pos.x
y=pos.y
z=pos.z
blockType=mc.getBlock(x,y,z)
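# Block ID 9 is stationary water in Minecraft Pi, so this posts True when the player's tile is water.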
mc.postToChat(blockType==9)
#If you want to check whether you are completely underwater,
#create a variable that stores y+1 and compare it with 9 as well
| [
"[email protected]"
] | |
8fd8097c291f2403be1a995dba902a981588c9b7 | f0fa368ad60685cd1a593c2aa1effdf19649c48d | /app.py | e97ebaf150311c3fd5b9410b3545034d17e4acb9 | [] | no_license | adithyawn/knomi-v1-deployment | 17d3bcf2c57765b87c2c3af3d428c808c2131912 | 13144e342e1a347f7ffc4a74e59751c735d6ec80 | refs/heads/master | 2023-06-18T04:37:20.188799 | 2021-07-22T02:26:46 | 2021-07-22T02:26:46 | 388,303,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,285 | py | from flask import Flask, request, render_template, url_for, redirect, flash
import requests
import json
from datetime import datetime
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, TextAreaField
from wtforms.fields.core import SelectField
from wtforms.validators import length, InputRequired, EqualTo
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import LoginManager, UserMixin, login_user, login_required, current_user,logout_user
from sqlalchemy.exc import IntegrityError
from sqlalchemy import or_
app = Flask(__name__)
app.config['SECRET_KEY'] = 'ghjdfdsfxxxx'
app.config['SQLALCHEMY_DATABASE_URI']='mysql+pymysql://db-droplet1-admin:[email protected]:25060/db-knomi'
db = SQLAlchemy(app)
migrate = Migrate(app,db)
login_manager = LoginManager(app)
#redirect to the login view when a protected page is opened without logging in
login_manager.login_view = 'login'
######### DATABASE ###################
#Add User Mixin for Login
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100))
username = db.Column(db.String(30), unique=True)
password = db.Column(db.String(150))
status = db.Column(db.Integer)
class Content(db.Model):
id = db.Column(db.Integer, primary_key=True)
keyword = db.Column(db.String(150), unique=True)
answer = db.Column(db.Text)
    category_content = db.Column(db.String(150))
content_date = db.Column(db.DateTime)
class Category(db.Model):
id = db.Column(db.Integer, primary_key=True)
category = db.Column(db.String(100), unique=True)
#user_id is default id by login manager
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
######### FORM ###################
class LoginForm(FlaskForm):
username = StringField('Username :', validators=[InputRequired(message='Username is required!'), length(max=30, message='Characters can\'t be more than 30!')])
password = PasswordField('Password :', validators=[InputRequired(message='Password is required!')])
remember = BooleanField('Remember Me')
class AddUserForm(FlaskForm) :
name = StringField('Full Name:', validators=[InputRequired(message='Full name is required!'), length(max=100, message='Character can\'t be more than 100!')])
username = StringField('Username:', validators=[InputRequired(message='Username is required!'), length(max=30, message='Character can\'t be more than 30!')])
password = PasswordField('Password:', validators=[InputRequired(message='Password is required!'), length(max=150, message='Character can\'t be more than 150'), EqualTo('password_confirm', message='Password must match!')])
password_confirm = PasswordField('Repeat Password:', validators=[InputRequired(message='Password is required!'), length(max=150, message='Character can\'t be more than 150!'),])
status = SelectField('status', choices=[(1,"Admin"),(2,"User")])
class AddContentForm(FlaskForm) :
keywords = StringField('Keyword:', validators=[InputRequired(message='Keyword is required!')])
answer = TextAreaField('Answer:', validators=[InputRequired(message='Answer is required!')])
select_category = SelectField('select_category', choices=[])
select_page = SelectField('page', choices=[(5,5),(10,10)], default=(5,5))
class AddCategoryForm(FlaskForm) :
category = StringField('Full Name:', validators=[InputRequired(message='Category is required!'), length(max=100, message='Character can\'t be more than 100!')])
###################################
####### #INDEX ############
@app.route('/')
def index():
form = LoginForm()
return render_template('index.html', form=form)
@app.route('/login', methods=['GET','POST'])
def login():
form = LoginForm()
if request.method == 'POST': #form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
# print(user)
if not user:
return render_template('index.html', form=form, message='Login Required!')
if check_password_hash (user.password, form.password.data):
login_user(user, remember=form.remember.data)
return redirect(url_for('knowledgebase'))
return render_template('index.html', form=form, message='Login Failed!')
return render_template('index.html', form=form)
@app.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('index'))
####### #SETTINGS ############
@app.route('/settings')
@login_required
def settings():
form = AddUserForm()
query_user = User.query.all()
return render_template('settings.html', form=form, query_user=query_user)
@app.route('/adduser', methods=['GET','POST'])
@login_required
def adduser():
form = AddUserForm()
#this is for query in render template
query_user = User.query.all()
if form.validate_on_submit():
try :
new_user = User(name=form.name.data, username=form.username.data, password=generate_password_hash(form.password.data), status=form.status.data)
db.session.add(new_user)
db.session.commit()
flash('User successfully added !','success')
except IntegrityError :
db.session.rollback()
flash('Username is duplicated !','danger')
return redirect(url_for('settings'))
else :
flash('Add user error !','danger')
return render_template('settings.html', form=form, query_user=query_user)
@app.route('/deleteuser/<int:id>', methods=['GET','POST'])
@login_required
def deleteuser(id):
query_id = User.query.filter_by(id=id).first()
db.session.delete(query_id)
db.session.commit()
return redirect(url_for('settings'))
@app.route('/edituser/<int:id>', methods=['GET'])
@login_required
def edituser(id):
form = AddUserForm()
query_user = User.query.all()
query_edit_by_id = User.query.filter_by(id=id).first()
# print(query_edit_by_id.name)
# print(type(query_edit_by_id))
    #pre-fill the form when rendering the template, using the record matching this id
form.name.data = query_edit_by_id.name
form.username.data = query_edit_by_id.username
# form.status.data = query_id.status
    return render_template('settings.html', form=form, query_user=query_user) #pass query_user so the full user list shows in the settings.html template
####### #KNOWLEDGEBASE ############
@app.route('/knowledgebase', methods=['GET','POST'], defaults={"page":1})
@app.route('/knowledgebase/<int:page>',methods=['GET','POST'])
@login_required
def knowledgebase(page):
form = AddContentForm()
page = page
pages = 5
# source = [('mangga',),('apel',),('jeruk',)] convert to List
query_keyword = Content.query.with_entities(Content.keyword).all()
source = [j for i in query_keyword for j in i]
# print((source))
contents = Content.query.order_by(Content.content_date.desc()).paginate(page,pages,error_out=False)
# source = [j for i in query_keyword for j in i]
# print((source))
query_category = Category.query.with_entities(Category.category).all()
query_category = [j for i in query_category for j in i]
form.select_category.choices = query_category
update_answer = 0
if request.method == 'POST':
page = 5
pages = contents.total
if request.form.get('tag-all') :
        # tag = request.form['tag-all'] #to grab the value directly use request.form['tag-all'] instead of request.form.tag, or form.tag.data when using WTForms
tag = request.form.get('tag-all')
search = "%{}%".format(tag)
contents = Content.query.filter(or_(Content.keyword.like(search),Content.answer.like(search))).paginate(per_page=pages,error_out=True)
elif request.form.get('tag-keyword') :
tag = request.form.get('tag-keyword')
search = "%{}%".format(tag)
contents = Content.query.filter(Content.keyword.like(search)).paginate(per_page=pages,error_out=True)
elif request.form.get('tag-answer') :
tag = request.form.get('tag-answer')
search = "%{}%".format(tag)
contents = Content.query.filter(Content.answer.like(search)).paginate(per_page=pages,error_out=True)
elif request.form.get('select_category') :
search = request.form.get('select_category')
contents = Content.query.filter(Content.category_content.like(search)).paginate(per_page=pages,error_out=True)
return render_template('knowledgebase.html',current_user=current_user, source=source, contents=contents, form=form, update_answer=update_answer)
@app.route('/addcontent', methods=['POST'])
@login_required
def addcontent():
form = AddContentForm()
if request.method == 'POST':
getkeywords = request.form.get('keywords')
getkeywordssplit = getkeywords.split(",")
# output = ['konten',' konten update',' konten update mingguan']
        # BUT items may carry a leading space, e.g. ' konten update'
keywords = []
for i in getkeywordssplit:
print(i)
s = i.lstrip()
keywords.append(s)
# output = ['konten','konten update','konten update mingguan']
for i in keywords:
answer = form.answer.data
category_content = form.select_category.data
try :
addcontent = Content(keyword=i, answer=answer, category_content=category_content, content_date=datetime.now())
db.session.add(addcontent)
db.session.commit()
flash("Keyword '{}' is succesfully added!".format(i),"success")
except IntegrityError :
db.session.rollback()
flash("Keyword '{}' is available, you can't duplicate!".format(i),"danger")
return redirect(url_for('knowledgebase'))
@app.route('/updatecontent', methods=['POST'])
@login_required
def updatecontent():
form = AddContentForm()
query_contents = Content.query.filter_by(keyword=form.keywords.data).first()
if request.method == 'POST':
query_contents.answer = form.answer.data
query_contents.category_content = form.select_category.data
db.session.commit()
flash("Keyword '{}' is updated".format(form.keywords.data),"success")
return redirect(url_for('knowledgebase'))
@app.route('/deletecontent/<int:id>', methods=['GET','POST'])
@login_required
def deletecontent(id):
contents = Content.query.filter_by(id=id).first()
db.session.delete(contents)
db.session.commit()
return redirect(url_for('knowledgebase'))
@app.route('/deletecontentselected', methods=['POST'])
@login_required
def deletecontentselected():
if request.method == 'POST':
selectedcontents = request.form.getlist('selectedcheckbox')
# print(selectedcontents)
for i in selectedcontents:
contentselected = Content.query.filter_by(id=i).first()
db.session.delete(contentselected)
db.session.commit()
return redirect(url_for('knowledgebase'))
@app.route('/editcontent/', methods=['GET','POST'], defaults={"page":1}) #pg
@app.route('/editcontent/<int:page>/<int:id>', methods=['GET','POST'])
@login_required
def editcontent(page,id): #pg
form = AddContentForm()
page = page #pg
pages = 5 #pg
query_keyword = Content.query.with_entities(Content.keyword).all()
source = [j for i in query_keyword for j in i]
# print((source))
query_category = Category.query.with_entities(Category.category).all()
query_category = [j for i in query_category for j in i]
contents = Content.query.order_by(Content.content_date.desc()).paginate(page,pages,error_out=False) #pg
contents_edit_by_id = Content.query.filter_by(id=id).first()
form.keywords.data = contents_edit_by_id.keyword
form.answer.data = contents_edit_by_id.answer
#to give dropdown list option
form.select_category.choices = query_category
#to input default value from query
form.select_category.data = contents_edit_by_id.category_content
update_answer = 1
    return render_template('knowledgebase.html', contents=contents, form=form, source=source, update_answer=update_answer) #pass the querysets so the full lists render in the template
####### #CATEGORY ############
@app.route('/category')
@login_required
def category():
form = AddCategoryForm()
query_category = Category.query.all()
return render_template('category.html', form=form, query_category=query_category, list_category=query_category)
@app.route('/addcategory', methods=['GET','POST'])
@login_required
def addcategory():
form = AddCategoryForm()
#this is for query in render template
query_category = Category.query.all()
if form.validate_on_submit():
try :
new_category = Category(category=form.category.data)
db.session.add(new_category)
db.session.commit()
flash('Category successfully added !','success')
except IntegrityError :
db.session.rollback()
flash('Category is duplicated !','danger')
return redirect(url_for('category'))
else :
flash('Add user error !','danger')
return render_template('category.html', form=form, query_category=query_category)
@app.route('/deletecategory/<int:id>', methods=['GET','POST'])
@login_required
def deletecategory(id):
query_id = Category.query.filter_by(id=id).first()
db.session.delete(query_id)
db.session.commit()
return redirect(url_for('category'))
@app.route('/editcategory/<int:id>', methods=['GET'])
@login_required
def editcategory(id):
form = AddCategoryForm()
query_category = Category.query.all()
query_edit_by_id = Category.query.filter_by(id=id).first()
form.category.data = query_edit_by_id.category
    return render_template('category.html', form=form, query_category=query_category) #pass query_category so the full category list shows in the category.html template
@app.route('/webhook', methods=['POST'])
def webhook():
r = request.form['data']
j = eval(r)
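    # NOTE: eval() on request data executes arbitrary expressions and is unsafe;
    # json.loads would be safer if the payload is valid JSON.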
print(j)
ask = j['text'].lower()
# query_answer = Content.query.filter_by(keyword=ask).all()
query_keyword = Content.query.with_entities(Content.keyword).all()
# output = [('konten',),('konten update',),('konten update mingguan',)]
source = [j for i in query_keyword for j in i]
# output = ['konten','konten update','konten update mingguan']
if ask in source:
check_answer = Content.query.filter(Content.keyword.like(ask)).all()
for i in check_answer:
reply = i.answer
else :
default_answer = Content.query.filter(Content.keyword.like('defaultanswer')).all()
for i in default_answer:
reply = i.answer
return {'autoreply':reply}
if __name__ == '__main__':
app.run(debug=True)
| [
"[email protected]"
] | |
b94dc6a66eb355fc0c8483affd2eb4f2e9472452 | 949fe26133c6fca2fed271a6af3b72827cb57a21 | /hm_03_创建游戏窗口.py | 3966580bd87a68169c16cee55cab9eb7fb68f600 | [] | no_license | luzhengshu/pygame | b7c37e6e9f7a8aa613d92bd979fdf1f1505d0195 | 74241f04a3ee599c14d02e28babf035d32287f8f | refs/heads/master | 2020-07-16T09:20:39.509335 | 2019-09-02T02:34:46 | 2019-09-02T02:34:46 | 205,762,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | import pygame
pygame.init()
#Create the game window
screen = pygame.display.set_mode((480,700))
while True:
pass
pygame.quit() | [
"[email protected]"
] | |
2da0d17fccea0d88c67b78668de83e5f1b8b0cfd | 9466dd56f270f86b133a82f49683a18535eea2b0 | /salt_code_examples/eapi/job_w_pillar.py | 4f425380454de9167728fc9ace5161474f61d8fc | [] | no_license | duboc/vra-repo | 3cfde9aefba30de9902315bc80ffd7e2e539b2d8 | 26bde2fcaf404c4b35dad495d9aa1629f60c0c97 | refs/heads/master | 2022-05-01T12:28:00.507744 | 2022-04-26T20:16:14 | 2022-04-26T20:16:14 | 237,105,663 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | from sseapiclient.tornado import SyncClient
client = SyncClient.connect('https://localhost', 'root', 'salt', ssl_validate_cert=False)
client.api.cmd.route_cmd(cmd='runner',
fun='state.orch',
masters='salt', # cluster ID
arg={'arg': ["myorch"], # orchestration state
'kwarg': {
"pillar": { # inline pillar data
"targets":"minion1",
"foo":"bar",
"fruit":"apples"
                                 }
                             }
                         })
| [
"[email protected]"
] | |
c36677f1e55f79a9361cade5160ee723c4a6b6c0 | 941a107603ea825053636f2fe8e59b5321fd1f4d | /9/9.6.py | d657e051a2d847b12f9fe398735db4cfbf467e76 | [] | no_license | LYTXJY/python | f8fa72e6138aa8a1865ffd32cf0ccfcea7709a73 | ce6c21b04fbc061475ac4207685535e4086156f6 | refs/heads/main | 2023-02-18T04:49:51.483317 | 2021-01-18T08:07:57 | 2021-01-18T08:07:57 | 330,585,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | #重载运算符
def test_1():
"""
"""
class Myclass():
"""
        Operator overloading demo.
"""
def __init__(self, name, age):
self.name = name
self.age = age
def __str__(self):
return "name:"+self.name + ";age:"+str(self.age)
__repr__ = __str__
def __lt__(self, record):
if self.age < record.age:
return True
else:
return False
def __add__(self, record):
return Myclass(self.name, self.age + record.age)
myc = Myclass("anna", 58)
myc1 = Myclass("gray", 22)
print(repr(myc))
print(myc)
print(str(myc))
print(myc<myc1)
print(myc+myc1)
if __name__ == "__main__":
test_1() | [
"[email protected]"
] | |
3851d46c9fab57c79ec762a76ce92f3eca0e286e | df998ba3511a4ce89bf7b9aa2bab9142aaa4581d | /steelframe/caret.py | 7c1e9efec9fdc63838e0bb372800fe03ea04a55f | [] | no_license | iampri/chatLinePy | 3986a2761352e1f5e1c136da69a1e8bad223674d | ab30599715210c987dcb9394cac02121ca5a5357 | refs/heads/master | 2020-12-02T19:41:15.315631 | 2017-06-17T11:42:21 | 2017-06-17T11:42:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,627 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils import timezone
import pytz
from lxml import html
import requests
import sys
import logging
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger('django')
class Caret():
def time(self):
ict = pytz.timezone('Asia/Bangkok')
return timezone.now().astimezone(ict).strftime('%H:%M %p')
def date(self):
ict = pytz.timezone('Asia/Bangkok')
return timezone.now().astimezone(ict).strftime('%A, %d %B %Y')
def lottery(self):
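        # Scrape the latest Thai government lottery results from lotto.mthai.com.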
page = requests.get('http://lotto.mthai.com/tag/%E0%B8%AB%E0%B8%A7%E0%B8%A2%E0%B8%A3%E0%B8%B1%E0%B8%90%E0%B8%9A%E0%B8%B2%E0%B8%A5')
tree = html.fromstring(page.content)
period = tree.xpath('//p[@class="small-title show-for-small-only text-center hide-for-print"]/text()')[0][8:]
prize1 = tree.xpath('//div[@id="prize-1"]/div/div/span/text()')
prizel2 = tree.xpath('//div[@id="prize-l2"]/div/div/span/text()')
prizef3 = tree.xpath('//div[@id="prize-f3"]/div/span/span/text()')
prizel3 = tree.xpath('//div[@id="prize-l3"]/div/span/span/text()')
prizen1 = tree.xpath('//div[@id="prize-n1"]/div/span/span/text()')
return 'งวดวันที่ '+ period + ", รางวัลที่ 1 " + prize1[0] + ', เลขท้าย 2 ตัว ' + prizel2[0] + ', เลขหน้า 3 ตัว ' + prizef3[0] + ', เลขท้าย 3 ตัว ' + prizel3[0] + ', รางวัลข้างเคียงรางวัลที่ 1 ' + prizen1[0]
| [
"[email protected]"
] | |
202a700ec11275da22c3aceb5b4395991f4bd643 | 36ab0757d3f1e296871570d7ad384fa6bd2eb9f1 | /dodger/sentences/test.py | 62a11c4a55dbc009372599f20c7f8da2b29e9156 | [] | no_license | khalidess24/Intro-to-CS-2018 | fe95cbc0e708ea1579d30e84148f4c304c6e7a9f | fc43521ffdc0ec1845941dbdc0c3d411e87e79a3 | refs/heads/master | 2020-04-11T09:45:07.317684 | 2018-12-14T19:57:48 | 2018-12-14T19:57:48 | 161,690,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | import os
path = os.getcwd()
sentencelist = os.listdir(path)
sentencelist.remove('.DS_Store')
sentencelist.remove('test.py')
# print(sentencelist.index('test.py'))
# print(sentencelist)
sentences = []
for i in sentencelist:
city = open(path+"/"+i,'r') #encoding='utf-8'
for line in city:
x = line.rstrip().split(",")
sentences.append({x[0]:x[1]})
print(sentences[0])
| [
"[email protected]"
] | |
bb79818277eeda215be00f03da0ae1edb2cf43e9 | 99986c5471051806f9bd66f35d1a52dd497df52b | /src/v5/context/context/settings.py | e145932ba71b3fc2c50f26e4da5341caad665d3d | [
"MIT"
] | permissive | Strangemother/project-conceptnet-graphing | 00b1ca87718efc5342ff47d7355e352b3efcf5b1 | 3fd1b3188088090c71c95b1a660770482123ce22 | refs/heads/master | 2020-03-23T02:20:54.532613 | 2019-08-01T01:03:27 | 2019-08-01T01:03:27 | 140,969,798 | 0 | 0 | MIT | 2019-10-21T16:47:52 | 2018-07-14T18:43:18 | JavaScript | UTF-8 | Python | false | false | 3,227 | py | """
Django settings for context project.
Generated by 'django-admin startproject' using Django 2.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'dxo4ou=jhi@xivxh8y($wu9l-sx%zj_3_%vik)cp)_nnzunz$m'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'first',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'context.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'context.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
b996dba7060ff95f4cea7a9b5b45c502415af7df | f3e6f25e6ae78c3ae7f24bf50e3fe865f9156dc0 | /xlsx2plj/xlsx2plj/xlsxtool.py | f29a0a866f600d1110350f91fee4e5cfae096fd5 | [
"Apache-2.0"
] | permissive | XP669/XLS2PLJ | 9eb24d044f03213979cb1e4bae9a88ebf9a740e3 | 40b48470d3ec686344486001a8264f8270ced0ac | refs/heads/master | 2020-04-18T03:56:23.151015 | 2019-01-23T16:08:55 | 2019-01-23T16:14:04 | 167,219,598 | 4 | 4 | null | null | null | null | GB18030 | Python | false | false | 5,104 | py | #-*- coding: gb2312 -*-
"""
Utility helpers.
"""
import os
import sys
import xlsxError
from config import *
from xml.dom.minidom import *
def exportMenu(msgIndex, YCallback = None, NCallback = None, OCallback = None):
"""
	Simple Y(yes)/N(no) prompt: Y passes, N exits, anything else continues.
	For now, assume the input is Y.
"""
if YCallback:
YCallback()
return
# xlsxError.info_input(msgIndex)
# msg = "------------请输入Y(yes), N(no)"
# if OCallback:
# msg += ',other(o)'
# print( msg,'------------------' )
# print( "您的输入:" )
# input_command = raw_input()
# while(input_command):
# if input_command.lower() == "y" or input_command == '\r':
# if YCallback:YCallback()
# return
# elif input_command.lower() == "n":
# if NCallback:NCallback()
# sys.exit(1)
# elif input_command.lower() == "o":
# if OCallback:OCallback()
# return
# else:
# print( "是Y还是N:", )
# input_command = raw_input()
def checkExtName(filePath, extName):
"""
	Check the file extension; include the leading dot in extName.
"""
if filePath == "" or extName == '':
return
fileName = os.path.split(filePath)[-1]
return fileName.rfind(extName) >= 0
def __checkPath(dirPath):
"""
	The path must include a drive name, e.g. E: or D:.
"""
driveName = os.path.splitdrive(dirPath)[0]
if not os.path.isdir(driveName):
raise xlsxError.xe(EXPORT_ERROR_CPATH, (dirPath, ))
__onCheckPath(dirPath)
return
def __onCheckPath(dirPath):
"""
	Recursively create the directory tree.
"""
if not os.path.isdir(dirPath):
prvdir = os.path.split(dirPath)[0]
if not os.path.isdir(prvdir):
__onCheckPath(prvdir)
os.mkdir(dirPath)
def createDir(dirPath):
"""
	Create the directory (the drive is validated first).
"""
__checkPath(dirPath)
#__checkOkCreate(dirPath)
return
def getFileMTime(fileName):
return os.stat(fileName)[-2]
########### string handling ####################
def inputList(var_list):
"""
"""
for element in var_list:
if isinstance(element, list):
inputList(element)
elif isinstance(element, str):
inputElement(element)
def inputElement(element):
"""
	Handle string encoding when printing an element.
"""
if isinstance(element, str):
#element.strip().replace
print( element )#.encode(FILE_CODE),
#else:
#print( element),
return
def str2List(error_str, pywinerr_list):
"""
	Parse a string into a nested list; bracket matching is not checked.
"""
begin_pos = error_str.find('(')
next_pos = error_str.find(')')
mid_pos = begin_pos
if begin_pos > -1 and next_pos > -1:
pywinerr_list.append([])
suberr_list = pywinerr_list[-1]
while next_pos > -1:
mid_pos = error_str.find('(', mid_pos+1, next_pos)
if mid_pos > -1:
next_pos = error_str.find(')', next_pos+1)
else:
break
str2List(error_str[begin_pos+1:next_pos], suberr_list)
str2List(error_str[:begin_pos-1] + error_str[next_pos+1:], suberr_list)
else:
for strVal in error_str.split(","):
pywinerr_list.append(strVal)
def val2Str(data):
if isinstance(data, float):
return str(int(data))
if isinstance(data, bytes):
return data.decode("utf-8")
else:
return data
################################################
def list_to_text(ls):
return tuple_to_text(ls)
def tuple_to_text(t):
text = '('
for x in t:
v = value_to_text(x)
text += v + ', '
text += ')'
return text
def dict_to_text_py(d):
text = '{\n'
for k, v in d.items():
text += "%s:%s,\n" % (k,v)
text += '}\n'
return text
depth = 1
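# Serialize a dict into a Lua table literal; the module-level depth counter is
# used so that top-level entries (depth == 2) each get their own line.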
def dict_to_text_lua(d):
global depth
text = '{'
if depth ==1:
text += '\n'
for k, v in d.items():
depth += 1
v = value_to_text_lua(v)
if v != 'None':
if k == None :
if depth == 2:
text += '\t' + v + ',\n'
else:
text += v + ","
else:
k = value_to_text_lua(k)
if depth == 2:
text += '\t[' + k + ']=' + v + ',\n'
else:
text += '[' + k + ']=' +v + ","
depth -= 1
text += '}'
if depth ==1:
text += '\n'
return text
def value_to_text_lua(v):
if isinstance(v, str):
return "'" + v.replace('\'', '\\\'') + "'"
if isinstance(v, bytes):
return v.decode("utf-8")
if isinstance(v, dict):
return dict_to_text_lua(v)
if isinstance(v, list):
return list_to_text_lua(v)
if isinstance(v, tuple):
return tuple_to_text_lua(v)
if isinstance(v, bool):
if v==True:
return 'true'
else:
return 'false'
return str(v)
def list_to_text_lua(ls):
return tuple_to_text_lua(ls)
def tuple_to_text_lua(t):
text = '{'
for x in t:
v = value_to_text_lua(x)
text += v + ', '
text += '}'
return text
def value_to_text(v):
if isinstance(v, str):
return "'" + v.replace('\'', '\\\'') + "'"
if isinstance(v, bytes):
return v.decode("utf-8")
if isinstance(v, dict):
		return dict_to_text_py(v)
if isinstance(v, list):
return list_to_text(v)
if isinstance(v, tuple):
return tuple_to_text(v)
return str(v)
#######################code############################
def toGBK(val):
if isinstance(val, str):
return val.encode("utf-8")
return val
def GTOUC(val):
return val
def STOU(val):
"""
SYS_CODE -> utf-8
"""
return val
def UTOF(val):
"""
utf-8 -> FILE_CODE
"""
return val
def FTOU(val):
"""
FILE_CODE ->UTF-8
"""
return val
| [
"[email protected]"
] | |
c4a415c300ba3143dbb10888062575bebca50457 | 45b72f5149bcce54a2b0d15ea5077cb3dac030ce | /light bulb/C_HNSWFLAT.py | 8447054b16d206ba35c9e029d0feb03879f9fbd8 | [] | no_license | davidmikulovsky/NIDS-detector | 603e8b69f8df631d3f1896f9a25823182fc157e7 | d1bf0df2ca59979488d674d1b645dd5747ccc302 | refs/heads/master | 2022-09-16T08:17:40.071862 | 2020-05-18T22:33:49 | 2020-05-18T22:33:49 | 264,273,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,341 | py | #!/usr/bin/python
import time
import numpy as np
import faiss
import os
import sys
import getopt
def main(sys):
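    # NOTE: the parameter named sys shadows the sys module inside main();
    # it holds the argv list passed in from __main__.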
np.seterr(over='ignore')
m = len(sys)
print ("The script has the name %s" % (sys[0]))
print("initiate: %s " % (sys[0]))
print ("Number of arguments: ", m, " arguments.")
input_file_dataset = sys[1]
input_file_queries = sys[2]
#k = int(sys[3])
var = sys[4]
run = sys[5]
ground_truth_D = sys[6]
ground_truth_I = sys[7]
error = float(sys[8])
print("check of the arguments")
for i in range(m):
print("arguments: %s " % (sys[i]))
dataset = os.path.realpath(input_file_dataset)
queryset = os.path.realpath(input_file_queries)
groundtruth_D = os.path.realpath(ground_truth_D)
groundtruth_I = os.path.realpath(ground_truth_I)
#ground_truth = os.path.realpath(output_file_gt)
a_vectors = np.loadtxt(dataset).astype(np.float32)
query_set = np.loadtxt(queryset).astype(np.float32)
GT_D = np.loadtxt(groundtruth_D).astype(np.float32)
GT_I = np.loadtxt(groundtruth_I).astype(np.float32)
k = len(GT_D[0])
n_db = len(a_vectors)
d = len(a_vectors[0]) #dimension of database
n_q = len(query_set)
fo = len(a_vectors)
nlist = int(float(fo) / k) #number of clusters
nlist = 2* nlist
nprobe = int((k/2)+1) #how many times repeat search
print("check of dimensions")
print("param n_db", n_db)
print("param d", d)
print("param k", k)
print("param n_q", n_q)
print("param nlist", nlist)
print("param nprobe", nprobe)
print("param error", error)
print("faiss ...")
    start1 = time.perf_counter()  # time.clock() was removed in Python 3.8
index = faiss.IndexHNSWFlat(d, 32)
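    # The second argument (32) is M, the number of neighbors kept per node in the HNSW graph.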
# training is not needed
# this is the default, higher is more accurate and slower to
# construct
index.hnsw.efConstruction = 128
print("add")
# to see progress
index.verbose = True
index.add(a_vectors)
    stop1 = time.perf_counter()
#----#start search
    start2 = time.perf_counter()
index.hnsw.search_bounded_queue = True
index.hnsw.efSearch = 128
D, I = index.search(query_set, k) # actual search
    stop2 = time.perf_counter()
#---#end
#run recall
recall_i = recall_similar_match( GT_I, I)
recall_d = recall_with_error( GT_D, D, error)
stringname_D = 'D' + sys[0][1:9] + '_' + var +'.txt'
stringname_I = 'I' + sys[0][1:9] + '_' + var +'.txt'
np.savetxt(stringname_D, D)
np.savetxt(stringname_I, I)
time1 = stop1 - start1
time2 = stop2 - start2
#run, filename, index_time, build_time, recall_D, recall_I, n_db, n_q, d, k
print_time(run, sys[0], time1, time2, recall_d, recall_i, n_db, n_q, d, k, error)
print("finish")
#------#
#helper functions
#------#
def print_time(run, filename, index_time, build_time, recall_D, recall_I, n_db, n_q, d, k , error):
# print("finish (total cpu time): ", (time.clock() - start)/60)
total_time = index_time + build_time
speed = int(n_db/ total_time)
#instr = "'{0}', '{1}', '{2}', '{3}', '{4}', '{5}', '{6}'".format(softname, procversion, int(percent), exe, description, company, procurl)
with open('outputfile.txt', 'a') as file:
file.write( "{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}, {10}, {11} , {12} \n".format( run, filename, index_time, build_time, total_time, speed, recall_D, recall_I, n_db, n_q, d, k, error))
#recall function for values
#recall function for values
def recall_similar_match( GT_I, I):
if len(I) != len(GT_I) or len(I[0]) != len(GT_I[0]):
print("dimensions of ground truth are not equal to the result ")
return "error"
counter = 0
for i in range(len(I)):
for j in range(len(I[i])):
# check each value in I and, if they are contained in the list of GT_I[i]
# if not count error
#print(" ", GT_I[i][j], I[i][j])
if I[i][j] not in GT_I[i]:
counter = counter + 1
#print("wrong values found ", counter)
dimension = len(GT_I) * len(GT_I[0])
print("total values in gt_I vs. errors ", dimension, counter)
rec = float(dimension) - float(counter)
#print(" rec", rec)
rec = (rec/float(dimension))*100
#print(" rec", rec)
#print ("recall by correct index: ", recall)
print ("recall by correct index: ", rec, " %")
return rec
#recall function for values
def recall_with_error( GT_D, D, error):
if len(D) != len(GT_D) or len(D[0]) != len(GT_D[0]):
print("dimensions of ground truth are not equal to the result ")
return "error"
counter = 0
sum1 = 0
sum2 = 0
for i in range(len(D)):
sum1 = np.sum(D[i])
sum2 = np.sum(GT_D[i])
sum2 = sum2 * (1+error)
#print(np.sqrt(sum1), np.sqrt(sum2))
if sum1 > sum2:
#print (sum (D[i]), sum ((GT_D[i] * (1 + error))))
#print (sum1, sum2)
counter = counter + 1
sum1 = 0
sum2 = 0
dimension = len(D)* len(D[0])
rows = len(D)
recall = (((float(rows) - float(counter)) / float(rows) ) * 100)
print("Total recall for D {0} \n".format(recall))
print ("total values in gt vs. errors ", rows, counter)
return recall
if __name__ == "__main__":
main(sys.argv[0:])
| [
"[email protected]"
] | |
430f9a2e0283749bef2a1729386ed9aea46c6881 | 90eeee8c1b675cf8a819bb3e7ba5fe2ffcff1152 | /LogTest.py | 655c7e23a6ae14707756010c899cfa628b1606b0 | [] | no_license | psu-powerlab/EGoT-ME | 5d7b49450a9521059732d483f18592334fe961c8 | fa804b56faffcfb3e3de780b01053dbbb0974d0e | refs/heads/main | 2023-05-11T00:47:30.321817 | 2021-04-05T19:17:44 | 2021-04-05T19:17:44 | 354,949,394 | 0 | 0 | null | 2021-04-09T21:20:53 | 2021-04-05T19:30:58 | null | UTF-8 | Python | false | false | 6,707 | py | '''
This is a test script that will either develop into or be replaced by the eventual 'Log API' as designed for the EGot
Modeling Environment (ME).
~Sean Keene, Portland State University, 2021
[email protected]
'''
from gridappsd import GridAPPSD, goss
from gridappsd import topics as t
from gridappsd.simulation import Simulation
from gridappsd.topics import simulation_input_topic, simulation_output_topic, simulation_log_topic
import time
import json
import pandas as pd
import csv
global simulation_id, end_program, flag, w
flag = 0
def callback(headers, message):
global end_program, flag, w
publish_to_topic = simulation_input_topic(simulation_id)
if type(message) == str:
message = json.loads(message)
if 'message' not in message:
if message['processStatus'] == 'COMPLETE' or \
message['processStatus']=='CLOSED':
print('The End')
gapps.query_object_dictionary(model_mrid, None, '_08175e8f-b762-4c9b-92c4-07f369f69bd4')
end_program = True
else:
#Uncomment for troubleshooting
#print(message)
'''
Only runs the first output query. Grabs the keys from the message, looks them up for their real names, and
writes the header with the real names while still using the mrids as headers for dictwriter purposes.
'''
if flag == 0:
header_mrids = message['message']['measurements'].keys()
header_names = []
print(header_mrids)
for i in header_mrids:
lookup_mrid = next(item for item in meas_object_list if item['measid'] == i)
lookup_name = lookup_mrid['name']
header_names.append(lookup_name)
w = csv.DictWriter(f, header_mrids)
print(header_names)
header_names_dict = dict(zip(list(header_mrids),header_names))
w.writerow(header_names_dict)
flag = 1
else:
pass
print(type(message))
w.writerow(message['message']['measurements'])
def callback2(headers, message):
global end_program
publish_to_topic = simulation_log_topic(simulation_id)
if type(message) == str:
message = json.loads(message)
print(message)
if 'message' not in message:
if message['processStatus'] == 'COMPLETE' or \
message['processStatus']=='CLOSED':
print('The End')
end_program = True
else:
print(message)
#Connect to GridAPPS
gapps = GridAPPSD("('localhost', 61613)", username='system', password='manager')
#Define topic and message for id query
topic = t.REQUEST_POWERGRID_DATA
message = {
"requestType": "QUERY_MODEL_NAMES",
"resultFormat": "JSON"
}
#Query the model names. We know the mrid already, but this gives us our simulation id as well.
x = gapps.get_response(topic, message)
simulation_id = x["id"]
model_mrid = "_49AD8E07-3BF9-A4E2-CB8F-C3722F837B62" #for 13 node feeder
# #Playing around with queries. This gets us object IDs from the 13 node model.
# topic = "goss.gridappsd.process.request.data.powergridmodel"
# message = {
# "requestType": "QUERY_OBJECT_IDS",
# "resultFormat": "JSON",
# "modelId": model_mrid
# }
#
# object_dict = gapps.get_response(topic, message)
# print('Object dictionary: \n')
# print(object_dict)
# print('\n')
# object_mrid_list = object_dict['data']['objectIds']
# print('Object List: \n')
# print(object_mrid_list)
#Initialize lookup list for measurement mrid names
topic = "goss.gridappsd.process.request.data.powergridmodel"
message = {
"modelId": model_mrid,
"requestType": "QUERY_OBJECT_MEASUREMENTS",
"resultFormat": "JSON",
}
object_meas = gapps.get_response(topic, message)
meas_object_list = object_meas['data']
#Example for how to grab a name given a measurement id
testvar = next(item for item in meas_object_list if item['measid'] == '_08175e8f-b762-4c9b-92c4-07f369f69bd4')
print(testvar)
name = testvar['name']
print(name)
#Playing around with simulations. See if I can get a 13 node, 120 second simulation running.
topic = t.REQUEST_SIMULATION
run_config_13 = {
"power_system_config": {
"GeographicalRegion_name": "_73C512BD-7249-4F50-50DA-D93849B89C43",
"SubGeographicalRegion_name": "_ABEB635F-729D-24BF-B8A4-E2EF268D8B9E",
"Line_name": "_49AD8E07-3BF9-A4E2-CB8F-C3722F837B62"
},
"application_config": {
"applications": []
},
"simulation_config": {
"start_time": "1570041113",
"duration": "21",
"simulator" : "GridLAB-D",
"timestep_frequency": "1000",
"timestep_increment": "1000",
"run_realtime": True,
"simulation_name": "ieee123",
"power_flow_solver_method": "NR",
"model_creation_config":{
"load_scaling_factor": "1",
"schedule_name": "ieeezipload",
"z_fraction": "0",
"i_fraction": "1",
"p_fraction": "0",
"randomize_zipload_fractions": False,
"use_houses": False
}
},
# #Test Config is optional! This example is copied from the hackathon for syntax comparison
# "test_config": {
# "events": [{
# "message": {
# "forward_differences": [
# {
# "object": "_6C1FDA90-1F4E-4716-BC90-1CCB59A6D5A9",
# "attribute": "Switch.open",
# "value": 1
# }
# ],
# "reverse_differences": [
# {
# "object": "_6C1FDA90-1F4E-4716-BC90-1CCB59A6D5A9",
# "attribute": "Switch.open",
# "value": 0
# }
# ]
# },
# "event_type": "ScheduledCommandEvent",
# "occuredDateTime": 1570041140,
# "stopDateTime": 1570041200
# }]
# },
}
#Start the simulation....
gapps_sim = GridAPPSD()
simulation = Simulation(gapps_sim, run_config_13)
simulation.start_simulation()
simulation_id = simulation.simulation_id
print(simulation_id)
#Test the callback function
sim_output_topic = simulation_output_topic(simulation_id)
f = open('csvwritertest.csv', 'w')
gapps.subscribe(sim_output_topic, callback)
sim_log_topic = simulation_log_topic(simulation_id)
gapps.subscribe(sim_log_topic, callback2)
def _main():
global end_program
end_program = False
    print('Waiting for the simulation to finish...')
while not end_program:
time.sleep(0.1)
if end_program:
f.close()
print('bye')
if __name__ == "__main__":
_main()
| [
"[email protected]"
] | |
e7a59d520063d29f9bbcf51ad325fcc390863548 | 80b489a53f7f211a09920affa5998a0724d83e71 | /webapp/app/main/__init__.py | 886ede6fcf69db65c60813f7f98f6d2bb63254f9 | [] | no_license | mustafatasdemir/apkinspector | d9ec9d85da5e0014edaf0d98119561bf3f87dffc | 1bd0d044b7daef4efda21c985393f8d73722a074 | refs/heads/master | 2016-09-06T03:25:49.212735 | 2014-12-12T03:52:56 | 2014-12-12T03:52:56 | 24,392,752 | 3 | 0 | null | 2014-10-28T03:57:43 | 2014-09-23T23:10:55 | Java | UTF-8 | Python | false | false | 660 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# Import Statements
from flask import Blueprint
main = Blueprint('main', __name__)
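# Views and error handlers are imported at the bottom, after `main` is defined,
# to avoid a circular import (those modules import `main` from this package).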
from . import views, errors | [
"[email protected]"
] | |
00d65bebe2889190bd8e0b186f9d2adff0b3397d | d25ce8e431af70e699ca420dd6f0968cf82afea0 | /Baekjoon/Backtracking/15649.py | b2ae286eacc65300f1feb876d67b5fd9adc21deb | [] | no_license | taehee-kim-dev/Problem-solving | d30fe67673056ba75cdb14ae4ec68be040afeea7 | c0fb94500a0a43fafcf0a5878ae4e6e3f362ec27 | refs/heads/master | 2021-09-28T10:47:43.286288 | 2021-09-14T17:05:09 | 2021-09-14T17:05:09 | 203,376,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,226 | py | import sys
def solve(level): # parameter = current tree level
    if level == M: # when the tree level reaches M, print the whole result list
        for i in result:
            print(i, end=' ') # space-separated
        print() # newline after the result list is printed
        return
    else:
        for i in range(1, N+1): # one level: candidate numbers 1..N
            if not visited[i]: # if number i has not been picked yet,
                visited[i] = True # mark it as picked,
                result.append(i) # append i to the result list
                solve(level + 1) # explore the nodes of the next level
                '''
                When the call for the next level returns we are back at the
                current level, so flip the chosen number's flag in the visited
                list back from True to False and remove the last value from
                the result list.
                '''
                visited[i] = False
                result.pop()
                # then examine the next candidate
N, M = map(int, sys.stdin.readline().split())
visited = [False for _ in range(N+1)]
result = []
solve(0)
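# Example (stdin "3 1"): prints 1, 2 and 3 on separate lines.
# Example (stdin "4 2"): prints all 12 ordered pairs of distinct numbers from 1..4.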
| [
"[email protected]"
] | |
034c61b89e3d11daded3410ec3be279a44cdaf8f | 8cecbbad4062a87b8a9ef68339f2228bcd8d053f | /2013-09-27/直线.py | c6e23faa9e70b653282717059a6a21d9e8d769eb | [] | no_license | XianYX/Python- | 0a5fe645fe20b5d480d8f13d9d5c7e3e79d23016 | ae79ce935b84de59caaa82acf535e0c89f130e79 | refs/heads/master | 2020-12-30T23:23:21.526941 | 2014-10-17T17:38:48 | 2014-10-17T17:38:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,491 | py | # -*- coding: utf-8 -*-
"""
Spyder Editor
python 2.7
This program is to draw a line with the given coordinates and calculate the length between them.
It's made by Duan Yi.
Student number:1130310226
E-mail:[email protected]
version:1.1
date:2013-9-27 03:09:54
"""
x1,y1=eval(raw_input("please enter the coordinates of the first point:")) #input the coordinates of the first point
x2,y2=eval(raw_input("please enter the coordinates of the second point:")) #input the coordinates of the second point
length=((x1-x2)**2+(y1-y2)**2)**0.5 #calculate the length between the two points
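# Example: for points (0,0) and (3,4) the computed length is
# ((0-3)**2+(0-4)**2)**0.5 == 5.0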
import turtle #import turtle module
turtle.pensize(2) #set pen size to 2 pixels
turtle.penup() #pull the pen up
turtle.goto(x1,y1) #put the pen to (x1,y1)
turtle.pendown() #pull the pen down
turtle.goto(x2,y2) #draw the line
turtle.penup() #pull the pen up
turtle.goto(x1,y1-10) #put the pen to the top of the first point
turtle.pendown() #pull the pen down
turtle.write((x1,y1),False) #write the coordinate of the first point
turtle.penup() #pull the pen up
turtle.goto(x2,y2+10) #put the pen to the top of the second point
turtle.pendown() #pull the pen down
turtle.write((x2,y2),False) #write the coordinate of the second point
turtle.penup() #pull the pen up
turtle.goto(0.5*(x1+x2)+10,0.5*(y1+y2)+10) #put the pen at the middle of the line
turtle.pendown() #pull the pen down
turtle.write("the length of the line is : "+str(length),False,align="left") #write the length of the line
| [
"[email protected]"
] | |
67327a4756371c5388c16ae850222b175ec36862 | 379117f3152210e3d463f38fd77ba5954b286f9a | /smpt_attachment.py | 7949d66e1751348308e8ac17bd881b83b89b40fb | [] | no_license | gultomian/pisecuritysystem | 9471fc84ea5bf7cb960e6724ec086249652d3c53 | 6c3ee45239f1e223d489c280c7ef6487593c0b73 | refs/heads/master | 2022-12-12T21:24:07.461486 | 2020-09-09T10:01:24 | 2020-09-09T10:01:24 | 249,036,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,006 | py | import os, glob
import time
import email, smtplib, ssl
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# starttime/endtime are kept by the surrounding camera loop in the original
# program; they are initialised here so this snippet also runs standalone.
starttime = time.time()
endtime = time.time()
if int(endtime-starttime)>=60:
starttime=time.time()
    daftar_file = glob.glob(os.path.join('Takdikenal', '*.jpg'))  # "Takdikenal" = "unrecognised"
file_terbaru = max(daftar_file, key=os.path.getctime)
subject = "Seseorang tidak dikenal berada didepan pintu"
body = "This is an email with attachment sent from Python"
sender_email = "[email protected]"
receiver_email = "[email protected]"
password = 'gultom123'
# Create a multipart message and set headers
message = MIMEMultipart()
message["From"] = sender_email
message["To"] = receiver_email
message["Subject"] = subject
message["Bcc"] = receiver_email # Recommended for mass emails
# Add body to email
message.attach(MIMEText(body, "plain"))
filename = file_terbaru # In same directory as script
    # Open the image file in binary mode
with open(filename, "rb") as attachment:
# Add file as application/octet-stream
# Email client can usually download this automatically as attachment
part = MIMEBase("application", "octet-stream")
part.set_payload(attachment.read())
# Encode file in ASCII characters to send by email
encoders.encode_base64(part)
# Add header as key/value pair to attachment part
part.add_header(
"Content-Disposition",
f"attachment; filename= {filename}",
)
# Add attachment to message and convert message to string
message.attach(part)
text = message.as_string()
# Log in to server using secure context and send email
context = ssl.create_default_context()
with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as server:
server.login(sender_email, password)
server.sendmail(sender_email, receiver_email, text) | [
"[email protected]"
] | |
b46bcfcde0bf2c4c8d5d4dcd19f9df7a40a4baa8 | 409a9a748ff385fe51e5a4d390395fb023a31dca | /aws.py | 1377a0c88a912b2f6133ba1115066048e7640a53 | [
"MIT"
] | permissive | sinoosco/AWS-Uploader | 3355b97994880afd0c0a55fb74530439f11d9b1e | d6f4174e25838e3798756197bcfd3947a5bac9e7 | refs/heads/main | 2023-07-05T19:11:54.124573 | 2021-08-06T20:58:13 | 2021-08-06T20:58:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,430 | py | import os
import logging
import boto3
from botocore.exceptions import ClientError
logging.basicConfig(level=logging.INFO)
def get_s3_resource(*, endpoint_url, aws_access_key_id, aws_secret_access_key):
try:
s3_resource = boto3.resource(
"s3",
endpoint_url=endpoint_url,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
)
return s3_resource
except Exception as e:
        logging.error(e)
def get_base_name(path):
if not os.path.isdir(path):
raise ValueError("Directory is not valid or doesn't exist.")
try:
base_name = os.path.basename(os.path.normpath(path))
return base_name
except Exception as e:
logging.error(e)
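# Example: for an existing directory, get_base_name("/home/user/photos/")
# returns "photos" (normpath strips the trailing slash before basename is taken).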
def upload(*, bucket_name, s3_resource, directory, ACL):
try:
bucket = s3_resource.Bucket(bucket_name)
base_name = get_base_name(directory)
count = 0
print()
print(f"Uploading files to {bucket_name}...")
print()
for dirname, dirs, files in os.walk(directory):
for filename in files:
dirname = dirname if dirname.endswith("/") else dirname + "/"
object_name = dirname.split(base_name)[-1][1:]
file_path = dirname + filename
object_name = object_name + filename
with open(file_path, "rb") as file:
bucket.put_object(
ACL=ACL,
Body=file,
Key=object_name
)
print(f"Uploaded '{object_name}'")
count += 1
print()
print(f"{count} files uploaded.")
except ClientError as e:
logging.error(e)
def main():
directory = input("Directory: ")
endpoint_url = input("Endpoint URL: ")
aws_access_key_id = input("AWS access key: ")
aws_secret_access_key = input("AWS secret key: ")
acl = input("ACL: ")
bucket_name = input("Bucket name: ")
s3_resource = get_s3_resource(
endpoint_url=endpoint_url,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
)
upload(
bucket_name=bucket_name,
s3_resource=s3_resource,
directory=directory,
ACL=acl,
)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
e2d4dc5f52241370cce0140f72b800f0d94fc3ca | 3d2f1e46e697c8e935fa4172a117594fb5f83e06 | /list_query.py | ed445f4bbd69c986af29bcd049b909499e09f759 | [] | no_license | PrasannaVenkadesh/Repeat-counter | cb2a0ef5754a3be59c93cdf9263819033b84e366 | 98661d8e3d1a49cc174e4c39ae9ff97d7af7ead5 | refs/heads/master | 2020-03-30T21:32:32.486748 | 2013-01-10T09:51:53 | 2013-01-10T09:51:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,102 | py | #!/usr/bin/python
#A Program to get input as a list and display the number of times each word occurred in the list along with the word, regardless of case. Filters out and removes punctuation.
import re
import string
#main module starts here
def main():
#Input Part starts here
number_of_words = input("Enter number of words: ")
list_data = []
print "Enter the words 1 per line by pressing Enter after each word"
for i in range(0,number_of_words):
list_data.append(raw_input("Enter Word: "))
#Input part Ends here & Processing starts here
converted_string = " ".join(list_data)
converted_string = converted_string.lower()
for c in string.punctuation:
converted_string = converted_string.replace(c,"")
relisted_data = converted_string.split()
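    # e.g. ["Hello,", "hello", "World!"] -> "hello hello world" -> ['hello', 'hello', 'world']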
dict_data = {};
for item in relisted_data:
        if item in dict_data:
dict_data[item] = dict_data[item] + 1
else:
dict_data[item] = 1
#Processing Ends here and Output starts below
for i in dict_data:
print i, dict_data[i]
#Output Ends here
#main module ends here
if __name__ == "__main__":
main() #Calling main module | [
"[email protected]"
] | |
0487d2de2cc28c9c08f3c0b7c74b40c5609e8228 | 81b848a8944a9fd26a2e80e07894d86a9d4ce861 | /python/04. Sets/008. Set difference() Operation.py | da0ad46116033dfbce5d2645235a52525a37da52 | [] | no_license | Labannya969/Hackerrank-practice | c719cdf362c501b00ec6a84505758e4ee7cfc694 | 7a76e36b860633cbb0d965e19710be09730b3e9e | refs/heads/main | 2023-03-22T16:14:44.501407 | 2021-03-11T14:24:06 | 2021-03-11T14:24:06 | 346,398,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py |
# Enter your code here. Read input from STDIN. Print output to STDOUT
n=int(input())
set1= set(map(int,input().split()))
n=int(input())
set2= set(map(int,input().split()))
set3= set1.intersection(set2)
res= set1.difference(set3) # same as set1.difference(set2): elements only in set1
print(len(res))
| [
"[email protected]"
] | |
59638a7c44b257a3393e174cc395f551766b9d9b | 12ad72a994d5899854324a7854ef8cb99d24c822 | /src/hc.recipe.django/hc/recipe/django/__init__.py | 9e84a0ae85c0a345acaa41783d6bb5fa46ffa74d | [] | no_license | hcwebdev/hc.recipe.django | 6648ed35b599e34aaf1f76277ff8e9c17f46b691 | 8ecc3d4cd57d6f7a57e0b88b919f0e927bdb3907 | refs/heads/master | 2020-04-21T16:34:21.099336 | 2009-11-20T16:55:28 | 2009-11-20T16:55:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43 | py | from hc.recipe.django.recipe import Recipe
| [
"[email protected]"
] | |
3115c97bd689389dd23eb5b1eac7eb30f025e4a5 | 62f3825654e80d63172f67a144d2c237a11c2d12 | /chapter3_pythonwebdriverAPI/sec03_3.14.0.py | 1a0798737c9db15e52d12ade3ed408b4fa633fcc | [] | no_license | skyaiolos/selenium2_python | 128ca6237bf744b4f293047ae8eab14ea8d6298b | 2edd2105237ddce183baca39b38b7799f688e592 | refs/heads/master | 2021-01-01T17:59:17.803131 | 2017-07-31T15:30:08 | 2017-07-31T15:30:08 | 98,211,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 976 | py | __author__ = "Jianguo Jin ([email protected])"
# !/usr/bin/python3
# -*- coding:utf-8 -*-
# Created by Jianguo on 2017/7/28
"""
Description:
第十四节 分页处理
对于 web 页面上的分页功能,我们一般做做以下操作:
? 获取总页数
? 翻页操作(上一页,下一页)
对于有些分页功能提供上一页,下一页按钮,以及可以输入具体页面数跳转功能不在本例的讨论范围
"""
from selenium import webdriver
import time, os
driver = webdriver.Chrome()
driver.get("http://www.baidu.com")
driver.implicitly_wait(10)
# Search
driver.find_element_by_id("kw").send_keys("selenium")
driver.find_element_by_id("su").click()
time.sleep(3)
# Drag the page scrollbar to the bottom
js = "var q=document.documentElement.scrollTop=10000"
driver.execute_script(js)
time.sleep(3)
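# An equivalent, more explicit scroll using the standard DOM API:
# driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")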
# Move the scrollbar back to the top of the page
js_ = "var q=document.documentElement.scrollTop=0"
driver.execute_script(js_)
time.sleep(3)
| [
"[email protected]"
] | |
1dcbf3912d75add43aef622fe710efc1fe663551 | faabe34af6297530617395bcc6811350765da847 | /platforms/leetcode/IntersectionofTwoLinkedLists.py | 2c79b3b318933cc3c5d54aafd59a33988fafebfa | [] | no_license | pqnguyen/CompetitiveProgramming | 44a542aea299bd553dd022a9e737e087285b8b6d | 27330e7ff79c4ac883d7e1fcdf2f0d30939c3f78 | refs/heads/master | 2021-07-21T12:15:47.366599 | 2021-06-27T14:58:48 | 2021-06-27T14:58:48 | 132,837,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 968 | py | # https://leetcode.com/explore/learn/card/linked-list/214/two-pointer-technique/1215/
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def getIntersectionNode(self, headA, headB):
"""
        :type headA, headB: ListNode
:rtype: ListNode
"""
la, tailA = self.getLength(headA)
lb, tailB = self.getLength(headB)
if tailA != tailB: return None
while la > lb:
headA = headA.next
la -= 1
while lb > la:
headB = headB.next
lb -= 1
while headA != headB:
headA = headA.next
headB = headB.next
return headA
def getLength(self, head):
length = 0
while head and head.next:
length += 1
head = head.next
if head: length += 1
return length, head
| [
"[email protected]"
] | |
363701b069422fe54a898b6efcb681361138fa40 | d6c04b7c6e823f2c00806f3e662728203dc0ba41 | /bot-trade/exchanges/Model/TradingSymbol.py | b3e8b418a78ee0e72dc41cf135ccd4859b74628a | [] | no_license | yathishbl60/bot-trade | 9e14de7ee7df6822b2c8ccedf8df79d44cf3e452 | 60d9fe3f19874410ca7b8b9c8dea8ecc945f5d1e | refs/heads/master | 2022-12-12T21:01:50.098763 | 2020-04-06T10:37:49 | 2020-04-06T10:37:49 | 248,813,994 | 0 | 0 | null | 2022-12-08T03:51:07 | 2020-03-20T17:26:29 | Python | UTF-8 | Python | false | false | 71 | py | class TradingSymbol:
def __init__(self):
self.symbol = ""
| [
"[email protected]"
] | |
ba180da8366a1c4dc186e7e6d77e49b2a59441f6 | 3ce0179378c82797a4c02aaf7a1a3bb485b36e3b | /polici.py | 5031632fec55ea6f290b3109c18b975ccd3dac3e | [] | no_license | pedroleon21/intro-prog-comp | 3ccb4edcb2c4e62ea7d747f19a14f1affc1bb4cf | 78987afa70915279fbdc1955751f836cb1c43de1 | refs/heads/master | 2020-09-08T21:15:56.331139 | 2019-12-17T17:20:37 | 2019-12-17T17:20:37 | 221,243,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | qtd=int()
# Assumes the standard "police recruits" task: each value on the input line is
# an event, -1 = a crime, a positive value = that many newly hired officers;
# count the crimes that happen while no officer is available.
events = list(map(int, input().split()))
police = crime = 0
for event in events:
    if event == -1:
        if police == 0:
            crime += 1   # no officer free -> the crime goes untreated
        else:
            police -= 1  # one officer handles the crime
    else:
        police += event  # new recruits join the force
print(crime) | [
"[email protected]"
] | |
397e9d6b78cd7d94329a85281f462a17c4a43b7b | ccc056a86af7d3209101e427286e441d7f8020af | /cifar/cat/to-format.py | 653c043e98b6b560a7c4252e914b6d8a07389d69 | [] | no_license | kkleidal/tensorflow-tutorials | 6a31b592dcf56f6f2d2ab1bb50f650dc9f5383ce | 4ff34b816d2a3530855fd97080b26316ed584185 | refs/heads/master | 2021-01-12T05:59:40.899093 | 2017-01-06T00:19:12 | 2017-01-06T00:19:12 | 77,267,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | import numpy as np
from PIL import Image
CAT_LABEL = 3
im = Image.open("cat-small.png") #Can be many different formats.
pix = im.load()
width, height = im.size
out = np.zeros((3, 32, 32), int)
for y in xrange(height):
for x in xrange(width):
for c, val in enumerate(pix[x,y]):
out[c, 4 + y, 4 + x] = val
raw = [CAT_LABEL] + out.flatten().tolist()
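# CIFAR-10 binary record layout: 1 label byte followed by 3072 pixel bytes
# (3 channels x 32 x 32, channel-major), matching the flatten order above.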
with open("cat-small.bin", "wb") as f:
f.write(bytearray(raw))
| [
"[email protected]"
] | |
9504b7b448b237c26b47dee53167b5ead3bebe15 | cb5fdb8b7c64cf6c4292b355f68e106e6d6c95c3 | /实验二/exp2.5.py | c60e24529154606e28759107b150e621a7339113 | [] | no_license | 13076883047/Data-Mining | 5e87334a5dde48341649198105ae9ad512f9dc99 | e20349703a21bf60b0d7409c07df6e3637181197 | refs/heads/main | 2023-02-07T18:50:43.183496 | 2020-12-26T08:14:20 | 2020-12-26T08:14:20 | 324,275,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,167 | py | # 5. 根据相关矩阵,找到距离每个样本最近的三个样本,得到100x3的矩阵(每一行为对应三个样本的ID)输出到txt文件中,以\t,\n间隔
import pandas as pd
import numpy as np
import seaborn as sn
import math
import matplotlib.pyplot as plt
pd.set_option('display.max_rows',None) # show all rows
pd.set_option('display.max_columns',None) # show all columns
def mean_list(list)->float: ## return the mean of the list, skipping missing (NaN) values
sum = float(0)
n = float(len(list))
for num in list:
if math.isnan(num):
n -= 1
continue
sum += num
    # no valid values in the list
if n == 0:
return float('nan')
mean = sum / n
return mean
def SD_list(list,mean):
'''
    Return the standard deviation of the list, skipping missing (NaN) values.
'''
    #return NaN directly when the mean is NaN
if math.isnan(mean):
return float('nan')
sumX2=float(0)
n = float(len(list))
for num in list:
        #skip missing values
if math.isnan(num):
n-=1
continue
sumX2 += pow(num,2)
SD_list = math.sqrt(sumX2/n - pow(mean,2))
return SD_list
def z_score(df,colList=None):
    #if no column names are given, z-score normalise the whole dataframe
if colList is None:
        #get the list of dataframe column labels
colLabel = df.columns.values.tolist()
for label in colLabel:
cList = df[label].tolist()
cMean = mean_list(cList)
cSD = SD_list(cList,cMean)
cList = df[label].tolist()
rowIndex = df.index.values.tolist()
i=0
for index in rowIndex:
new_num = round((cList[i]-cMean)/cSD,5)
i+=1
df.loc[index,label] = new_num
return
    #otherwise z-score normalise only the specified columns
for label in colList:
cList = df[label].tolist()
cMean = mean_list(cList)
cSD = SD_list(cList,cMean)
df[label].fillna(cMean,inplace=True)
cList = df[label].tolist()
rowIndex = df.index.values.tolist()
i=0
for index in rowIndex:
new_num = round((cList[index]-cMean)/cSD,5)
i+=1
df.loc[index,label] = new_num
return
def corMatrix(df):
colLabels = df.columns.values.tolist()
    #number of columns, used as the dimension here
    dimension = df.shape[1]
    #number of rows, used as the sample count here
    sampleNum = df.shape[0]
    #list that stores the mean of each dimension
    col_mean = []
    #compute each column's mean via mean_list(list)
for column in colLabels:
c_mean = mean_list(df[column].values.tolist())
col_mean.append(round((c_mean),8))
    #start building the correlation matrix
correlationMatrix = []
for i in range(dimension):
valuesRow = []
for j in range(dimension):
icol = df.iloc[:,i].tolist()
jcol = df.iloc[:,j].tolist()
for k in range(sampleNum):
icol[k] = round(icol[k]-col_mean[i],7)
jcol[k] = round(jcol[k]-col_mean[j],7)
result = 0
for index in range(sampleNum):
result+=icol[index]*jcol[index]
result = round(result/(sampleNum-1),6)
valuesRow.append(result)
correlationMatrix.append(valuesRow)
return correlationMatrix
if __name__ == '__main__':
    #read the merged data
    df = pd.read_csv('D:\大三上\数据挖掘\data.csv')
    #drop columns that are completely empty
    isnan = df.isnull().all()
    dropCol = isnan[isnan.values==True].index.tolist()
    df.drop(columns=dropCol,inplace=True)
    #extract the numeric score data
    dfScore = df.iloc[:,5:16]
    #z-score normalisation
    z_score(dfScore)
    #transpose the matrix
    dfScore = dfScore.T
    #convert the transposed matrix's column labels to string
    dfScore.columns = dfScore.columns.map(str)
    #z-score normalise the matrix again
    z_score(dfScore)
# print(dfScore)
resultMatrix = corMatrix(dfScore)
nprm = np.array(resultMatrix)
dfresult = pd.DataFrame(data = nprm)
#print(dfresult)
row = dfresult.shape[0]
col = dfresult.shape[1]
list_b = [[0 for i in range(3)] for j in range(row)]
list_c = [[0 for i in range(3)] for j in range(row)]
    for r in range(row):
        for c in range(col):
            if r != c:
                v = dfresult[r][c]
                if v > list_b[r][0]:
                    # shift the old top-2 down before inserting the new maximum
                    list_b[r][2], list_b[r][1] = list_b[r][1], list_b[r][0]
                    list_c[r][2], list_c[r][1] = list_c[r][1], list_c[r][0]
                    list_b[r][0] = v
                    list_c[r][0] = df['ID'][c]
                elif v > list_b[r][1]:
                    list_b[r][2] = list_b[r][1]
                    list_c[r][2] = list_c[r][1]
                    list_b[r][1] = v
                    list_c[r][1] = df['ID'][c]
                elif v > list_b[r][2]:
                    list_b[r][2] = v
                    list_c[r][2] = df['ID'][c]
#print(list_b)
su = np.array(list_b)
su_c = np.array(list_c)
# print(su)
dfsu = pd.DataFrame(data = su)
dfsu_c = pd.DataFrame(data = su_c)
print(dfsu_c)
dfsu_c.to_csv('D:\大三上\数据挖掘\exp2.5.txt',sep = "\t")
# print(df['ID'][1]) | [
"[email protected]"
] | |
d2fdd8d26b96274701770b2711e7425e1e6fdb17 | fb732b0205be0b5e00e9bca5e70a59983b215959 | /qa_grover_grove.py | 2fa184f0c9b4996cc9b8067a87f081c1c830b5ea | [] | no_license | ghellstern/quantum | 573af5a9d46f87b5dda0f7781dfc2a33d90ee513 | d0561d888e0fb87363419562bae5f4d79503d71b | refs/heads/master | 2020-05-25T06:59:08.451151 | 2019-01-27T10:43:23 | 2019-01-27T10:43:23 | 187,677,419 | 1 | 0 | null | 2019-05-20T16:32:29 | 2019-05-20T16:32:29 | null | UTF-8 | Python | false | false | 754 | py | import numpy as np
from grove.amplification.grover import Grover
from pyquil import get_qc
# Bitstring Map as an algorithm input
SEARCHED_STRING = "1011010"
N = len(SEARCHED_STRING)
mapping = {}
for b in range(2 ** N):
pad_str = np.binary_repr(b, N)
if pad_str == SEARCHED_STRING:
mapping[pad_str] = -1
else:
mapping[pad_str] = 1
# Connection
qc = get_qc('9q-qvm')
#==============================================================================
# Grove: Grover's Search Algorithm
#==============================================================================
# Run
algo = Grover()
ret_string = algo.find_bitstring(qc, bitstring_map=mapping)
print("The searched string is: {}".format(ret_string)) | [
"[email protected]"
] | |
2762d5b3bb66fcbaf71e5dcfe6f251939746ccd3 | a3f2e68ac99394eac00d1f38b96a184a2e37cc2c | /src/com/zimu/python/base/for.py | 860396bc13594a01597ca122c13849a1e6303513 | [] | no_license | chenlunyiran/python | aec84fe7b90bd39b217674f7f1e74ad61eae9d52 | 9dac1f9dacbe6155f850a1c454293103ce02c3eb | refs/heads/master | 2021-08-08T11:33:10.910275 | 2017-11-10T07:29:49 | 2017-11-10T07:29:49 | 110,198,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | numList = [1,2,3,4,5]
sum = 0
for x in numList:
print(x)
sum = sum + x
print ("sum:")
print (sum)
print (sum) | [
"[email protected]"
] | |
2f29d0cd97f0be44eb260b6fa5f19ca891c4cd03 | 1348ae98762eab4befb6d48bda156e105dd219f6 | /plotting_files/plot_b_val_vs_col_den.py | b7e1cbc3ed311b5634db8e88eb798f012df102ec | [] | no_license | xnaxe29/Voigt_HIH2_abs_fitter | bd37278d9d2536c4dd20be9eeace6093cd6bd63a | 4de2df54201812c013dae3e6ca9543c6db9f2ce7 | refs/heads/master | 2020-12-13T05:58:46.085471 | 2020-01-17T17:44:31 | 2020-01-17T17:44:31 | 234,329,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,171 | py | import matplotlib.pyplot as plt
import numpy as np
import sys
import matplotlib.ticker as ticker
b_val_plot = np.array([1., 2.,3.,4.,5.,6.,7.,8.,9.,10.])
size_of_font = 20
file_name_b_val = str(sys.argv[1])
file_name_b_val_err = str(sys.argv[2])
#print (file_name_b_val)
#print (file_name_b_val_err)
#quit()
b_val_new_array = np.loadtxt(file_name_b_val)
b_val_new_array_err = np.loadtxt(file_name_b_val_err)
b_val_new_array = np.nan_to_num(b_val_new_array)
b_val_new_array_err = np.nan_to_num(b_val_new_array_err)
#print (b_val_new_array)
#print (b_val_new_array_err)
#quit()
logN_HI_fit_array3 = b_val_new_array[:-1,0]
logN_HI_fit_array_err3 = b_val_new_array_err[:-1,0]
logN_H2J0_fit_array3 = b_val_new_array[:-1,1]
logN_H2J0_fit_err_array3 = b_val_new_array_err[:-1,1]
logN_H2J1_fit_array3 = b_val_new_array[:-1,2]
logN_H2J1_fit_err_array3 = b_val_new_array_err[:-1,2]
logN_H2J2_fit_array3 = b_val_new_array[:-1,3]
logN_H2J2_fit_err_array3 = b_val_new_array_err[:-1,3]
logN_H2J3_fit_array3 = b_val_new_array[:-1,4]
logN_H2J3_fit_err_array3 = b_val_new_array_err[:-1,4]
logN_H2J4_fit_array3 = b_val_new_array[:-1,5]
logN_H2J4_fit_err_array3 = b_val_new_array_err[:-1,5]
logN_H2J5_fit_array3 = b_val_new_array[:-1,6]
logN_H2J5_fit_err_array3 = b_val_new_array_err[:-1,6]
logN_H2J6_fit_array3 = b_val_new_array[:-1,7]
logN_H2J6_fit_err_array3 = b_val_new_array_err[:-1,7]
logN_H2J7_fit_array3 = b_val_new_array[:-1,8]
logN_H2J7_fit_err_array3 = b_val_new_array_err[:-1,8]
logN_HDJ0_fit_array3 = b_val_new_array[:-1,9]
logN_HDJ0_fit_err_array3 = b_val_new_array_err[:-1,9]
logN_HDJ1_fit_array3 = b_val_new_array[:-1,10]
logN_HDJ1_fit_err_array3 = b_val_new_array_err[:-1,10]
logN_HDJ2_fit_array3 = b_val_new_array[:-1,11]
logN_HDJ2_fit_err_array3 = b_val_new_array_err[:-1,11]
b_fit_array3 = b_val_new_array[:-1,12]
b_fit_err_array3 = b_val_new_array_err[:-1,12]
f, ((ax1, ax2, ax3, ax4), (ax5, ax6, ax7, ax8)) = plt.subplots(2, 4, sharex='col', sharey='row', figsize=(16, 8), dpi=300)
#f, axarr = plt.subplots(5, 5, sharex='col', sharey='row')
#print (logN_H2J0_fit_array3)
ax1.errorbar(b_val_plot, logN_H2J0_fit_array3, yerr=logN_H2J0_fit_err_array3)
ax1.set_title('H2J0', fontsize=1.5*size_of_font)
ax1.set_ylabel('logN', fontsize=size_of_font)
ax1.set_ylim(logN_H2J0_fit_array3.min()-5, logN_H2J0_fit_array3.max()+5)
#print (logN_H2J0_fit_array3)
ax2.errorbar(b_val_plot, logN_H2J1_fit_array3, yerr=logN_H2J1_fit_err_array3)
ax2.set_title('H2J1', fontsize=1.5*size_of_font)
ax3.errorbar(b_val_plot, logN_H2J2_fit_array3, yerr=logN_H2J2_fit_err_array3)
ax3.set_title('H2J2', fontsize=1.5*size_of_font)
ax4.errorbar(b_val_plot, logN_H2J3_fit_array3, yerr=logN_H2J3_fit_err_array3)
ax4.set_title('H2J3', fontsize=1.5*size_of_font)
ax5.errorbar(b_val_plot, logN_H2J4_fit_array3, yerr=logN_H2J4_fit_err_array3)
ax5.set_title('H2J4', fontsize=1.5*size_of_font)
ax5.set_ylabel('logN', fontsize=size_of_font)
ax5.set_ylim(logN_H2J4_fit_array3.min()-5, logN_H2J4_fit_array3.max()+5)
ax6.errorbar(b_val_plot, logN_H2J5_fit_array3, yerr=logN_H2J5_fit_err_array3)
ax6.set_title('H2J5', fontsize=1.5*size_of_font)
ax7.errorbar(b_val_plot, logN_H2J6_fit_array3, yerr=logN_H2J6_fit_err_array3)
ax7.set_title('H2J6', fontsize=1.5*size_of_font)
ax5.set_xlabel(r'b value (Km s$^{-1}$)', fontsize=size_of_font)
ax6.set_xlabel(r'b value (Km s$^{-1}$)', fontsize=size_of_font)
ax7.set_xlabel(r'b value (Km s$^{-1}$)', fontsize=size_of_font)
ax8.set_xlabel(r'b value (Km s$^{-1}$)', fontsize=size_of_font)
saved_filename = file_name_b_val[:-11] + 'vs_col_den.pdf'
ax1.margins(y=.1, x=.1)
ax2.margins(y=.1, x=.1)
ax3.margins(y=.1, x=.1)
ax4.margins(y=.1, x=.1)
ax5.margins(y=.1, x=.1)
ax6.margins(y=.1, x=.1)
ax7.margins(y=.1, x=.1)
ax8.margins(y=.1, x=.1)
'''
ax1.yaxis.set_major_locator(ticker.MultipleLocator(1))
ax1.yaxis.set_minor_locator(ticker.MultipleLocator(0.2))
ax1.xaxis.set_major_locator(ticker.MultipleLocator(2))
ax1.xaxis.set_minor_locator(ticker.MultipleLocator(0.5))
ax2.yaxis.set_major_locator(ticker.MultipleLocator(1))
ax2.yaxis.set_minor_locator(ticker.MultipleLocator(0.2))
ax2.xaxis.set_major_locator(ticker.MultipleLocator(2))
ax2.xaxis.set_minor_locator(ticker.MultipleLocator(0.5))
ax3.yaxis.set_major_locator(ticker.MultipleLocator(1))
ax3.yaxis.set_minor_locator(ticker.MultipleLocator(0.2))
ax3.xaxis.set_major_locator(ticker.MultipleLocator(2))
ax3.xaxis.set_minor_locator(ticker.MultipleLocator(0.5))
ax4.yaxis.set_major_locator(ticker.MultipleLocator(1))
ax4.yaxis.set_minor_locator(ticker.MultipleLocator(0.2))
ax4.xaxis.set_major_locator(ticker.MultipleLocator(2))
ax4.xaxis.set_minor_locator(ticker.MultipleLocator(0.5))
ax5.yaxis.set_major_locator(ticker.MultipleLocator(0.5))
ax5.yaxis.set_minor_locator(ticker.MultipleLocator(0.1))
ax5.xaxis.set_major_locator(ticker.MultipleLocator(2))
ax5.xaxis.set_minor_locator(ticker.MultipleLocator(0.5))
ax6.yaxis.set_major_locator(ticker.MultipleLocator(0.5))
ax6.yaxis.set_minor_locator(ticker.MultipleLocator(0.1))
ax6.xaxis.set_major_locator(ticker.MultipleLocator(2))
ax6.xaxis.set_minor_locator(ticker.MultipleLocator(0.5))
ax7.yaxis.set_major_locator(ticker.MultipleLocator(0.5))
ax7.yaxis.set_minor_locator(ticker.MultipleLocator(0.1))
ax7.xaxis.set_major_locator(ticker.MultipleLocator(2))
ax7.xaxis.set_minor_locator(ticker.MultipleLocator(0.5))
ax8.yaxis.set_major_locator(ticker.MultipleLocator(0.5))
ax8.yaxis.set_minor_locator(ticker.MultipleLocator(0.1))
ax8.xaxis.set_major_locator(ticker.MultipleLocator(2))
ax8.xaxis.set_minor_locator(ticker.MultipleLocator(0.5))
'''
ax1.tick_params(axis = 'both', which = 'major', direction='in', length=10, width=2, colors='k')
ax2.tick_params(axis = 'both', which = 'major', direction='in', length=10, width=2, colors='k')
ax3.tick_params(axis = 'both', which = 'major', direction='in', length=10, width=2, colors='k')
ax4.tick_params(axis = 'both', which = 'major', direction='in', length=10, width=2, colors='k')
ax5.tick_params(axis = 'both', which = 'major', direction='in', length=10, width=2, colors='k')
ax6.tick_params(axis = 'both', which = 'major', direction='in', length=10, width=2, colors='k')
ax7.tick_params(axis = 'both', which = 'major', direction='in', length=10, width=2, colors='k')
ax8.tick_params(axis = 'both', which = 'major', direction='in', length=10, width=2, colors='k')
ax1.tick_params(axis = 'both', which = 'minor', direction='in', length=5, width=1, colors='k')
ax2.tick_params(axis = 'both', which = 'minor', direction='in', length=5, width=1, colors='k')
ax3.tick_params(axis = 'both', which = 'minor', direction='in', length=5, width=1, colors='k')
ax4.tick_params(axis = 'both', which = 'minor', direction='in', length=5, width=1, colors='k')
ax5.tick_params(axis = 'both', which = 'minor', direction='in', length=5, width=1, colors='k')
ax6.tick_params(axis = 'both', which = 'minor', direction='in', length=5, width=1, colors='k')
ax7.tick_params(axis = 'both', which = 'minor', direction='in', length=5, width=1, colors='k')
ax8.tick_params(axis = 'both', which = 'minor', direction='in', length=5, width=1, colors='k')
plt.savefig(saved_filename)
print ('saved')
quit()
| [
"[email protected]"
] | |
aaf7e92372fffd28588ddef6f81552b2d62a3937 | 287b58e78286c6f3226f330af5082dcbe7132bed | /blog/migrations/0003_auto_20170512_1513.py | a53e1da7a92e23768af6c6087279c89afc483299 | [] | no_license | maridigiolo/django-exercise | 0a5f16431dda379b9ae97ac3df7a36a4b8ea243b | 386e283bdf51671412cbf7d6d3d09b5514a35838 | refs/heads/master | 2021-01-20T14:39:31.369322 | 2017-05-16T14:08:46 | 2017-05-16T14:08:46 | 90,642,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-12 15:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_auto_20170511_1853'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('slug', models.SlugField(unique=True)),
],
),
migrations.AddField(
model_name='post',
name='tags',
field=models.ManyToManyField(to='blog.Tag'),
),
]
| [
"[email protected]"
] | |
c7a6d0a4440e9eb6c1af76f5603c4cbae2e68521 | ee88c4690ce952ef7d07b29c78963b9904275630 | /config/urls.py | 2685435794c2cf31ff22c2dcc1c12b7d4fc737d1 | [
"MIT"
] | permissive | luiscberrocal/acp-calendar-dev-project | 53a88a61002dcd3058fd0ef9fdaa8aa8a1fefec6 | d259a9db613136c3567a1e6e5750fc74d27f08c6 | refs/heads/master | 2021-01-17T17:46:43.163262 | 2016-12-29T13:32:49 | 2016-12-29T13:32:49 | 69,179,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,615 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, include(admin.site.urls)),
# User management
url(r'^users/', include('acp_calendar_project.users.urls', namespace='users')),
url(r'^calendar/', include('acp_calendar.urls', namespace='calendar')),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
import debug_toolbar
urlpatterns += [url(r'^__debug__/', include(debug_toolbar.urls))]
| [
"[email protected]"
] | |
f68997466b45549f925dca7694946614ad7d4fd5 | 965e163df916b01d647953f2b1431d265683f6ca | /desktop_local_tests/linux/test_linux_ip_responder_disrupt_interface.py | 9c530092e4332d08a2aad027c5f16ab34a6f3077 | [
"MIT"
] | permissive | expressvpn/expressvpn_leak_testing | 6505c39228d396caff0c2df3777009c6fbdf3127 | 9e4cee899ac04f7820ac351fa55efdc0c01370ba | refs/heads/master | 2023-08-18T06:33:33.931040 | 2021-10-11T03:02:50 | 2021-10-11T03:02:50 | 112,572,905 | 244 | 48 | MIT | 2021-01-19T16:02:18 | 2017-11-30T06:18:40 | Python | UTF-8 | Python | false | false | 421 | py | from desktop_local_tests.local_ip_responder_test_case_with_disrupter import LocalIPResponderTestCaseWithDisrupter
from desktop_local_tests.linux.linux_interface_disrupter import LinuxInterfaceDisrupter
class TestLinuxIPResponderDisruptInterface(LocalIPResponderTestCaseWithDisrupter):
# TODO: Docs
def __init__(self, devices, parameters):
super().__init__(LinuxInterfaceDisrupter, devices, parameters)
| [
"[email protected]"
] | |
57b1b4a7c48e79845070c55ebb0b34375345e2f7 | 9df9b12e7e97c290b473398e5b9bac23ceb28fa5 | /src/MainTask/main.py | 793c8ecf5bdd9965da39d28ff6dacd2158cd8bbe | [] | no_license | wangjsfm/VoiceAlerm | 5bfa2486ff4d6fbd664e2bbda37d762a682aee20 | a72b282a9f0cee5e2a7dfc1a9d655d2989817a69 | refs/heads/master | 2020-04-30T05:11:29.813610 | 2019-03-21T11:10:26 | 2019-03-21T11:10:26 | 176,623,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,020 | py | #coding=utf-8
#Version: V 1.0
#Author: WangSheng
#Date: 2019/3/20 8:31
import threading,time
from src.ServiceTask.VoiceService import VoiceTask
from src.ServiceTask.TemDiffMonitorService import tempratureDiffMonit
from src.ServiceTask.ModInitService import TempModelInit
from src.ServiceTask.TemMoniService import TempratureMonitor
VoiceText= ['系统已上线,开始监测锅炉壁温....',]  # "System online - starting boiler wall temperature monitoring...."
def test(vo):
while True:
print(vo)
time.sleep(2)
if __name__ == '__main__':
    #models for each boiler zone
    areaModelMap = TempModelInit()
    #voice alarm
    voiceThread = threading.Thread( target=VoiceTask, args=(VoiceText,) )
    #temperature-difference monitoring
    diffMonitorThread = threading.Thread(target=tempratureDiffMonit, args=(VoiceText,))
    #wall-temperature monitoring
    tempMoniThread = threading.Thread(target=TempratureMonitor, args=(areaModelMap,VoiceText))
    # testThread = threading.Thread( target=test, args=(VoiceText,) )
    voiceThread.start()
tempMoniThread.start()
diffMonitorThread.start()
| [
"[email protected]"
] | |
8191099a12f2c8606ff0b7c7276cf7adb63063ac | eaa07ce37e8a01aed0207f6707facc9675ee4245 | /liczby_pierwsze.py | c5bbfec1dd2fb6fc98aa312d2d1a82c42fd01921 | [] | no_license | jpalmowswroc/NOKIA_Python | f0a77e9a6edaf3553c2c8ecb267a2fc3fe9b8717 | 076e0a69cd592e0132738f5e53670fe13f2603c8 | refs/heads/master | 2020-03-25T10:50:06.349810 | 2018-10-08T13:55:45 | 2018-10-08T13:55:45 | 143,707,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | d = int(input("Podaj liczbe."))
liczby = []
for i in range(2, d):  # start at 2: 0 and 1 are not prime
    liczby.append(i)
for x in liczby:
    for i in range(2, x):
        if x % i == 0:
            print (x, " dzieli się przez ", i)  # "... is divisible by ..."
            break
    else:
        print (x, " jest liczbą pierwszą")  # "... is a prime number"
| [
"[email protected]"
] | |
fd786edc5e091c8188505bf10510de95901f757a | 12df960cb9633cc00fbd970eeee612a5f2166bcc | /hfpy2e-all/hfpy2e-all/ch04/mystery2.py | cce8aa5e081ce3b84689f52f298bb78304fdfc09 | [] | no_license | lpizarro2391/HEAD-FIRST-PYTHON | 0e54cde2c2639be4c59d8295be4f0b6d95048958 | 8fb1740f8a35b2f39f921dfe8fac69e23c0bcbab | refs/heads/master | 2023-03-26T05:26:36.416336 | 2021-03-30T03:32:31 | 2021-03-30T03:32:31 | 315,743,520 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py |
def double(arg):
print('Before: ', arg)
print(id(arg))
arg = arg * 2
print('After: ', arg)
print(id(arg))
def change(arg):
print('Before: ', arg)
print(id(arg))
arg.append('More data')
print('After: ', arg)
print(id(arg))
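# Demonstration: with a list argument, double() rebinds only its local name
# (new id; the caller's list is unchanged), while change() mutates the
# caller's list in place (same id).
# nums = [1, 2]; double(nums)  -> nums is still [1, 2]
# nums = [1, 2]; change(nums)  -> nums is now [1, 2, 'More data']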
| [
"[email protected]"
] | |
a486b8053059c903fcbae7ee683707681e94c97e | 90fb55320c81259cb199b9a8900e11b2ba63da4f | /1/summing.py | ba2262a8a15fe42c75e98c7269c79b194026bcb2 | [] | no_license | pogross/bitesofpy | f9bd8ada790d56952026a938b1a34c20562fdd38 | 801f878f997544382e0d8650fa6b6b1b09fa5b81 | refs/heads/master | 2020-05-19T07:31:44.556896 | 2020-01-26T12:48:28 | 2020-01-26T12:48:28 | 184,899,394 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | def sum_numbers(numbers=None):
if numbers is None:
numbers = range(1, 101)
return sum(numbers)
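# Examples: sum_numbers() -> 5050 (1..100); sum_numbers([1, 2, 3]) -> 6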
| [
"[email protected]"
] | |
23114f378d9fd15c2b284f1816308b838765ea0b | fcc6c2eeac4ae38c4fca73dcc2d7a6cc86cf7244 | /rac/know/urls.py | 48e5fb5936505f6b4d2bfd219e181d6952e5c854 | [] | no_license | 1122Rishabh/Knowladge-Website | 6a939c7420f4accb75cb1f1333eb57bc295f90da | 663ec6a7561ab1f9ffa95a4ff5d741d9d3f9e318 | refs/heads/main | 2023-03-20T19:33:17.855721 | 2021-03-15T08:26:22 | 2021-03-15T08:26:22 | 347,732,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 934 | py | from django.urls import path
from . import views
urlpatterns = [
path("",views.index,name="KnowHome"),
path("index/",views.index,name="indexus"),
path("contact/",views.contact,name="ContactUs"),
path("about/",views.about,name="AboutUs"),
path("php/",views.php,name="phpus"),
path("javascript/",views.javascript,name="ContactUs"),
path("C/",views.C,name="ContactUs"),
path("Cn/",views.Cn,name="ContactUs"),
path("ml/",views.ml,name="ContactUs"),
path("ai/",views.ai,name="ContactUs"),
path("java/",views.java,name="ContactUs"),
path("python/",views.python,name="ContactUs"),
path("ide/",views.ide,name="ContactUs"),
path("javaide/",views.javaide,name="ContactUs"),
path("phpide/",views.phpide,name="ContactUs"),
path("javascriptide/",views.javascriptide,name="ContactUs"),
path("cide/",views.cide,name="ContactUs"),
path("cnide/",views.cnide,name="ContactUs"),
] | [
"[email protected]"
] | |
a543b07fa98c6571db483bd6b367a1e42481d991 | 9e30fe3e8a6cd951d4f197bec869681e5eac85b1 | /HFPN model/integrated-model/integrated_HFPN.py | b3193afb4b6bb20bca8b2c26b0f4689cb8508094 | [
"BSD-3-Clause"
] | permissive | PN-Alzheimers-Parkinsons/PN_Alzheimers_Parkinsons | 0bdf67bc84805499345eb973c60289e6cafa1291 | 8e9a3a8151069757475808c48511c9d7486ea334 | refs/heads/main | 2023-03-01T02:33:47.205003 | 2021-02-04T10:37:06 | 2021-02-04T10:37:06 | 308,452,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52,948 | py | import os
import sys
cwd = os.getcwd()
root_folder = os.sep+"team-project"
sys.path.insert(0, cwd[:(cwd.index(root_folder)+len(root_folder))] + os.sep+"utils"+os.sep)
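# NOTE: this assumes the script is run from somewhere inside the
# "team-project" tree; cwd.index() raises ValueError otherwise.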
# Import class to work with hybrid functional Petri nets
from hfpn import HFPN
# Import parameters, rate equations, initial tokens and firing conditions
from parameters import *
from rate_functions import *
from initial_tokens import *
from firing_conditions import *
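# A minimal usage sketch for the builder functions below (the HFPN
# constructor arguments are assumptions - see utils/hfpn.py for the real API):
# pn = HFPN()
# add_integrated_cholesterol_homeostasis(pn)
# add_integrated_lewy_body_pathology(pn)
# add_integrated_tau_pathology(pn)
# add_integrated_abeta_pathology(pn)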
def add_integrated_cholesterol_homeostasis(hfpn):
## Places
# Cholesterol-ApoE
hfpn.add_place(it_p_ApoEchol_extra,place_id="p_ApoEchol_extra", label="ApoE-chol complex extra", continuous=True)
hfpn.add_place(it_p_ApoEchol_EE,place_id="p_ApoEchol_EE", label="ApoE-chol complex EE", continuous=True)
# Cholesterol in different organelles
hfpn.add_place(it_p_chol_LE,place_id="p_chol_LE", label="Chol LE", continuous=True)
hfpn.add_place(it_p_chol_mito,place_id="p_chol_mito", label="Chol mito", continuous=True)
hfpn.add_place(it_p_chol_ER,place_id="p_chol_ER", label="Chol ER", continuous=True)
hfpn.add_place(it_p_chol_PM,place_id="p_chol_PM", label="Chol PM", continuous=True)
# Oxysterols
hfpn.add_place(it_p_24OHchol_extra,place_id="p_24OHchol_extra", label="24OHchol extra", continuous=True)
hfpn.add_place(it_p_24OHchol_intra,place_id="p_24OHchol_intra", label="24OHchol intra", continuous=True)
hfpn.add_place(it_p_27OHchol_extra,place_id="p_27OHchol_extra", label="27OHchol extra", continuous=True)
hfpn.add_place(it_p_27OHchol_intra,place_id="p_27OHchol_intra", label="27OHchol intra", continuous=True)
hfpn.add_place(it_p_7HOCA,place_id="p_7HOCA", label="7-HOCA", continuous=True)
hfpn.add_place(it_p_preg,place_id="p_preg", label="Pregnenolon", continuous=True)
# PD specific places in cholesterol homeostasis
hfpn.add_place(it_p_GBA1, "p_GBA1","GBA1", continuous = False)
# Connections to other networks
hfpn.add_place(it_p_LB, "p_LB", "Lewy body", continuous = True)
hfpn.add_place(it_p_SNCA_act, "p_SNCA_act","SNCA - active", continuous = True)
hfpn.add_place(it_p_VPS35, "p_VPS35", "VPS35", continuous = True)
hfpn.add_place(it_p_SNCA_olig, "p_SNCA_olig", "SNCA - Oligomerised", continuous = True)
hfpn.add_place(it_p_NPC1_LE, place_id="p_NPC1_LE", label="Mutated NPC1 in LE", continuous=True)
hfpn.add_place(it_p_ROS_mito, place_id="p_ROS_mito", label="Conc of ROS in mito", continuous=True)
hfpn.add_place(it_p_Ca_cyto, "p_Ca_cyto", "Ca - cytosol", continuous = True)
hfpn.add_place(it_p_tauP, 'p_tauP', 'Phosphorylated tau')
# Added apoptosis subnet
hfpn.add_place(it_p_PERK_ER, place_id="p_PERK_ER", label="Conc of PERK in ER", continuous=True)
## Transitions
# Cholesterol Endocytosis
hfpn.add_transition_with_speed_function(
transition_id = "t_LDLR_endocyto",
label = "LDLR endocyto",
input_place_ids = ["p_ApoEchol_extra", "p_chol_ER", "p_LB"],
firing_condition = fc_t_LDLR_endocyto,
reaction_speed_function = r_t_LDLR_endocyto,
consumption_coefficients = [0,0,0],
output_place_ids = ["p_chol_LE"], #"p_ApoEchol_EE"
production_coefficients = [354]) # 1
# Transport Cholesterol from LE to ER
hfpn.add_transition_with_speed_function(
transition_id = "t_chol_trans_LE_ER",
label = "Chol transport LE-ER",
input_place_ids = ["p_chol_LE", "p_NPC1_LE"],
firing_condition = fc_t_chol_trans_LE_ER,
reaction_speed_function = r_t_chol_trans_LE_ER,
consumption_coefficients = [1, 0],
output_place_ids = ["p_chol_ER"],
production_coefficients = [1])
# Transport Cholesterol from LE to mito
hfpn.add_transition_with_speed_function(
transition_id = "t_chol_trans_LE_mito",
label = "Chol transport LE-mito",
input_place_ids = ["p_chol_LE", "p_PERK_ER"],
firing_condition = fc_t_chol_trans_LE_mito,
reaction_speed_function = r_t_chol_trans_LE_mito,
consumption_coefficients = [1, 0],
output_place_ids = ["p_chol_mito"],
production_coefficients = [1])
# Transport Cholesterol from LE to PM
hfpn.add_transition_with_speed_function(
transition_id = "t_chol_trans_LE_PM",
label = "Chol transport LE-PM",
input_place_ids = ["p_chol_LE"],
firing_condition = fc_t_chol_trans_LE_PM,
reaction_speed_function = r_t_chol_trans_LE_PM,
consumption_coefficients = [1],
output_place_ids = ["p_chol_PM"],
production_coefficients = [1])
# Transport Cholesterol from PM to ER
hfpn.add_transition_with_speed_function(
transition_id = "t_chol_trans_PM_ER",
label = "Chol transport PM-ER",
input_place_ids = ["p_chol_PM"],
firing_condition = fc_t_chol_trans_PM_ER,
reaction_speed_function = r_t_chol_trans_PM_ER,
consumption_coefficients = [1],
output_place_ids = ["p_chol_ER"],
production_coefficients = [1])
# Transport Cholesterol from ER to PM
hfpn.add_transition_with_speed_function(
transition_id = "t_chol_trans_ER_PM",
label = "Chol transport ER-PM",
input_place_ids = ["p_chol_ER"],
firing_condition = fc_t_chol_trans_ER_PM,
reaction_speed_function = r_t_chol_trans_ER_PM,
consumption_coefficients = [1],
output_place_ids = ["p_chol_PM"],
production_coefficients = [1])
# Transport Cholesterol from ER to mito
hfpn.add_transition_with_speed_function(
transition_id = "t_chol_trans_ER_mito",
label = "Chol transport ER-mito",
input_place_ids = ["p_chol_ER", "p_PERK_ER"],
firing_condition = fc_t_chol_trans_ER_mito,
reaction_speed_function = r_t_chol_trans_ER_mito,
consumption_coefficients = [1, 0],
output_place_ids = ["p_chol_mito"],
production_coefficients = [1])
# Metabolisation of chol by CYP27A1
hfpn.add_transition_with_michaelis_menten(
transition_id = "t_CYP27A1_metab",
label = "Chol metab CYP27A1",
Km = Km_t_CYP27A1_metab,
vmax = vmax_t_CYP27A1_metab,
input_place_ids = ["p_chol_mito"],
substrate_id = "p_chol_mito",
consumption_coefficients = [1],
output_place_ids = ["p_27OHchol_intra"],
production_coefficients = [1],
vmax_scaling_function = vmax_scaling_t_CYP27A1_metab)
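    # Michaelis-Menten transitions presumably fire at v = vmax*[S]/(Km+[S]),
    # with [S] taken from substrate_id and vmax rescaled by the supplied
    # vmax_scaling_function (inferred from the argument names; see utils/hfpn.py).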
# Metabolism of chol by CYP11A1
hfpn.add_transition_with_michaelis_menten(
transition_id = "t_CYP11A1_metab",
label = "Chol metab CYP11A1",
Km = Km_t_CYP11A1_metab,
vmax = vmax_t_CYP11A1_metab,
input_place_ids = ["p_chol_mito"],
substrate_id = "p_chol_mito",
consumption_coefficients = [1],
output_place_ids = ["p_preg"],
production_coefficients = [1],
vmax_scaling_function = vmax_scaling_t_CYP11A1_metab)
# Metabolisation of 27OHchol by CYP7B1
hfpn.add_transition_with_michaelis_menten(
transition_id = "t_CYP7B1_metab",
label = "27OHchol metab CYP7B1",
Km = Km_t_CYP7B1_metab,
vmax = vmax_t_CYP7B1_metab,
input_place_ids = ["p_27OHchol_intra"],
substrate_id = "p_27OHchol_intra",
consumption_coefficients = [1],
output_place_ids = ["p_7HOCA"],
production_coefficients = [1],
vmax_scaling_function = vmax_scaling_t_CYP7B1_metab)
# Endocytosis of 27OHchol
hfpn.add_transition_with_speed_function(
transition_id = "t_27OHchol_endocyto",
label = "27OHchol endocyto",
input_place_ids = ["p_27OHchol_extra"],
firing_condition = fc_t_27OHchol_endocyto,
reaction_speed_function = r_t_27OHchol_endocyto,
consumption_coefficients = [1],
output_place_ids = ["p_27OHchol_intra", "p_27OHchol_extra"],
production_coefficients = [1,1])
# Metabolisation of chol by CYP46A1
hfpn.add_transition_with_michaelis_menten(
transition_id = "t_CYP46A1_metab",
label = "Chol metab CYP46A1",
Km = Km_t_CYP46A1_metab,
vmax = vmax_t_CYP46A1_metab,
input_place_ids = ["p_chol_ER"],
substrate_id = "p_chol_ER",
consumption_coefficients = [1],
output_place_ids = ["p_24OHchol_intra"],
production_coefficients = [1],
vmax_scaling_function = vmax_scaling_t_CYP46A1_metab)
# Exocytosis of 24OHchol
hfpn.add_transition_with_speed_function(
transition_id = "t_24OHchol_exocyto",
label = "24OHchol exocyto",
input_place_ids = ["p_24OHchol_intra"],
firing_condition = fc_t_24OHchol_exocyto,
reaction_speed_function = r_t_24OHchol_exocyto,
consumption_coefficients = [1],
output_place_ids = ["p_24OHchol_extra"],
production_coefficients = [1])
# Transport of Chol into ECM
hfpn.add_transition_with_speed_function(
transition_id = "t_chol_trans_PM_ECM",
label = "Chol transport PM-ECM",
input_place_ids = ["p_chol_PM", "p_24OHchol_intra"],
firing_condition = fc_t_chol_trans_PM_ECM,
reaction_speed_function = r_t_chol_trans_PM_ECM,
consumption_coefficients = [1,0],
output_place_ids = [],
production_coefficients = [])
# PD specific transitions
hfpn.add_transition_with_speed_function(
transition_id = 't_SNCA_bind_ApoEchol_extra',
label = 'Extracellular binding of SNCA to chol',
input_place_ids = ['p_ApoEchol_extra','p_SNCA_act'],
firing_condition = fc_t_SNCA_bind_ApoEchol_extra,
reaction_speed_function = r_t_SNCA_bind_ApoEchol_extra,
consumption_coefficients = [0,1],
output_place_ids = ['p_SNCA_olig'],
production_coefficients = [1])
hfpn.add_transition_with_speed_function(
transition_id = 't_chol_LE_upreg',
label = 'Upregulation of chol in LE',
input_place_ids = ['p_GBA1'],
firing_condition = fc_t_chol_LE_upreg,
reaction_speed_function = r_t_chol_LE_upreg,
consumption_coefficients = [0], # GBA1 is an enzyme
output_place_ids = ['p_chol_LE'],
production_coefficients = [1])
### Lewy body pathology
def add_integrated_lewy_body_pathology(hfpn):
# Places
hfpn.add_place(it_p_SNCA_act, "p_SNCA_act","SNCA - active", continuous = True)
hfpn.add_place(it_p_VPS35, "p_VPS35", "VPS35", continuous = True)
hfpn.add_place(it_p_SNCA_inact, "p_SNCA_inact", "SNCA - inactive", continuous = True)
hfpn.add_place(it_p_SNCA_olig, "p_SNCA_olig", "SNCA - Oligomerised", continuous = True)
hfpn.add_place(it_p_LB, "p_LB", "Lewy body", continuous = True)
hfpn.add_place(it_p_Fe2, "p_Fe2", "Fe2 iron pool", continuous = True)
# Connections to other networks
hfpn.add_place(it_p_LRRK2_mut, "p_LRRK2_mut","LRRK2 - mutated", continuous = True)
hfpn.add_place(it_p_27OHchol_intra, "p_27OHchol_intra","27-OH chol - intracellular", continuous = True)
hfpn.add_place(it_p_DJ1, "p_DJ1","DJ1 mutant", continuous = True)
hfpn.add_place(it_p_Ca_cyto, "p_Ca_cyto", "Ca - cytosol", continuous = True)
hfpn.add_place(it_p_ROS_mito, "p_ROS_mito", "ROS - mitochondria", continuous = True)
hfpn.add_place(it_p_tauP, 'p_tauP', 'Phosphorylated tau')
hfpn.add_place(it_p_LAMP2A, place_id="p_LAMP2A", label = "Drug LAMP2A", continuous = True) #therapeutic PD
# Transitions
hfpn.add_transition_with_speed_function(
transition_id = 't_SNCA_degr',
label = 'SNCA degradation by CMA',
input_place_ids = ['p_SNCA_act','p_VPS35','p_LRRK2_mut','p_27OHchol_intra','p_DJ1', 'p_LAMP2A'],
firing_condition = fc_t_SNCA_degr,
reaction_speed_function = r_t_SNCA_degr,
consumption_coefficients = [1,0,0,0,0,0],
output_place_ids = ['p_SNCA_inact'],
production_coefficients = [1])
hfpn.add_transition_with_speed_function(
transition_id = 't_SNCA_aggr',
label = 'SNCA aggregation',
input_place_ids = ['p_SNCA_act','p_Ca_cyto','p_ROS_mito', 'p_tauP'],
firing_condition = fc_t_SNCA_aggr,
reaction_speed_function = r_t_SNCA_aggr,
consumption_coefficients = [30,0,0, 0], #should be reviewed if Ca is consumed
output_place_ids = ['p_SNCA_olig'],
production_coefficients = [1])
hfpn.add_transition_with_speed_function(
transition_id = 't_SNCA_fibril',
label = 'SNCA fibrillation',
input_place_ids = ['p_SNCA_olig'],
firing_condition = fc_t_SNCA_fibril,
reaction_speed_function = r_t_SNCA_fibril,
consumption_coefficients = [100],
output_place_ids = ['p_LB'],
production_coefficients = [1])
hfpn.add_transition_with_speed_function(
transition_id = 't_IRE',
label = 'IRE',
input_place_ids = ['p_Fe2'],
firing_condition = fc_t_IRE,
reaction_speed_function = r_t_IRE,
consumption_coefficients = [0],
output_place_ids = ['p_SNCA_act'],
production_coefficients = [1])
### Tau Pathology
def add_integrated_tau_pathology(hfpn):
### Adding places
# hfpn.add_place(initial_tokens=1, place_id='', label='', continous=True)
hfpn.add_place(it_p_GSK3b_inact, 'p_GSK3b_inact', 'Inactive GSK3 Beta kinase')
hfpn.add_place(it_p_GSK3b_act, 'p_GSK3b_act', 'Active GSK3 Beta kinase')
hfpn.add_place(it_p_tauP, 'p_tauP', 'Phosphorylated tau')
hfpn.add_place(it_p_tau, 'p_tau', 'Unphosphorylated tau (microtubule)')
# from other sub-nets:
hfpn.add_place(it_p_ApoE, 'p_ApoE', 'ApoE genotype') # from Abeta
hfpn.add_place(it_p_Ab, 'p_Ab', 'Abeta') # from Abeta
# connections to other networks
hfpn.add_place(it_p_SNCA_act, "p_SNCA_act","SNCA - active", continuous = True)
hfpn.add_place(it_p_chol_LE,place_id="p_chol_LE", label="Chol LE", continuous=True)
### Adding transitions
hfpn.add_transition_with_speed_function(transition_id = 't_GSK3b_exp_deg',
label = 'GSK3beta expression and degradation',
input_place_ids = ['p_GSK3b_inact'],
firing_condition = fc_t_GSK3b_exp_deg,
reaction_speed_function = r_t_GSK3b_exp_deg,
consumption_coefficients = [0],
output_place_ids = ['p_GSK3b_inact'],
production_coefficients = [1])
hfpn.add_transition_with_speed_function(transition_id = 't_actv_GSK3b',
label = 'GSK3beta activation',
input_place_ids = ['p_GSK3b_inact', 'p_ApoE', 'p_Ab'],
firing_condition = fc_t_actv_GSK3b,
reaction_speed_function = r_t_actv_GSK3b,
consumption_coefficients = [1, 0, 0],
output_place_ids = ['p_GSK3b_act'],
production_coefficients = [1])
hfpn.add_transition_with_speed_function(transition_id = 't_inactv_GSK3b',
label = 'GSK3beta inactivation',
input_place_ids = ['p_GSK3b_act'],
firing_condition = fc_t_inactv_GSK3b,
reaction_speed_function = r_t_inactv_GSK3b,
consumption_coefficients = [1],
output_place_ids = ['p_GSK3b_inact'],
production_coefficients = [1])
hfpn.add_transition_with_michaelis_menten(transition_id = 't_phos_tau',
label = 'Phosphorylation of tau',
Km = Km_t_phos_tau,
vmax = kcat_t_phos_tau,
input_place_ids = ['p_tau', 'p_GSK3b_act', 'p_SNCA_act','p_chol_LE'],
substrate_id = 'p_tau',
consumption_coefficients = [1, 0, 0, 0],
output_place_ids = ['p_tauP'],
production_coefficients = [1],
vmax_scaling_function = vmax_scaling_t_phos_tau)
hfpn.add_transition_with_michaelis_menten(transition_id = 't_dephos_tauP',
label = 'Dephosphorylation of tau protein',
Km = Km_t_dephos_tauP,
vmax = vmax_t_dephos_tauP,
input_place_ids = ['p_tauP'],
substrate_id = 'p_tauP',
consumption_coefficients = [1],
output_place_ids = ['p_tau'],
production_coefficients = [1],
vmax_scaling_function = vmax_scaling_t_dephos_tauP)
### Abeta Pathology
def add_integrated_abeta_pathology(hfpn):
# Adding places
hfpn.add_place(it_p_asec, 'p_asec', 'alpha secretase')
hfpn.add_place(it_p_APP_pm, 'p_APP_pm', 'APP in plasma membrane') # input
hfpn.add_place(it_p_sAPPa, 'p_sAPPa', 'Soluble APP alpha')
hfpn.add_place(it_p_CTF83, 'p_CTF83', 'CTF83')
hfpn.add_place(it_p_APP_endo, 'p_APP_endo', 'APP in endosomes')
hfpn.add_place(it_p_bsec, 'p_bsec', 'beta secretase')
hfpn.add_place(it_p_sAPPb, 'p_sAPPb', 'Soluble APP beta')
hfpn.add_place(it_p_CTF99, 'p_CTF99', 'CTF99')
hfpn.add_place(it_p_gsec, 'p_gsec', 'gamma secretase')
hfpn.add_place(it_p_AICD, 'p_AICD', 'AICD')
hfpn.add_place(it_p_Ab, 'p_Ab', 'Abeta')
hfpn.add_place(it_p_ApoE, 'p_ApoE', 'ApoE genotype') # gene, risk factor in AD
hfpn.add_place(it_p_age, 'p_age', 'Age risk factor') # 80 years old, risk factor in AD for BACE1 activity increase
# from other sub-nets:
hfpn.add_place(it_p_chol_PM, 'p_chol_PM', 'Cholesterol plasma membrane') # from cholesterol homeostasis
hfpn.add_place(it_p_24OHchol_intra, 'p_24OHchol_intra', 'Intracellular 24 OH Cholesterol') # from cholesterol homeostasis
hfpn.add_place(it_p_27OHchol_intra, 'p_27OHchol_intra', 'Intracellular 27 OH Cholesterol') # from cholesterol homeostasis
hfpn.add_place(it_p_RTN3_axon, 'p_RTN3_axon', 'monomeric RTN3 (axonal)') # from ER retraction
hfpn.add_place(it_p_cas3, 'p_cas3', 'Active caspase-3') # from energy metabolism
hfpn.add_place(it_p_ROS_mito, 'p_ROS_mito', 'Mitochondrial ROS') # from energy metabolism
### Adding transitions
hfpn.add_transition_with_michaelis_menten(transition_id = 't_APP_asec_cleav',
label = 'Alpha cleavage of APP',
Km = Km_t_APP_asec_cleav,
vmax = kcat_t_APP_asec_cleav,
input_place_ids = ['p_APP_pm', 'p_asec', 'p_chol_PM'],
substrate_id = 'p_APP_pm',
consumption_coefficients = [1, 0, 0],
output_place_ids = ['p_sAPPa', 'p_CTF83'],
production_coefficients = [1, 1],
vmax_scaling_function = vmax_scaling_t_APP_asec_cleav)
hfpn.add_transition_with_speed_function(transition_id = 't_asec_exp',
label = 'Alpha secretase expression',
input_place_ids = ['p_24OHchol_intra'],
firing_condition = fc_t_asec_exp,
reaction_speed_function = r_t_asec_exp,
consumption_coefficients = [0],
output_place_ids = ['p_asec'], # none
production_coefficients = [1])
hfpn.add_transition_with_speed_function(transition_id = 't_asec_degr',
label = 'Alpha secretase degradation',
input_place_ids = ['p_asec'],
firing_condition = fc_t_asec_degr,
reaction_speed_function = r_t_asec_degr,
consumption_coefficients = [1],
output_place_ids = [], # none
production_coefficients = []) # none
hfpn.add_transition_with_speed_function(transition_id = 't_APP_exp',
label = 'APP expression rate',
input_place_ids = ['p_ApoE', 'p_ROS_mito'],
firing_condition = fc_t_APP_exp,
reaction_speed_function = r_t_APP_exp,
consumption_coefficients = [0, 0],
output_place_ids = ['p_APP_pm'],
production_coefficients = [1])
hfpn.add_transition_with_speed_function(transition_id = 't_APP_endocyto',
label = 'endocytosis',
input_place_ids = ['p_APP_pm', 'p_ApoE'],
firing_condition = fc_t_APP_endocyto,
reaction_speed_function = r_t_APP_endocyto,
consumption_coefficients = [1, 0],
output_place_ids = ['p_APP_endo'],
production_coefficients = [1])
hfpn.add_transition_with_speed_function(transition_id = 't_APP_endo_event',
label = 'APP-utilizing cellular events',
input_place_ids = ['p_APP_endo'],
firing_condition = fc_t_APP_endo_event,
reaction_speed_function = r_t_APP_endo_event,
consumption_coefficients = [1],
output_place_ids = [],
production_coefficients = [])
hfpn.add_transition_with_michaelis_menten(transition_id = 't_APP_bsec_cleav',
label = 'Beta cleavage of APP',
Km = Km_t_APP_bsec_cleav,
vmax = kcat_t_APP_bsec_cleav,
input_place_ids = ['p_APP_endo', 'p_bsec', 'p_chol_PM', 'p_age'],
substrate_id = 'p_APP_endo',
consumption_coefficients = [1, 0, 0, 0],
output_place_ids = ['p_sAPPb', 'p_CTF99'],
production_coefficients = [1, 1],
vmax_scaling_function = vmax_scaling_t_APP_bsec_cleav)
hfpn.add_transition_with_speed_function( transition_id = 't_bsec_exp',
label = 'Beta secretase expression',
input_place_ids = ['p_ROS_mito', 'p_27OHchol_intra', 'p_RTN3_axon'],
firing_condition = fc_t_bsec_exp,
reaction_speed_function = r_t_bsec_exp,
consumption_coefficients = [0, 0, 0],
output_place_ids = ['p_bsec'], # none
production_coefficients = [1]) # none
hfpn.add_transition_with_speed_function( transition_id = 't_bsec_degr',
label = 'Beta secretase degradation',
input_place_ids = ['p_bsec'],
firing_condition = fc_t_bsec_degr,
reaction_speed_function = r_t_bsec_degr,
consumption_coefficients = [1],
output_place_ids = [], # none
production_coefficients = []) # none
hfpn.add_transition_with_michaelis_menten(transition_id = 't_CTF99_gsec_cleav',
label = 'Gamma secretase cleavage of CTF99',
Km = Km_t_CTF99_gsec_cleav,
vmax = kcat_t_CTF99_gsec_cleav,
input_place_ids = ['p_CTF99', 'p_gsec', 'p_chol_PM'],
substrate_id = 'p_CTF99',
consumption_coefficients = [1, 0, 0],
output_place_ids = ['p_Ab', 'p_AICD'],
production_coefficients = [1, 1],
vmax_scaling_function = vmax_scaling_t_CTF99_gsec_cleav)
hfpn.add_transition_with_speed_function( transition_id = 't_gsec_exp',
label = 'Gamma secretase expression',
input_place_ids = ['p_ROS_mito'],
firing_condition = fc_t_gsec_exp,
reaction_speed_function = r_t_gsec_exp,
consumption_coefficients = [0],
output_place_ids = ['p_gsec'], # none
production_coefficients = [1]) # none
hfpn.add_transition_with_speed_function( transition_id = 't_gsec_degr',
label = 'Gamma secretase degradation',
input_place_ids = ['p_gsec'],
firing_condition = fc_t_gsec_degr,
reaction_speed_function = r_t_gsec_degr,
consumption_coefficients = [1],
output_place_ids = [], # none
production_coefficients = []) # none
hfpn.add_transition_with_speed_function(transition_id = 't_Ab_degr',
label = 'Ab degradation',
input_place_ids = ['p_Ab'],
firing_condition = fc_t_Ab_degr,
reaction_speed_function = r_t_Ab_degr,
consumption_coefficients = [1],
output_place_ids = [],
production_coefficients = [])
def add_integrated_ER_retraction_collapse(hfpn):
### Add places for each chemical species
# Monomeric RTN3 (cycling between axonal and perinuclear regions)
hfpn.add_place(it_p_RTN3_axon, place_id="p_RTN3_axon", label="monomeric RTN3 (axonal)", continuous=True)
hfpn.add_place(it_p_RTN3_PN, place_id="p_RTN3_PN", label="monomeric RTN3 (perinuclear)", continuous=True)
# HMW RTN3 (cycling between different cellular compartments)
hfpn.add_place(it_p_RTN3_HMW_cyto, place_id="p_RTN3_HMW_cyto", label="HMW RTN3 (cytosol)", continuous=True)
hfpn.add_place(it_p_RTN3_HMW_auto, place_id="p_RTN3_HMW_auto", label="HMW RTN3 (autophagosome)", continuous=True)
hfpn.add_place(it_p_RTN3_HMW_lyso, place_id="p_RTN3_HMW_lyso", label="HMW RTN3 (degraded in lysosome)", continuous=True)
hfpn.add_place(it_p_RTN3_HMW_dys1, place_id="p_RTN3_HMW_dys1", label="HMW RTN3 (type I/III dystrophic neurites)", continuous=True)
hfpn.add_place(it_p_RTN3_HMW_dys2, place_id="p_RTN3_HMW_dys2", label="HMW RTN3 (type II dystrophic neurites)", continuous=True)
# Energy metabolism: ATP consumption
hfpn.add_place(it_p_ATP, place_id="p_ATP", label="ATP", continuous=True)
hfpn.add_place(it_p_ADP, place_id="p_ADP", label="ADP", continuous=True)
# Two places that are NOT part of this subpathway, but are temporarily added for establishing proper connections
# They will be removed upon merging of subpathways
hfpn.add_place(it_p_Ab, place_id="p_Ab", label = "Abeta peptide", continuous = True)
hfpn.add_place(it_p_tau, place_id="p_tau", label = "Unphosphorylated tau", continuous = True)
hfpn.add_place(it_p_chol_LE, place_id="p_chol_LE", label = "Cholesterol in late endosomes", continuous = True)
# Connections to other pathways
hfpn.add_place(it_p_LB, "p_LB", "Lewy body", continuous = True)
hfpn.add_place(it_p_LRRK2_mut, "p_LRRK2_mut","LRRK2 - mutated", continuous = True)
hfpn.add_transition_with_speed_function(
transition_id = 't_RTN3_exp',
label = 'Expression rate of RTN3',
input_place_ids = [],
firing_condition = fc_t_RTN3_exp,
reaction_speed_function = r_t_RTN3_exp,
consumption_coefficients = [],
output_place_ids = ['p_RTN3_PN'],
production_coefficients = [1])
hfpn.add_transition_with_speed_function(
transition_id = 't_LE_retro',
label = 'retrograde transport of LEs & ER',
input_place_ids = ['p_ATP','p_chol_LE','p_RTN3_axon', 'p_tau','p_LRRK2_mut','p_LB'],
firing_condition = fc_t_LE_retro,
reaction_speed_function = r_t_LE_retro, # get later from PD
consumption_coefficients = [ATPcons_t_LE_trans, 0, 1, 0, 0, 0], # tune these coefficients based on PD
output_place_ids = ['p_ADP','p_RTN3_PN'],
production_coefficients = [ATPcons_t_LE_trans, 1]) # tune these coefficients based on PD
hfpn.add_transition_with_speed_function(
transition_id = 't_LE_antero',
label = 'anterograde transport of LEs & ER',
input_place_ids = ['p_ATP','p_RTN3_PN', 'p_tau'], # didn't connect p_tau yet
firing_condition = fc_t_LE_antero,
reaction_speed_function = r_t_LE_antero, # get later from NPCD
consumption_coefficients = [ATPcons_t_LE_trans, 1, 0], # tune these coefficients based on PD
output_place_ids = ['p_ADP','p_RTN3_axon'],
production_coefficients = [ATPcons_t_LE_trans, 1]) # tune these coefficients based on PD
hfpn.add_transition_with_speed_function(
transition_id = 't_RTN3_aggregation',
label = 'aggregation of monomeric RTN3 into HMW RTN3',
input_place_ids = ['p_RTN3_axon', 'p_RTN3_PN', 'p_Ab'],
firing_condition = fc_t_RTN3_aggregation, # tune aggregation limit later
reaction_speed_function = r_t_RTN3_aggregation,
consumption_coefficients = [1, 1, 0],
output_place_ids = ['p_RTN3_HMW_cyto'],
production_coefficients = [1])
hfpn.add_transition_with_speed_function(
transition_id = 't_RTN3_auto',
label = 'functional autophagy of HMW RTN3',
input_place_ids = ['p_RTN3_HMW_cyto', 'p_RTN3_axon'],
firing_condition = fc_t_RTN3_auto,
reaction_speed_function = r_t_RTN3_auto,
consumption_coefficients = [1, 0],
output_place_ids = ['p_RTN3_HMW_auto'],
production_coefficients = [1])
hfpn.add_transition_with_speed_function(
transition_id = 't_RTN3_lyso',
label = 'functional delivery of HMW RTN3 to the lysosome',
input_place_ids = ['p_RTN3_HMW_auto', 'p_tau'],
firing_condition = fc_t_RTN3_lyso,
reaction_speed_function = r_t_RTN3_lyso,
consumption_coefficients = [1, 0],
output_place_ids = ['p_RTN3_HMW_lyso'],
production_coefficients = [1])
hfpn.add_transition_with_speed_function(
transition_id = 't_RTN3_dys_auto',
label = 'dysfunctional autophagy of HMW RTN3',
input_place_ids = ['p_RTN3_HMW_cyto', 'p_RTN3_axon'],
firing_condition = fc_t_RTN3_dys_auto,
reaction_speed_function = r_t_RTN3_dys_auto,
consumption_coefficients = [1, 0],
output_place_ids = ['p_RTN3_HMW_dys1'],
production_coefficients = [1]) # tune later when data are incorporated
hfpn.add_transition_with_speed_function(
transition_id = 't_RTN3_dys_lyso',
label = 'dysfunctional delivery of HMW RTN3 to the lysosome',
input_place_ids = ['p_RTN3_HMW_auto', 'p_RTN3_HMW_dys1', 'p_tau'],
firing_condition = fc_t_RTN3_dys_lyso,
reaction_speed_function = r_t_RTN3_dys_lyso,
consumption_coefficients = [1, 0, 0],
output_place_ids = ['p_RTN3_HMW_dys2'],
production_coefficients = [1]) # tune later when data are incorporated
def add_integrated_energy_metabolism(hfpn):
### Places
hfpn.add_place(it_p_cas3, place_id="p_cas3", label="Active caspase-3", continuous=True)
hfpn.add_place(it_p_ATP, place_id="p_ATP", label="Conc of ATP in mito", continuous=True)
hfpn.add_place(it_p_ADP, place_id="p_ADP", label="Conc of ADP in mito", continuous=True)
hfpn.add_place(it_p_reduc_mito, place_id="p_reduc_mito", label="Conc of reducing agents (NADH, FADH) in mito", continuous=True)
hfpn.add_place(it_p_ROS_mito, place_id="p_ROS_mito", label="Conc of ROS in mito", continuous=True)
hfpn.add_place(it_p_H2O_mito, place_id="p_H2O_mito", label="Conc of H2O in mito", continuous=True)
# Places from other sub-nets
hfpn.add_place(it_p_Ca_mito, "p_Ca_mito", "Ca - mitochondria", continuous = True)
hfpn.add_place(it_p_chol_mito, place_id="p_chol_mito", label="Chol mito", continuous=True)
# Added from other subnets
hfpn.add_place(it_p_Ab, 'p_Ab', 'Abeta')
hfpn.add_place(it_p_LRRK2_mut, "p_LRRK2_mut","LRRK2 - mutated", continuous = True)
hfpn.add_place(it_p_LB, "p_LB", "Lewy body", continuous = True)
hfpn.add_place(it_p_chol_LE, place_id="p_chol_LE", label = "Cholesterol in late endosomes", continuous = True)
hfpn.add_place(it_p_DJ1, "p_DJ1", "DJ1 mutant", continuous = True) # PD specific
# Transitions
hfpn.add_transition_with_speed_function(
transition_id = 't_krebs',
label = 'Krebs cycle',
input_place_ids = ['p_ADP', 'p_Ca_mito', "p_Ab"],
firing_condition = fc_t_krebs,
reaction_speed_function = r_t_krebs,
consumption_coefficients = [1, 0, 0],
output_place_ids = ['p_reduc_mito', 'p_ATP'],
production_coefficients = [4,1])
hfpn.add_transition_with_speed_function(
transition_id = 't_ATP_hydro_mito',
label = 'ATP hydrolysis by cellular processes',
input_place_ids = ['p_ATP'],
firing_condition = fc_t_ATP_hydro_mito,
reaction_speed_function = r_t_ATP_hydro_mito,
consumption_coefficients = [1],
output_place_ids = ['p_ADP'],
production_coefficients = [1])
hfpn.add_transition_with_speed_function(
transition_id = 't_ETC',
label = 'Electron transport chain',
input_place_ids = ['p_reduc_mito', 'p_ADP', 'p_Ca_mito', 'p_ROS_mito', 'p_chol_mito', 'p_Ab'],
firing_condition = fc_t_ETC,
reaction_speed_function = r_t_ETC,
consumption_coefficients = [22/3.96, 440, 0, 0, 0, 0],
output_place_ids = ['p_ATP', 'p_ROS_mito'],
production_coefficients = [440, 0.06])
hfpn.add_transition_with_speed_function(
transition_id = 't_ROS_metab',
label = 'ROS neutralisation',
input_place_ids = ['p_ROS_mito','p_chol_mito','p_LB','p_DJ1'],
firing_condition = fc_t_ROS_metab,
reaction_speed_function = r_t_ROS_metab,
consumption_coefficients = [1,0,0,0],
output_place_ids = ['p_H2O_mito'],
production_coefficients = [1])
# Output transitions: Cas3 for apoptosis
hfpn.add_transition_with_speed_function(
transition_id = 't_mito_dysfunc',
label = 'Mitochondrial complex 1 dysfunction',
input_place_ids = ['p_ROS_mito', 'p_Ab'],
firing_condition = fc_t_mito_dysfunc,
reaction_speed_function = r_t_mito_dysfunc,
consumption_coefficients = [1, 0],
output_place_ids = ['p_cas3'],
production_coefficients = [1])
# Cas3 inactivation
hfpn.add_transition_with_speed_function(
transition_id = 't_cas3_inact',
label = 'Caspase-3 inactivation',
input_place_ids = ['p_cas3'],
firing_condition = fc_t_cas3_inact,
reaction_speed_function = r_t_cas3_inact,
consumption_coefficients = [1],
output_place_ids = [],
production_coefficients = [])
hfpn.add_transition_with_speed_function(
transition_id = 't_ROS_gener_Ab',
label = 'ROS generation by Abeta',
input_place_ids = ['p_Ab'],
firing_condition = fc_t_ROS_gener_Ab,
reaction_speed_function = r_t_ROS_gener_Ab,
consumption_coefficients = [0],
output_place_ids = ["p_ROS_mito"],
production_coefficients = [1])
# NPCD-specific transition
hfpn.add_transition_with_speed_function(
transition_id = 't_TRADD_actv',
label = 'Activation of TRADD',
input_place_ids = ['p_chol_LE'],
firing_condition = fc_t_TRADD_actv,
reaction_speed_function = r_t_TRADD_actv,
consumption_coefficients = [0], #k_t_TRADD_actv * a['p_chol_LE']**(0.2)
output_place_ids = ['p_cas3'],
production_coefficients = [1])
def add_integrated_calcium_homeostasis(hfpn):
### Add places
hfpn.add_place(it_p_Ca_cyto, "p_Ca_cyto", "Ca - cytosol", continuous = True)
hfpn.add_place(it_p_Ca_mito, "p_Ca_mito", "Ca - mitochondria", continuous = True)
hfpn.add_place(it_p_Ca_ER, "p_Ca_ER", "Ca - ER", continuous = True)
hfpn.add_place(it_p_ADP, "p_ADP","ADP - Calcium ER import", continuous = True)
hfpn.add_place(it_p_ATP, "p_ATP","ATP - Calcium ER import", continuous = True)
    # Discrete on/off switches for calcium pacemaking
hfpn.add_place(1, "p_Ca_extra", "on1 - Ca - extracellular", continuous = False)
hfpn.add_place(0, "p_on2","on2", continuous = False)
hfpn.add_place(0, "p_on3","on3", continuous = False)
hfpn.add_place(0, "p_on4","on4", continuous = False)
# NPCD-specific
hfpn.add_place(initial_tokens=it_p_Ca_LE, place_id="p_Ca_LE", label="Ca conc in LE", continuous=True)
# Connections to other pathways
hfpn.add_place(it_p_LRRK2_mut, "p_LRRK2_mut","LRRK2 - mutated", continuous = True)
hfpn.add_place(it_p_NPC1_LE, place_id="p_NPC1_LE", label="Mutated NPC1 in LE", continuous=True)
hfpn.add_place(it_p_chol_LE,place_id="p_chol_LE", label="Chol LE", continuous=True)
hfpn.add_place(it_p_PERK_ER, place_id="p_PERK_ER", label="Conc of PERK in ER", continuous=True)
### Add transitions
hfpn.add_transition_with_speed_function(
transition_id = 't_Ca_imp',
label = 'VGCC/NMDA import channels',
input_place_ids = ['p_Ca_extra'],
firing_condition = fc_t_Ca_imp,
reaction_speed_function = r_t_Ca_imp,
consumption_coefficients = [0], # Need to review this
output_place_ids = ['p_Ca_cyto'],
production_coefficients = [1]) # Need to review this
hfpn.add_transition_with_speed_function(
transition_id = 't_mCU',
label = 'Ca import into mitochondria via mCU',
input_place_ids = ['p_Ca_cyto', 'p_Ca_mito'],
firing_condition = fc_t_mCU,
reaction_speed_function = r_t_mCU,
consumption_coefficients = [1,0],
output_place_ids = ['p_Ca_mito'],
production_coefficients = [1])
hfpn.add_transition_with_speed_function(
transition_id = 't_MAM',
label = 'Ca transport from ER to mitochondria',
input_place_ids = ['p_Ca_ER', 'p_Ca_mito'],
firing_condition = fc_t_MAM,
reaction_speed_function = r_t_MAM,
consumption_coefficients = [1,0],
output_place_ids = ['p_Ca_mito'],
production_coefficients = [1])
hfpn.add_transition_with_speed_function(
transition_id = 't_RyR_IP3R',
label = 'Ca export from ER',
input_place_ids = ['p_Ca_extra', 'p_Ca_ER', 'p_PERK_ER'],
firing_condition = fc_t_RyR_IP3R,
reaction_speed_function = r_t_RyR_IP3R,
consumption_coefficients = [0,1,0],
output_place_ids = ['p_Ca_cyto'],
production_coefficients = [1])
hfpn.add_transition_with_speed_function(
transition_id = 't_SERCA',
label = 'Ca import to ER',
input_place_ids = ['p_Ca_cyto','p_ATP','p_NPC1_LE'],
firing_condition = fc_t_SERCA,
reaction_speed_function = r_t_SERCA,
consumption_coefficients = [1,0.5,0],
output_place_ids = ['p_Ca_ER','p_ADP'],
production_coefficients = [1,0.5]) # Need to review this
hfpn.add_transition_with_speed_function(
transition_id = 't_NCX_PMCA',
label = 'Ca efflux to extracellular space',
input_place_ids = ['p_Ca_cyto','p_on3','p_NPC1_LE'],
firing_condition = lambda a: a['p_on3']==1,
reaction_speed_function = r_t_NCX_PMCA,
consumption_coefficients = [1,0,0],
output_place_ids = [],
production_coefficients = [])
hfpn.add_transition_with_speed_function(
transition_id = 't_mNCLX',
label = 'Ca export from mitochondria via mNCLX',
input_place_ids = ['p_Ca_mito', 'p_LRRK2_mut'],
firing_condition = fc_t_mNCLX,
reaction_speed_function = r_t_mNCLX,
consumption_coefficients = [1, 0],
output_place_ids = ['p_Ca_cyto'],
production_coefficients = [1])
    # Discrete on/off switches for calcium pacemaking
hfpn.add_transition_with_speed_function(
transition_id = 't_A',
label = 'A',
input_place_ids = ['p_on4'],
firing_condition = lambda a: a['p_on4']==1,
reaction_speed_function = lambda a: 1,
consumption_coefficients = [1],
output_place_ids = ['p_Ca_extra'],
production_coefficients = [1],
delay=0.5)
hfpn.add_transition_with_speed_function(
transition_id = 't_B',
label = 'B',
input_place_ids = ['p_Ca_extra'],
firing_condition = lambda a: a['p_Ca_extra']==1,
reaction_speed_function = lambda a: 1,
consumption_coefficients = [1],
output_place_ids = ['p_on2'],
production_coefficients = [1],
delay=0.5)
hfpn.add_transition_with_speed_function(
transition_id = 't_C',
label = 'C',
input_place_ids = ['p_on2'],
firing_condition = lambda a: a['p_on2']==1,
reaction_speed_function = lambda a: 1,
consumption_coefficients = [1],
output_place_ids = ['p_on3'],
production_coefficients = [1],
delay=0)
hfpn.add_transition_with_speed_function(
transition_id = 't_D',
label = 'D',
input_place_ids = ['p_on3'],
firing_condition = lambda a: a['p_on3']==1,
reaction_speed_function = lambda a: 1,
consumption_coefficients = [1],
output_place_ids = ['p_on4'],
production_coefficients = [1],
delay=0.5)
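    # The discrete transitions t_A..t_D above circulate a single token through
    # p_on4 -> p_Ca_extra -> p_on2 -> p_on3 -> p_on4 with fixed delays,
    # implementing the periodic on/off calcium-pacemaking clock.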
# Link to energy metabolism in that it needs ATP replenishment
hfpn.add_transition_with_mass_action(
transition_id = 't_NaK_ATPase',
label = 'NaK ATPase',
rate_constant = k_t_NaK_ATPase,
input_place_ids = ['p_ATP', 'p_on3'],
firing_condition = lambda a: a['p_on3']==1,
consumption_coefficients = [1,0],
output_place_ids = ['p_ADP'],
production_coefficients = [1])
# NPCD-specific transitions
hfpn.add_transition_with_speed_function(
transition_id = 't_Ca_cyto_LE',
label = 'NAADP facilitated transport of Ca from cyto to LE',
input_place_ids = ['p_NPC1_LE', 'p_Ca_cyto'],
firing_condition = fc_t_Ca_cyto_LE,
reaction_speed_function = r_t_Ca_cyto_LE,
consumption_coefficients = [0,1],
output_place_ids = ['p_Ca_LE'],
production_coefficients = [1])
hfpn.add_transition_with_speed_function(
transition_id = 't_TRPML1',
label = 'TRPML1 facilitated transport of Ca from LE to cyto',
input_place_ids = ['p_Ca_LE', 'p_chol_LE'],
firing_condition = fc_t_TRPML1,
reaction_speed_function = r_t_TRPML1,
consumption_coefficients = [1,0],
output_place_ids = ['p_Ca_cyto'],
production_coefficients = [1])
def add_therapeutics(hfpn):
## Places
hfpn.add_place(initial_tokens=it_p_NAC, place_id="p_NAC", label="Conc of NAC", continuous=True)
    hfpn.add_transition(transition_id = 't_ROS_metab',
                        label = 'Oxidation of proteins/lipids involved in ETC',
                        input_place_ids = ['p_ROS_mito', 'p_chol_mito', 'p_NAC'],
                        firing_condition = fc_t_ROS_metab,
                        consumption_speed_functions = [
                            # base rate when no NAC is present, plus an NAC-boosted rate
                            # (one NAC token raises the rate by 0.1%)
                            lambda a: (k_t_ROS_metab * a['p_ROS_mito'] / a['p_chol_mito']**(1.5) * (a['p_NAC'] == 0)
                                       + k_t_ROS_metab * a['p_ROS_mito'] / a['p_chol_mito']**(1.5) * (1 + a['p_NAC'] / 1000) * (a['p_NAC'] > 0)),
                            lambda a: 0,   # p_chol_mito is not consumed
                            lambda a: 0],  # p_NAC is not consumed
                        output_place_ids = ['p_H2O_mito'],
                        production_speed_functions = [
                            lambda a: (k_t_ROS_metab * a['p_ROS_mito'] / a['p_chol_mito']**(1.5) * (a['p_NAC'] == 0)
                                       + k_t_ROS_metab * a['p_ROS_mito'] / a['p_chol_mito']**(1.5) * a['p_NAC'] / 1000 * (a['p_NAC'] > 0))])
    # NAC does NOT describe the molecule count but rather an artificial concentration in which one token will increase the rate of ROS_metab by 0.1%
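# Composition sketch for the sub-network builders defined above. This is an
# assumption-laden example: the HFPN class and its constructor arguments live
# elsewhere in this repository, so the bare HFPN() call below is hypothetical
# and may need adjusting to the real signature.
def build_integrated_model_sketch():
    hfpn = HFPN()  # hypothetical constructor call
    add_integrated_abeta_pathology(hfpn)
    add_integrated_ER_retraction_collapse(hfpn)
    add_integrated_energy_metabolism(hfpn)
    add_integrated_calcium_homeostasis(hfpn)
    add_therapeutics(hfpn)
    return hfpn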
| [
"[email protected]"
] | |
8096ec71ed9d6f5d130eb22797cc799e0e2853b4 | 587eed32179cdbf10b4626a3a238d5923af7459c | /deep_learning/eager.py | 843bc8349093cfc3f5e0b2f8652b017bd5de8a5b | [] | no_license | shushanxingzhe/python_learning | fad57fcf607efe81f769d276a6c4390726c5f1bb | 7fe26ad612790328edab1627e6c1c144a21d79fc | refs/heads/master | 2022-09-12T15:11:50.377981 | 2022-08-04T05:43:51 | 2022-08-04T05:43:51 | 96,225,151 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,796 | py | import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
a = [1,2,3,4,5,6]
print(a[:-2])
print(a[2:])
tf.keras.layers.Embedding()
exit()
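# NOTE: exit() above ends the script here; everything below never runs and is
# kept only as scratch notes on the TF/Keras API.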
tf.enable_eager_execution()
tf.logging.set_verbosity(tf.logging.ERROR)
tf.set_random_seed(123)
# Load dataset.
dftrain = pd.read_csv('https://storage.googleapis.com/tfbt/titanic_train.csv')
dfeval = pd.read_csv('https://storage.googleapis.com/tfbt/titanic_eval.csv')
y_train = dftrain.pop('survived')
y_eval = dfeval.pop('survived')
dftrain['class'].value_counts().plot(kind='barh')
fc = tf.feature_column
CATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck',
'embark_town', 'alone']
NUMERIC_COLUMNS = ['age', 'fare']
def one_hot_cat_column(feature_name, vocab):
return fc.indicator_column(fc.categorical_column_with_vocabulary_list(feature_name, vocab))
feature_columns = []
for feature_name in CATEGORICAL_COLUMNS:
# Need to one-hot encode categorical features.
vocabulary = dftrain[feature_name].unique()
feature_columns.append(one_hot_cat_column(feature_name, vocabulary))
for feature_name in NUMERIC_COLUMNS:
feature_columns.append(fc.numeric_column(feature_name, dtype=tf.float32))
example = dftrain.head(1)
class_fc = one_hot_cat_column('class', ('First', 'Second', 'Third'))
print('Feature value: "{}"'.format(example['class'].iloc[0]))
print('One-hot encoded: ', fc.input_layer({'class': pd.Series(['Second'])}, [class_fc]).numpy())
tf.data.Dataset.from_tensor_slices()
tf.keras.preprocessing.image.ImageDataGenerator()
tf.keras.layers.Lambda()
plt.subplot()
tf.keras.applications.vgg19.VGG19()
tf.nn.rnn_cell.MultiRNNCell()
tf.estimator.Estimator()
tf.keras.Model()
tf.estimator.DNNClassifier()
tf.layers.dense()
tf.train.shuffle_batch()
| [
"[email protected]"
] | |
1053793324eeab8a4a8b7f8c0312e7bda71cd5f4 | f1f7781fd5290e4755dc0d451caeacdceb022de2 | /dataviva/apps/scholar/models.py | 7c4caadf6c407782cda8ebc1a0cc0ebb3a5fea62 | [
"MIT"
] | permissive | jaotta/dataviva-site | 62a4f73d288c68bbb469155dfcb93dbc992fc98b | 663d98b1c692bf2f5f22e4a71564d104396fff46 | refs/heads/master | 2021-01-18T01:24:10.321142 | 2016-05-17T18:13:57 | 2016-05-17T18:13:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,821 | py | from dataviva import db
from sqlalchemy import ForeignKey
article_keyword_table = db.Table(
'scholar_article_keyword',
db.Column('article_id', db.Integer(), db.ForeignKey('scholar_article.id')),
db.Column('keyword_id', db.Integer(), db.ForeignKey('scholar_keyword.id'))
)
class Article(db.Model):
__tablename__ = 'scholar_article'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(400))
abstract = db.Column(db.Text())
theme = db.Column(db.String(250))
postage_date = db.Column(db.DateTime)
approval_status = db.Column(db.Boolean)
authors = db.relationship('AuthorScholar', backref='scholar_article', lazy='eager')
keywords = db.relationship('KeyWord', secondary=article_keyword_table)
def authors_str(self):
author_names = [author.name for author in self.authors]
return ', '.join(author_names)
def keywords_str(self):
keyword_names = [keyword.name for keyword in self.keywords]
return ', '.join(keyword_names)
def date_str(self):
return self.postage_date.strftime('%d/%m/%Y')
def __repr__(self):
return '<Article %r>' % (self.title)
class AuthorScholar(db.Model):
__tablename__ = 'scholar_author'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50))
article_id = db.Column(db.Integer, ForeignKey('scholar_article.id'))
def __init__(self, name=None):
self.name = name
def __repr__(self):
return '<AuthorScholar %r>' % (self.name)
class KeyWord(db.Model):
__tablename__ = 'scholar_keyword'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50))
def __init__(self, name=None):
self.name = name
def __repr__(self):
return '<Keyword %r>' % (self.name)
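# Usage sketch (hypothetical; assumes an application context and a configured
# db session, neither shown here):
# article = Article(title='...', theme='...')
# article.authors.append(AuthorScholar('Ada Lovelace'))
# article.keywords.append(KeyWord('history'))
# db.session.add(article)
# db.session.commit()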
| [
"[email protected]"
] | |
26420ea46b9cbaa38d39ae739afaead6359c11e5 | 3f395a44a8e1255de5eebfe936c170fd2b1e36ef | /tkinter_for_cards.py | b703acb98fcc5914fc97140f0d91b457c7af0a8c | [] | no_license | cmd16/game-bs | cf094d14c0a1ec3f3f7812bdb7f0b3ecf2fe2a3c | 6cd89a393dcb90f4fd97025ef93472690f74f0d5 | refs/heads/master | 2020-06-21T10:07:29.492562 | 2016-12-21T18:40:46 | 2016-12-21T18:40:46 | 74,793,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,097 | py | """Catherine DeJager (cmd38)
12/15/2016
CS 106 Final Project: BS
A widget to set up the game by getting user input (names, computer player options, etc.) and then creating Player
objects using the given input as parameters."""
from tkinter import *
from player import *
from world import *
from deck import *
class SetupWidget:
"""A widget to set up the game by getting user input (names, computer player options, etc.) and then creating Player
objects using the given input as parameters."""
def __init__(self, world=None, master=None):
self._window = master
self._window.title("Setup")
self._world = world
self._playername = StringVar()
self._playerentry = Entry(self._window, textvariable=self._playername)
self._playerentry.pack()
self._cpuvar = IntVar()
self._cpuoption = Checkbutton(self._window, variable=self._cpuvar, text='Computer player?', command=self.toggleCpuOptions)
self._playername.set("Enter name here")
self._addplayerbutton = Button(self._window, text="Add player", command=self.createPlayer)
self._addplayerbutton.pack(side=BOTTOM)
self._invalidlabel = Label(self._window)
self._validlabel = Label(self._window)
self._difficultyvar = IntVar()
self._difficultyslider = Scale(self._window, variable=self._difficultyvar, label='Difficulty level:',
orient=HORIZONTAL, from_=1, to=5, sliderlength=15)
self._riskvar = IntVar()
self._riskslider = Scale(self._window, variable=self._riskvar, label='Risk level:',
orient=HORIZONTAL, from_=1, to=5, sliderlength=15)
self._randomvar = IntVar()
self._randomslider = Scale(self._window, variable=self._randomvar, label='Randomness level:', orient=HORIZONTAL,
from_=1, to=5, sliderlength=15)
self._pbvar = BooleanVar()
self._pbcheckbox = Checkbutton(self._window, variable=self._pbvar, text='Do you want the computer to tell you when it has successfully lied?')
self._verbosevar = BooleanVar()
self._verbosecheckbox = Checkbutton(self._window, variable=self._verbosevar, text='Turn on verbose output?')
self._verbosecheckbox.pack()
self._cpuoption.pack()
def createPlayer(self): # first need to check that all inputs are valid
"""Checks that the user has entered valid input. If the input is invalid, show an error message.
If the input is valid, create a player with the given input and add it to self's world's list of players."""
self._invalidlabel.pack_forget()
self._validlabel.pack_forget()
        if not self._playername.get().isalpha():  # check that the name contains only letters
self._invalidlabel['text'] = "Invalid input: please use only letters with no spaces."
self._invalidlabel.pack()
return
for item in self._world.getPlayerList():
if self._playername.get() == item.name:
self._invalidlabel['text'] = "Invalid input: another player already has the name %s." % item.name
self._invalidlabel.pack()
return
self._addplayerbutton['state'] = NORMAL
self._validlabel.config(text='Player added: ' + self._playername.get())
self._validlabel.pack()
if self._cpuvar.get() == 1:
self._world.createPlayer(self._playername.get(), self._verbosevar.get(), self._difficultyvar.get(),
self._riskvar.get(), self._pbvar.get(), self._randomvar.get())
else:
self._world.createPlayer(self._playername.get(), self._verbosevar.get())
def toggleCpuOptions(self):
"""If the cpu checkbox is checked, show the cpu options. If the cpu checkbox is unchecked, hide the cpu options."""
if self._cpuvar.get() == 1:
self._difficultyslider.pack()
self._riskslider.pack()
self._pbcheckbox.pack()
self._randomslider.pack()
else:
self._difficultyslider.pack_forget()
self._riskslider.pack_forget()
self._pbcheckbox.pack_forget()
self._randomslider.pack_forget()
def printcpuvalues(self):
"""Prints the values of the input for the cpu options. This method used for debugging only."""
print('difficulty:', self._difficultyvar.get(), 'risk:', self._riskvar.get(), 'pb:', self._pbvar.get(),
'verbose:', self._verbosevar.get())
if "__name__" == "__main__":
this_world = World()
this_world._playerlist.append(Player("Joe"))
root = Tk()
myApp = SetupWidget(world=this_world, master=root)
root.mainloop()
print(myApp._world.getPlayerNameStrings())
for item in this_world._playerlist:
print(type(item))
print(myApp._world.getCpuStrings())
myApp.printcpuvalues()
else:
    this_world = World()
    root = Tk()
    myApp = SetupWidget(world=this_world, master=root)
    root.mainloop()
| [
"[email protected]"
] | |
6ff5dcd49a138f3324b728e08c573bab44fb4edb | d051f3fe9fda31b72fa0ddce67aa1f4293c7c37c | /cloud/inference/prune_idle_machines.py | a22ef00461da7c89de8be940f0c2e86b4b3a221f | [
"BSD-3-Clause"
] | permissive | davmre/sigvisa | 4e535215b6623310d8f5da64258f6fa9a378f9fd | 91a1f163b8f3a258dfb78d88a07f2a11da41bd04 | refs/heads/master | 2021-03-24T10:24:52.307389 | 2018-01-05T19:33:23 | 2018-01-05T19:33:23 | 2,321,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,552 | py | import os
import subprocess32 as subprocess
import time
import numpy as np
import shutil
import uuid
import cPickle as pickle
from optparse import OptionParser
from sigvisa.utils.fileutils import mkdir_p
from sigvisa.learn.extract_fits_from_jointgp import extract_jointgp_fits
from sigvisa.cloud.inference.remote_job_management import running_processes
from collections import defaultdict
from fabric.api import env
env.user = 'vagrant'
env.key_filename = '/home/dmoore/.ssh/fabric_is_terrible.key'
def get_idle_hosts(jobfile):
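    # jobfile holds a pickled list of (jobid, cmd, sta, evidfile, host, pid)
    # tuples; a host counts as idle when none of its recorded pids is still running.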
with open(jobfile, "rb") as f:
jobs = pickle.load(f)
pids_by_host = defaultdict(set)
for (jobid, cmd, sta, evidfile, host, pid) in jobs:
pids_by_host[host].add(pid)
idle_hosts = []
for host, pids in pids_by_host.items():
try:
running_pids = running_processes(host)
except:
print "no response from", host, "skipping"
continue
alive=False
for pid in pids:
if pid in running_pids:
alive = True
break
if not alive:
idle_hosts.append(host)
return idle_hosts
def main():
parser = OptionParser()
parser.add_option("--jobfile", dest="jobfile", default="", type="str",
help="file in which to record job progress")
(options, args) = parser.parse_args()
idle_hosts = get_idle_hosts(options.jobfile)
for host in idle_hosts:
print host
print "done"
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
d8570678c24b0c3f9928ad28d6c32dc0f742e9ff | 8c72454604a27bd607df55bb7f9fcc65e83d4428 | /3_trainer/classification_models/resnet152_train.py | bb5b254a96dd2a27627fc711b5b8808761f856a5 | [] | no_license | Guluna/vbd_cxr | 8c8adcb634769c6ea87bf5b4a5b267772b888d58 | 6519c9080effea89a0ff79b71a05d203fcc849ea | refs/heads/main | 2023-04-03T15:35:33.581852 | 2021-04-16T06:24:34 | 2021-04-16T06:24:34 | 358,491,349 | 0 | 0 | null | 2021-04-16T05:55:58 | 2021-04-16T05:55:57 | null | UTF-8 | Python | false | false | 15,423 | py | # %% --------------------
import os
import sys
from datetime import datetime
from dotenv import load_dotenv
# local
# env_file = "D:/GWU/4 Spring 2021/6501 Capstone/VBD CXR/PyCharm " \
# "Workspace/vbd_cxr/6_environment_files/local.env "
# cerberus
env_file = "/home/ssebastian94/vbd_cxr/6_environment_files/cerberus.env"
load_dotenv(env_file)
# add HOME DIR to PYTHONPATH
sys.path.append(os.getenv("HOME_DIR"))
# %% --------------------START HERE
# https://www.kaggle.com/corochann/vinbigdata-2-class-classifier-complete-pipeline
import albumentations
from common.classifier_models import initialize_model, get_param_to_optimize, \
set_parameter_requires_grad
from common.utilities import UnNormalize
import numpy as np
import torch
import torchvision
from torch.utils.data import DataLoader, WeightedRandomSampler
from common.CustomDatasets import VBD_CXR_2_Class_Train
from collections import Counter
from torch.utils import tensorboard
from pathlib import Path
import shutil
# %% --------------------set seeds
# seed = 42
# torch.manual_seed(seed)
# torch.cuda.manual_seed(seed)
# np.random.seed(seed)
# random.seed(seed)
# torch.backends.cudnn.deterministic = True
# %% --------------------DIRECTORIES and VARIABLES
IMAGE_DIR = os.getenv("IMAGE_DIR")
MERGED_DIR = os.getenv("MERGED_DIR")
SAVED_MODEL_DIR = os.getenv("SAVED_MODEL_DIR")
TENSORBOARD_DIR = os.getenv("TENSORBOARD_DIR")
# model name
model_name = "resnet152"
# %% --------------------TENSORBOARD DIRECTORY INITIALIZATION
train_tensorboard_dir = f"{TENSORBOARD_DIR}/2_class_classifier/{model_name}/train"
validation_tensorboard_dir = f"{TENSORBOARD_DIR}/2_class_classifier/{model_name}/validation"
# if logs already exist then delete them
train_dirpath = Path(train_tensorboard_dir)
if train_dirpath.exists() and train_dirpath.is_dir():
shutil.rmtree(train_dirpath)
validation_dirpath = Path(validation_tensorboard_dir)
if validation_dirpath.exists() and validation_dirpath.is_dir():
shutil.rmtree(validation_dirpath)
# create new tensorboard events directories
train_writer = tensorboard.SummaryWriter(train_tensorboard_dir)
validation_writer = tensorboard.SummaryWriter(validation_tensorboard_dir)
# %% --------------------Data transformations using albumentations
# https://albumentations.ai/docs/api_reference/augmentations/transforms/
train_transformer = albumentations.Compose([
# augmentation operations
albumentations.augmentations.transforms.RandomBrightnessContrast(p=0.3),
albumentations.augmentations.transforms.ShiftScaleRotate(rotate_limit=5, p=0.4),
# horizontal flipping
albumentations.augmentations.transforms.HorizontalFlip(p=0.4),
# resize operation
albumentations.Resize(height=512, width=512, always_apply=True),
# this normalization is performed based on ImageNet statistics per channel
# mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)
albumentations.augmentations.transforms.Normalize()
])
validation_transformer = albumentations.Compose([
# resize operation
albumentations.Resize(height=512, width=512, always_apply=True),
# this normalization is performed based on ImageNet statistics per channel
# mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)
albumentations.augmentations.transforms.Normalize()
])
# %% --------------------DATASET
# 1 = abnormal
# 0 = normal
train_data_set = VBD_CXR_2_Class_Train(IMAGE_DIR,
MERGED_DIR + "/512/unmerged/90_percent_train"
"/2_class_classifier"
"/90_percent/train_df.csv",
majority_transformations=train_transformer)
validation_data_set = VBD_CXR_2_Class_Train(IMAGE_DIR,
MERGED_DIR + "/512/unmerged/90_percent_train"
"/2_class_classifier"
"/10_percent/holdout_df.csv",
majority_transformations=validation_transformer)
# %% --------------------WEIGHTED RANDOM SAMPLER
# weighted random sampler to handle class imbalance
# https://discuss.pytorch.org/t/how-to-handle-imbalanced-classes/11264/2
# Get all the target classes
target_list = train_data_set.targets
# get the count
class_counts = Counter(target_list)
# Get the class weights. Class weights are the reciprocal of the number of items per class
class_weight = 1. / np.array(list(class_counts.values()))
# assign weights to each target
target_weight = []
for t in target_list:
target_weight.append(class_weight[int(t)])
# create sampler based on weights
sampler = WeightedRandomSampler(weights=target_weight, num_samples=len(train_data_set),
replacement=True)
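# For intuition (illustrative numbers, not the real counts): with
# class_counts = {0: 900, 1: 100} the weights are [1/900, 1/100], so each
# minority-class sample is about 9x as likely to be drawn per draw.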
# %% --------------------DATALOADER
BATCH_SIZE = 8
workers = int(os.getenv("NUM_WORKERS"))
# # perform weighted random sampler for training only. NOTE: sampler shuffles the data by default
train_data_loader = torch.utils.data.DataLoader(
train_data_set, batch_size=BATCH_SIZE, num_workers=workers, sampler=sampler)
validation_data_loader = torch.utils.data.DataLoader(
validation_data_set, batch_size=BATCH_SIZE, shuffle=True, num_workers=workers)
# %% --------------------WEIGHTS FOR LOSS
target_list = train_data_set.targets
class_counts = Counter(target_list)
# https://pytorch.org/docs/stable/generated/torch.nn.BCEWithLogitsLoss.html
# negative/positive, thus giving more weightage to positive class
pos_weight = class_counts[0] / class_counts[1]
# convert to Tensor vector where number of elements in vector = number of classes
# we have 1 class: 0 = Normal, 1 = Abnormal
pos_weight = torch.as_tensor(pos_weight, dtype=float)
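# For intuition (illustrative numbers, not the real counts): 900 normal vs.
# 100 abnormal samples gives pos_weight = 9.0, up-weighting the positive
# (abnormal) term inside BCEWithLogitsLoss.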
# %% --------------------CRITERION
# https://stackoverflow.com/questions/53628622/loss-function-its-inputs-for-binary-classification-pytorch
# https://visualstudiomagazine.com/Articles/2020/11/04/pytorch-training.aspx?Page=2
# https://pytorch.org/docs/stable/generated/torch.nn.BCEWithLogitsLoss.html
criterion = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight)
# %% --------------------UnNormalize
unnormalizer = UnNormalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
# %% --------------------
# OVERFITTER
# train_image_ids, train_image, train_target = iter(train_data_loader).next()
# validation_image_ids, validation_image, validation_target = iter(validation_data_loader).next()
# %% --------------------
# define device
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
print(device)
# %% --------------------MODEL INSTANCE
# feature_extract_param = True means all layers frozen except the last user added layers
# feature_extract_param = False means all layers unfrozen and entire network learns new weights
# and biases
feature_extract_param = True
# 0 = normal CXR or 1 = abnormal CXR
# single label binary classifier
num_classes = 1
# input_size is minimum constraint
model, params_to_update = initialize_model(model_name, num_classes, feature_extract_param,
use_pretrained=True)
# %% --------------------HYPER-PARAMETERS
TOTAL_EPOCHS = 30
REDUCED_LR = 1e-4
# for first 5 EPOCHS train with all layers frozen except last, after that train with lowered LR
# with all layers unfrozen
INITIAL_EPOCHS = 5
INITIAL_LR = REDUCED_LR * 100
# %% --------------------OPTIMIZER
optimizer = torch.optim.Adam(params_to_update, lr=INITIAL_LR)
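# Two-phase fine-tuning: for the first INITIAL_EPOCHS only the newly added head
# trains at INITIAL_LR; afterwards the whole network is unfrozen and trained at
# the lower REDUCED_LR (see the epoch loop below).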
# %% --------------------move to device
model.to(device)
# %% --------------------TRAINING LOOP
print_iteration_frequency = 50
freeze_all_flag = True
train_iter = 0
val_iter = 0
# track train loss, validation loss, train accuracy, validation accuracy
val_acc_history_arr = []
train_acc_history_arr = []
train_loss_arr = []
valid_loss_arr = []
# condition to save model
lowest_loss = 10000000
best_model_found_epoch = 0
# if directory does not exist then create it
saved_model_dir = Path(f"{SAVED_MODEL_DIR}/2_class_classifier/{model_name}")
if not saved_model_dir.exists():
os.makedirs(saved_model_dir)
saved_model_path = f"{saved_model_dir}/{model_name}.pt"
print("Program started")
# start time
start = datetime.now()
# https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
for epoch in range(TOTAL_EPOCHS):
print('Epoch {}/{}'.format(epoch, TOTAL_EPOCHS - 1))
print('-' * 10)
if (epoch >= INITIAL_EPOCHS) and freeze_all_flag:
        # run the steps below exactly once, when the current epoch reaches INITIAL_EPOCHS
freeze_all_flag = False
# unfreeze all layers
set_parameter_requires_grad(model, False)
# get parameters to optimize
params = get_param_to_optimize(model, False)
# update optimizer and reduce LR
optimizer = torch.optim.Adam(params, lr=REDUCED_LR)
# ----------------------TRAINING PHASE----------------------
model.train()
# to track loss and accuracy for training phase at iteration level
running_loss = 0.0
running_corrects = 0
# image grid flag
train_epoch_flag = True
# iterate the data
# overfitting code
# for _, images, targets in zip([train_image_ids], [train_image], [train_target]):
for _, images, targets in train_data_loader:
# send the input to device
images = images.to(device)
targets = targets.to(device)
# visualize only the first batch in epoch to tensorboard
if train_epoch_flag:
# revert the normalization
unnormalized_images = unnormalizer(images)
# add images to tensorboard
img_grid = torchvision.utils.make_grid(unnormalized_images)
train_writer.add_image("train", img_tensor=img_grid, global_step=epoch)
# turn off image_flag for other batches for current epoch
train_epoch_flag = False
# optimizer.zero_grad() is critically important because it resets all weight and bias
# gradients to 0
# we are updating W and B for each batch, thus zero the gradients in each batch
optimizer.zero_grad()
# forward pass
# track the history in training mode
with torch.set_grad_enabled(True):
# make prediction
outputs = model(images)
# find loss
train_loss = criterion(outputs.view(-1), targets)
# converting logits to probabilities and keeping threshold of 0.5
# https://discuss.pytorch.org/t/multilabel-classification-how-to-binarize-scores-how-to-learn-thresholds/25396
preds = (torch.sigmoid(outputs.view(-1)) > 0.5).to(torch.float32)
# loss.backward() method uses the back-propagation algorithm to compute all the
# gradients associated with the weights and biases that a part of the network
# containing loss_val
train_loss.backward()
# optimizer.step() statement uses the newly computed gradients to update all the
# weights and biases in the neural network so that computed output values will get
# closer to the target values
optimizer.step()
# iteration level statistics
running_loss += train_loss.item() * images.size(0)
running_corrects += torch.sum(preds == targets.data)
if train_iter % print_iteration_frequency == 0:
print(
f'Train Iteration #{train_iter}:: {train_loss.item()} Acc: {torch.sum(preds == targets.data).item() / (len(preds))}')
train_iter += 1
# epoch level statistics take average of batch
epoch_loss = running_loss / len(train_data_loader.dataset)
epoch_acc = running_corrects.double() / len(train_data_loader.dataset)
print('Epoch Training:: Loss: {:.4f} Acc: {:.4f}'.format(epoch_loss, epoch_acc))
# track using tensorboard
train_writer.add_scalar("loss", epoch_loss, global_step=epoch)
train_writer.add_scalar("accuracy", epoch_acc, global_step=epoch)
train_loss_arr.append(epoch_loss)
train_acc_history_arr.append(epoch_acc.item())
# ----------------------VALIDATION PHASE----------------------
# https://visualstudiomagazine.com/Articles/2020/11/24/pytorch-accuracy.aspx?Page=2
model.eval()
# to track loss and accuracy for validation phase at iteration level
running_loss = 0.0
running_corrects = 0
# iterate the data
# overfitting code
# for _, images, targets in zip([validation_image_ids], [validation_image], [validation_target]):
for _, images, targets in validation_data_loader:
# send the input to device
images = images.to(device)
targets = targets.to(device)
# forward pass
# dont track the history in validation mode
with torch.set_grad_enabled(False):
# make prediction
outputs = model(images)
# find loss
val_loss = criterion(outputs.view(-1), targets)
# converting logits to probabilities and keeping threshold of 0.5
# https://discuss.pytorch.org/t/multilabel-classification-how-to-binarize-scores-how-to-learn-thresholds/25396
preds = (torch.sigmoid(outputs.view(-1)) > 0.5).to(torch.float32)
# iteration level statistics
running_loss += val_loss.item() * images.size(0)
running_corrects += torch.sum(preds == targets.data)
if val_iter % print_iteration_frequency == 0:
print(
f'Validation Iteration #{val_iter}:: {val_loss.item()} Acc: {torch.sum(preds == targets.data).item() / (len(preds))}')
val_iter += 1
# epoch level statistics take average of batch
epoch_loss = running_loss / len(validation_data_loader.dataset)
epoch_acc = running_corrects.double() / len(validation_data_loader.dataset)
print('Epoch Validation:: Loss: {:.4f} Acc: {:.4f}'.format(epoch_loss, epoch_acc))
# track using tensorboard
validation_writer.add_scalar("loss", epoch_loss, global_step=epoch)
validation_writer.add_scalar("accuracy", epoch_acc, global_step=epoch)
valid_loss_arr.append(epoch_loss)
val_acc_history_arr.append(epoch_acc.item())
# save model based on lowest epoch validation loss
if epoch_loss <= lowest_loss:
lowest_loss = epoch_loss
best_model_found_epoch = epoch
# save model state based on best val accuracy per epoch
# https://debuggercafe.com/effective-model-saving-and-resuming-training-in-pytorch/
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'loss': criterion,
}, saved_model_path)
print('Lowest Validation Loss: {:.4f} at epoch: {}'.format(lowest_loss, best_model_found_epoch))
print("End time:" + str(datetime.now() - start))
print("Program Complete")
# tensorboard cleanup
train_writer.flush()
validation_writer.flush()
train_writer.close()
validation_writer.close()
# %% --------------------
# Average Loss
print("Average Train Loss:" + str(np.mean(train_loss_arr)))
print("Average Validation Loss:" + str(np.mean(valid_loss_arr)))
# Average Accuracy
print("Average Train Accuracy:" + str(np.mean(train_acc_history_arr)))
print("Average Validation Accuracy:" + str(np.mean(val_acc_history_arr)))
| [
"[email protected]"
] | |
b6e6c452b9b6e587e65296e1eeab0b78f2e8931e | 0f0cb36c4d0afdd36072be37238b60a040623fdd | /cmds/task.py | b9b188d9fcd969f8d01d7b54852b6759b4124764 | [] | no_license | hijimmy87/dcCWBReportBot | 484cb5f99ab4a88a297de1bf0e6696e46d4c8259 | 634c1b850556d50c4b1715b2d8171d7e10d94a30 | refs/heads/main | 2023-08-11T14:57:59.511470 | 2021-09-30T13:24:57 | 2021-09-30T13:24:57 | 412,072,524 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,686 | py | import discord
from discord.ext import commands
from core import Cog_Extension
from api.weather import getCWB
import asyncio, json
from datetime import datetime, timedelta
class task(Cog_Extension):
def __init__(self, bot):
super().__init__(bot)
async def weather():
await self.bot.wait_until_ready()
with open('time.json', 'r', encoding = 'utf-8') as time:
time = json.load(time)
channel = bot.get_channel(self.channels['weather'])
while not bot.is_closed():
for datail in ['E-A0015-001', 'E-A0016-001']:
data = getCWB(datail)
if data is not None and data['time'] != time[datail]:
time[datail] = data['time']
content = data['content']
embed = discord.Embed(color = self.color())
embed.title = data['title']
embed.set_image(url = data['image'])
embed.set_footer(**data['footer'])
for field in data['field']:
embed.add_field(**field)
await channel.send(content, embed=embed)
await asyncio.sleep(1)
for datail in ['W-C0033-002']:
data = getCWB(datail)
if data is not None and data['time'] != time[datail]:
time[datail] = data['time']
embed = discord.Embed(
title = data['title'],
description = data['description'],
timestamp = data['timestamp'],
color = self.color()
)
embed.set_footer(**data['footer'])
for field in data['field']:
embed.add_field(**field)
await channel.send(embed=embed)
await asyncio.sleep(1)
with open('time.json', 'w', encoding = 'utf-8') as file:
json.dump(time, file, indent = 4)
async def tellTime():
def is_me(m):
return m.author == bot.user
await self.bot.wait_until_ready()
while not bot.is_closed():
now = datetime.utcnow() + timedelta(hours = 8)
if now.minute % 5 == 0 and now.second <= 5:
channel = bot.get_channel(self.channels["time"])
await channel.purge(limit = 1, check = is_me)
await channel.send(now.strftime('%H 時 %M 分 %S 秒'))
await asyncio.sleep(5)
await asyncio.sleep(1)
async def morning():
await self.bot.wait_until_ready()
while not bot.is_closed():
now = datetime.utcnow() + timedelta(hours = 8)
if now.hour == 6 and now.minute == 0 and now.second <= 5:
channel = bot.get_channel(self.channels["morning"])
await channel.send('おはよう世界\nGood Morning World!')
await asyncio.sleep(5)
await asyncio.sleep(1)
self.bot.loop.create_task(weather())
# self.bot.loop.create_task(tellTime())
self.bot.loop.create_task(morning())
def setup(bot):
bot.add_cog(task(bot)) | [
"[email protected]"
] | |
68076011454e47960d4ce0e65fcbc57c1ec41a99 | e6f9cb89af849f52fd318d467d91abbf198f2ea6 | /B_MOO_004_NSGA3_0810_PS/d_NSGA2_PMROA_no_encoding_pandas_numpy.py | a5ac99da00d494ffd4c4e6164bba551184446656 | [] | no_license | chinawindofmay/multi-objective-optimization-NSGA2 | 5cf0319f5a04e27d2adbe47c3ca11bee83c8083a | 5fb711d909967c61ab423fd3cc30c09065b44d3d | refs/heads/master | 2021-07-19T10:38:36.458471 | 2021-07-03T07:54:49 | 2021-07-03T07:54:49 | 240,712,509 | 10 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,824 | py | #coding:utf-8
"""
Basic notes on this algorithm:
Purpose: solve for the Pareto set that trades accessibility off against equality.
Reference implementation:
# Program Name: NSGA-II.py
# Description: This is a python implementation of Prof. Kalyanmoy Deb's popular NSGA-II algorithm
# Author: Haris Ali Khan
# Supervisor: Prof. Manoj Kumar Tiwari
Adaptation history:
Step 1: 2020-01-29/30. Collected material, revisited the fundamentals of NSGA-II, and debugged the original
    code (whose correctness could not be verified), so a fresh implementation was taken from GitHub.
    Studied the basic NSGA-II.py: a single x, two simple mathematical objective functions, with plots;
    see SGAII-traditional.py for details.
Step 2: 2020-01-31. Modified NSGA-II.py to solve for x1, x2 with three mathematical surface objectives,
    with plots; see NSGAII-math-valiation.py for details. This step mattered most, because it established
    that the understanding and adaptation of the algorithm were fundamentally correct: the three surfaces
    were solved, the accuracy was verified, and the results were plotted, which eased the later changes.
Step 3: 2020-02-01/02. Changed to 26 x variables with two objectives, accessibility and equality, which
    required modifying initial_population, crossover, mutation, fitness, the accessibility data loading, etc.
    Main problems solved along the way: adapting the limitation constraint; slow fitness evaluation;
    dropping the encoding/decoding steps (which made the problem easier to read).
    Tested with a population of 50 over 300 generations; rounded the fractional results to integers;
    forecast the number of doctors in 2030.
Step 4: Replaced the original Python-list storage and computation with pandas- and NumPy-based mechanisms
    to improve performance. On 2020-03-08 the pandas/NumPy storage rewrite was finished; with pandas one
    fitness evaluation took about 10 s. That evening it was rewritten in pure NumPy (see the figure
    d_numpy存储机制.jpg); one fitness evaluation then took about 0.03 s.
Step 5: Rethought and settled the paper topic: high quantity or high quality, solving the Pareto set for
    medical resource allocation, bringing the Matthew effect and tiered healthcare into the topic.
    Read the literature and added a third evaluation indicator.
    Four things to do next: tidy the code and push it to GitHub, check the base data for tiered healthcare,
    revisit the Huff model, and revise the model code.
Step 5 (cont.): consider a variable-length NSGA;
Step 6: consider Q-NSGA2;
Step 7: consider solving other multi-objective optimization problems.
import random
import matplotlib.pyplot as plt
import numpy as np
from e_fitness_gravity_equality import *
from numpy import *
from sklearn import preprocessing
import a_mongo_operater
import time
"""
Softmax function, used when initializing the population.
"""
def softmax_function(t_list):
try:
z_exp = [math.exp(i) for i in t_list]
sum_z_exp = sum(z_exp)
softmax_list = [round(i / sum_z_exp, 3) for i in z_exp]
return np.array(softmax_list)
except:
return
"""
Generate a random allocation-probability list.
"""
def softmax_random():
x_list=np.array([random.random() for i in range(0, SOLUTION_LEN)])
    list_z_score = preprocessing.scale(1 / x_list)  # using 1, 10, or 100 here makes no difference to the result
# print("z_score", list_z_score)
softmax_probability = softmax_function(list_z_score)
return softmax_probability
"""
Population initializer: build the original population.
"""
def initial_population():
population = np.empty(shape=[POP_SIZE, SOLUTION_LEN],dtype=np.float32)
for i in range(POP_SIZE):
solution=np.array([round(LIMINTATION * probability, 3) for probability in softmax_random()],dtype=np.float32)
population[i,:]=solution
return population
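# By construction each solution row sums to LIMINTATION (=100): the softmax
# probabilities sum to 1 and every entry is scaled by LIMINTATION (up to rounding).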
"""
Crossover operator: cross two solutions (with mutation and the limitation constraint).
"""
def crossover_mutation_limitation(solution_a, solution_b):
    ## columns 1 and 2 hold probabilities, column 3 the exchanged result, column 4 a 1/0 flag (TRUE/FALSE)
new_solution_sign = np.random.random((SOLUTION_LEN,4))
    # step 1: crossover
for i in range(0, SOLUTION_LEN):
if new_solution_sign[i,0]<CROSSOVER_PROB_THRESHOLD:
new_solution_sign[i,2]=solution_b[i]
new_solution_sign[i,3]=1
else:
new_solution_sign[i, 2] = solution_a[i]
new_solution_sign[i, 3] = 0
    # step 2: mutation
for i in range(0, SOLUTION_LEN):
if new_solution_sign[i,1] < MUTATION_PROB__THRESHOLD:
x = round(MIN_X + (MAX_X - MIN_X) * random.random()+0.1,3)
new_solution_sign[i, 2] =x
new_solution_sign[i, 3] =1
    # step 3: compute the totals
sum_x = np.sum(new_solution_sign[:,2])
sum_no_adjust_x = 0
for i in range(0, SOLUTION_LEN):
if new_solution_sign[i][3] == 0:
sum_no_adjust_x+=new_solution_sign[i,2]
    # step 4: after crossover and mutation, enforce the overall limitation constraint
adjust_parameter= (LIMINTATION - sum_no_adjust_x) / (sum_x - sum_no_adjust_x)
new_solution = np.empty(shape=(SOLUTION_LEN,), dtype=np.float32)
    # adjust_parameter > 1 means the changed (TRUE) x values are scaled up (multiplied);
    # adjust_parameter < 1 means the changed (TRUE) x values are scaled down (multiplied)
for i in range(0, SOLUTION_LEN):
if new_solution_sign[i,3]==1:
            # rescale the value
new_solution[i] = round(new_solution_sign[i,2]*adjust_parameter,3)
else:
new_solution[i] = new_solution_sign[i,2]
return new_solution
"""
Compute fitness on the NumPy storage; covers the two sub-objectives, accessibility and equality.
"""
def fitness_numpy(demands_numpy, population,DEMANDS_COUNT,access_type):
y1_values_double = np.empty(shape=(np.shape(population)[0],),dtype=np.float32)
y2_values_double = np.empty(shape=(np.shape(population)[0],),dtype=np.float32)
for i_solution in range(np.shape(population)[0]):
        # compute each hospital's gravity value under the current solution
solution=population[i_solution,:]
        # start time
# start_time = time.time()
if access_type=="g":
update_every_single_provider_gravity_value_numpy(demands_numpy, solution, DEMANDS_COUNT)
# elif access_type=="h":
# update_every_single_hospital_h2sfca_value_numpy(demands_numpy, solution, DEMANDS_COUNT)
        # accessibility fitness: the larger, the better
        y1_values_double[i_solution]=np.nansum(demands_numpy[:,0,0,1])  # sum every gravity value; nansum filters out the NaN entries
        # equality value: the smaller, the more equal
        # y2_values_double[i_solution]=np.cov(demands_numpy[:,0,0,1])  # equality formula that ignores the effect of population size
        y2_values_double[i_solution]=calculate_global_accessibility_equality_numpy(demands_numpy)
        # end time
# end_time1 = time.time()
# print('calculate_gravity_value() Running time: %s Seconds' % (end_time1 - start_time))
return y1_values_double, y2_values_double
"""
Basic building block of NSGA-II: find the index of a value in a list.
"""
def get_index_of(a, list_obj):
for i in range(0, len(list_obj)):
if list_obj[i] == a:
return i
return -1
"""
Basic building block of NSGA-II: sort a front by objective values.
"""
def sort_by_values(front, y_values):
sorted_list = []
while(len(sorted_list)!=len(front)):
if get_index_of(min(y_values), y_values) in front:
sorted_list.append(get_index_of(min(y_values), y_values))
y_values[get_index_of(min(y_values), y_values)] = math.inf
return sorted_list
"""
Basic building block of NSGA-II: carry out NSGA-II's fast non-dominated sort.
"""
def fast_non_dominated_sort(y1_values, y2_values):
S=[[] for i in range(0, np.shape(y1_values)[0])]
fronts = [[]]
n=[0 for i in range(0, np.shape(y1_values)[0])]
rank = [0 for i in range(0, np.shape(y1_values)[0])]
for p in range(0, np.shape(y1_values)[0]):
S[p]=[]
n[p]=0
for q in range(0, np.shape(y1_values)[0]):
            # objectives: maximize y1, minimize y2
if (y1_values[p] > y1_values[q] and y2_values[p] < y2_values[q] ) :
if q not in S[p]:
                    # add q to Sp, the set of individuals dominated by p
S[p].append(q)
elif (y1_values[p] < y1_values[q] and y2_values[p] > y2_values[q] ) :
                # increment the domination count Np;
                # the larger Np, the worse individual p is
n[p] = n[p] + 1
if n[p]==0:
rank[p] = 0
if p not in fronts[0]:
fronts[0].append(p)
i = 0
while(fronts[i] != []):
Q=[]
for p in fronts[i]:
for q in S[p]:
n[q] =n[q] - 1
if( n[q]==0):
rank[q]=i+1
if q not in Q:
Q.append(q)
i = i+1
fronts.append(Q)
del fronts[len(fronts)-1]
return fronts
"""
Basic building block of NSGA-II: calculate the crowding distance.
"""
def crowding_distance(y1_values, y2_values, front):
distance = [0 for i in range(0,len(front))]
    # sort the front by y1
    sorted1 = sort_by_values(front, y1_values[:])
    # sort the front by y2
    sorted2 = sort_by_values(front, y2_values[:])
    # the first and last individuals are defined as infinitely far away
    distance[0] = DISTANCE_INFINTE
    distance[len(front) - 1] = DISTANCE_INFINTE
    # compute the distances of the interior individuals
    for k in range(1,len(front)-1):
        # normalized neighbour gap along y1
        distance[k] = distance[k] + (y1_values[sorted1[k + 1]] - y1_values[sorted1[k - 1]]) / (max(y1_values) - min(y1_values) + DELATE)
    for k in range(1,len(front)-1):
        # normalized neighbour gap along y2
        distance[k] = distance[k] + (y2_values[sorted2[k + 1]] - y2_values[sorted2[k - 1]]) / (max(y2_values) - min(y2_values) + DELATE)
return distance
"""
2-D plot of the objective values.
"""
def draw_2d_plot(y1_values,y2_values):
fig = plt.figure(figsize=(12, 12))
ax11 = fig.add_subplot(111)
ax11.set_xlabel('y1', fontsize=15)
ax11.set_ylabel('y2', fontsize=15)
ax11.scatter(y1_values, y2_values)
plt.show()
"""
Main NSGA-II routine.
"""
def execute_nsga2_numpy(demands_np,DEMANDS_COUNT,access_type="g"):
    # initialize the population
population_ar = initial_population()
iteration_no = 0
    # main evolutionary loop
while (iteration_no < ITERATION_NUM):
print(iteration_no)
        # build a double-sized pool of offspring, then select from it
population_double=np.empty(shape=[POP_SIZE*2, SOLUTION_LEN],dtype=np.float32)
population_double[0:POP_SIZE:1,:] = population_ar.copy()
for i_new in range(POP_SIZE,POP_SIZE*2):
a1 = random.randint(0, POP_SIZE - 1)
b1 = random.randint(0, POP_SIZE - 1)
            # create a new solution via crossover/mutation plus the limitation constraint
population_double[i_new,:]=crossover_mutation_limitation(population_ar[a1], population_ar[b1])
        # evaluate fitness
y1_values_double, y2_values_double = fitness_numpy(demands_np, population_double,DEMANDS_COUNT,access_type=access_type)
        # print intermediate results
for index_x in range(0, POP_SIZE*2,4):
print("x={0},y1={1},y2={2}".format(population_double[index_x], round(y1_values_double[index_x], 3),
round(y2_values_double[index_x], 3)), end="\n")
        # fast non-dominated sort
        # objectives: maximize y1, minimize y2
non_do_sorted_double_fronts = fast_non_dominated_sort(y1_values_double.copy(), y2_values_double.copy())
        # crowding-distance computation
c_distance_double = []
for i in range(0, len(non_do_sorted_double_fronts)):
c_distance_double.append(crowding_distance(y1_values_double.copy(), y2_values_double.copy(),non_do_sorted_double_fronts[i][:]))
        # assemble the next generation
index_list_new_popu = []
for i in range(0, len(non_do_sorted_double_fronts)):
non_dominated_sorted_solution2_1 = [get_index_of(non_do_sorted_double_fronts[i][j], non_do_sorted_double_fronts[i]) for j in range(0, len(non_do_sorted_double_fronts[i]))]
front22 = sort_by_values(non_dominated_sorted_solution2_1[:], c_distance_double[i][:])
front = [non_do_sorted_double_fronts[i][front22[j]] for j in range(0, len(non_do_sorted_double_fronts[i]))]
front.reverse()
for index in front:
index_list_new_popu.append(index)
if (len(index_list_new_popu) == POP_SIZE):
break
if (len(index_list_new_popu) == POP_SIZE):
break
population_ar = [population_double[i] for i in index_list_new_popu]
iteration_no = iteration_no + 1
    # End of the generational loop: evaluate and report the final population,
    # which carries the best solutions found so far
y1_values, y2_values = fitness_numpy(demands_np,np.array(population_ar),DEMANDS_COUNT,access_type=access_type)
    # Print the final solutions with their objective values
for index_x in range(0,POP_SIZE):
print("x={0},y1={1},y2={2}".format(population_ar[index_x], round(y1_values[index_x], 3), round(y2_values[index_x], 3)), end="\n")
    # Plot the fitness values of the final generation
draw_2d_plot( y1_values, y2_values)
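# Note: execute_nsga2_numpy reports its Pareto front by printing and
# plotting rather than returning it; a caller that needs the final
# population could add `return population_ar, y1_values, y2_values` at the
# end of the function.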
"""
主函数,基于numpy,包括了MongoDB对象操作、调取NSGA2优化算法
"""
def main_function_numpy_gravity():
    # Initialize the MongoDB accessor
mongo_operater_obj = a_mongo_operater.MongoOperater(DB_NAME, COLLECTION_NAME)
    # Fetch all demand records
demands_np = mongo_operater_obj.find_records_format_numpy_1(0, DEMANDS_COUNT, PROVIDERS_COUNT)
    # Precompute each provider's vj and store it in demands_np so later accessibility calculations can reuse it
calculate_provider_vj_numpy_gravity(demands_np, DEMANDS_COUNT, PROVIDERS_COUNT)
    # Invoke NSGA-II
execute_nsga2_numpy(demands_np,DEMANDS_COUNT,access_type="g")
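# Prerequisite (assumption): a reachable MongoDB instance whose
# DB_NAME/COLLECTION_NAME collection (configured below) already holds the
# demand/provider records that find_records_format_numpy_1 expects;
# loading that data is outside the scope of this script.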
# Shared global parameters
# Crossover probability
CROSSOVER_PROB_THRESHOLD = 0.45
# Mutation probability
MUTATION_PROB__THRESHOLD = 0.1
# Small constant that keeps crowding-distance denominators non-zero
DELATE = 2e-7
# Sentinel "infinite" crowding distance for boundary individuals
DISTANCE_INFINTE = 444444444
# Solution-space constraint coefficient: the 2030 pediatrician count must not exceed 700 (bounded to the 690-710 interval)
MAX_X = 10
# Lower bound per gene
MIN_X = 1
# Number of doctors that can be added
LIMINTATION = 100
# Population size; kept even to simplify pairwise crossover
POP_SIZE = 100
# Number of generations
ITERATION_NUM = 20
# A chromosome holds several DNA segments and each DNA segment holds several
# genes, extending the solution space from one dimension to two
# Chromosome length (the number of DNA segments it contains)
SOLUTION_LEN = 40  # one solution corresponds to one chromosome
# Number of demand points (residential communities)
DEMANDS_COUNT = 184
# Number of supply points (charging piles); kept consistent with SOLUTION_LEN
PROVIDERS_COUNT = 40
# MongoDB configuration
DB_NAME = "admin"
COLLECTION_NAME = "moo_ps"
if __name__=="__main__":
main_function_numpy_gravity()
authors: [email protected]

blob_id: 716c09f6750529329620391f51c69fdaddde1b25 | repo_name: cbbjames/GA2016 | path: /ga2016_final_project-master/ga2016_final_project-master/src/run.py
detected_licenses: MIT | license_type: permissive | language: Python | length_bytes: 809

import cost_function as cf
import genetic_programming as gp
import monte_carlo as mc
import pic
# global setting
x = 50
y = 50
target_image = pic.pic2rgb("../data/0703.jpg", x, y)
cf.set_target_image(target_image)
# gp
horizontal = gp.cut('H',2)
vertical = gp.cut('V',2)
env = gp.enviroment([horizontal, vertical], ["L color"],
target_image, size = 100, maxcut = 15, maxdepth = 3)
s_gp, cost_gp = env.envolve(maxgen = 10)
print("Best solution in GP: ", cost_gp)
print(s_gp)
pic.rgb2pic(cf.to_array(s_gp, x, y, 1), 'LAB', "../data/output/0703_gp.png")
print("NFE(GP) = ", env.get_nfe())
# mc
s_mc, cost_mc = mc.monte_carlo(env.get_nfe(), 5, target_image)
print("Best solution in MC: ", cost_mc)
print(s_mc)
pic.rgb2pic(cf.to_array(s_mc, x, y, 1), 'LAB', "../data/output/0703_mc.png")
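# With the same NFE budget, cost_gp and cost_mc are directly comparable:
# a lower cost means a closer reconstruction of the target image (assuming
# cost_function implements an image-distance metric).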
authors: [email protected]

blob_id: 2bde8a3befdc778156a7218cbb0ab8f1fa8bfe96 | repo_name: pt1988/fmc-api | path: /swagger_client/models/i_neighbor_filter_list.py
detected_licenses: (none) | license_type: no_license | language: Python | length_bytes: 7160

# coding: utf-8
"""
Cisco Firepower Management Center Open API Specification
**Specifies the REST URLs and methods supported in the Cisco Firepower Management Center API. Refer to the version specific [REST API Quick Start Guide](https://www.cisco.com/c/en/us/support/security/defense-center/products-programming-reference-guides-list.html) for additional information.** # noqa: E501
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class INeighborFilterList(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'filter_update_action': 'str',
'name': 'str',
'as_path_list': 'IReference',
'links': 'ILinks',
'id': 'str',
'type': 'str'
}
attribute_map = {
'filter_update_action': 'filterUpdateAction',
'name': 'name',
'as_path_list': 'asPathList',
'links': 'links',
'id': 'id',
'type': 'type'
}
def __init__(self, filter_update_action=None, name=None, as_path_list=None, links=None, id=None, type=None): # noqa: E501
"""INeighborFilterList - a model defined in Swagger""" # noqa: E501
self._filter_update_action = None
self._name = None
self._as_path_list = None
self._links = None
self._id = None
self._type = None
self.discriminator = None
if filter_update_action is not None:
self.filter_update_action = filter_update_action
if name is not None:
self.name = name
if as_path_list is not None:
self.as_path_list = as_path_list
if links is not None:
self.links = links
if id is not None:
self.id = id
if type is not None:
self.type = type
@property
def filter_update_action(self):
"""Gets the filter_update_action of this INeighborFilterList. # noqa: E501
:return: The filter_update_action of this INeighborFilterList. # noqa: E501
:rtype: str
"""
return self._filter_update_action
@filter_update_action.setter
def filter_update_action(self, filter_update_action):
"""Sets the filter_update_action of this INeighborFilterList.
:param filter_update_action: The filter_update_action of this INeighborFilterList. # noqa: E501
:type: str
"""
allowed_values = ["IN", "OUT"] # noqa: E501
if filter_update_action not in allowed_values:
raise ValueError(
"Invalid value for `filter_update_action` ({0}), must be one of {1}" # noqa: E501
.format(filter_update_action, allowed_values)
)
self._filter_update_action = filter_update_action
@property
def name(self):
"""Gets the name of this INeighborFilterList. # noqa: E501
:return: The name of this INeighborFilterList. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this INeighborFilterList.
:param name: The name of this INeighborFilterList. # noqa: E501
:type: str
"""
self._name = name
@property
def as_path_list(self):
"""Gets the as_path_list of this INeighborFilterList. # noqa: E501
:return: The as_path_list of this INeighborFilterList. # noqa: E501
:rtype: IReference
"""
return self._as_path_list
@as_path_list.setter
def as_path_list(self, as_path_list):
"""Sets the as_path_list of this INeighborFilterList.
:param as_path_list: The as_path_list of this INeighborFilterList. # noqa: E501
:type: IReference
"""
self._as_path_list = as_path_list
@property
def links(self):
"""Gets the links of this INeighborFilterList. # noqa: E501
:return: The links of this INeighborFilterList. # noqa: E501
:rtype: ILinks
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this INeighborFilterList.
:param links: The links of this INeighborFilterList. # noqa: E501
:type: ILinks
"""
self._links = links
@property
def id(self):
"""Gets the id of this INeighborFilterList. # noqa: E501
:return: The id of this INeighborFilterList. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this INeighborFilterList.
:param id: The id of this INeighborFilterList. # noqa: E501
:type: str
"""
self._id = id
@property
def type(self):
"""Gets the type of this INeighborFilterList. # noqa: E501
:return: The type of this INeighborFilterList. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this INeighborFilterList.
:param type: The type of this INeighborFilterList. # noqa: E501
:type: str
"""
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(INeighborFilterList, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, INeighborFilterList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
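# Minimal usage sketch (illustrative only; the values below are
# hypothetical, not taken from a live FMC deployment):
if __name__ == "__main__":
    example = INeighborFilterList(filter_update_action="IN", name="example-filter")
    # to_dict() walks swagger_types, so fields that were never set appear as None
    print(example.to_dict())
    # filter_update_action is validated against {"IN", "OUT"}; any other
    # value raises ValueError in the property setter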
authors: [email protected]

blob_id: 9dbc3b6ded0ce3d6c631589afa169837f05a64cd | repo_name: DioExtreme/TT-CL-Edition | path: /toontown/coghq/DistributedLawOfficeFloor.py
detected_licenses: MIT | license_type: permissive | language: Python | length_bytes: 7428

from panda3d.core import *
from toontown.toonbase.ToontownGlobals import *
from direct.distributed.ClockDelta import *
from direct.interval.IntervalGlobal import *
import random
from otp.level import DistributedLevel
from direct.directnotify import DirectNotifyGlobal
import LawOfficeBase
import FactoryEntityCreator
import FactorySpecs
from otp.level import LevelSpec
from otp.level import LevelConstants
from toontown.toonbase import TTLocalizer
from toontown.coghq import FactoryCameraViews
if __dev__:
from otp.level import EditorGlobals
class DistributedLawOfficeFloor(DistributedLevel.DistributedLevel, LawOfficeBase.LawOfficeBase):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedLawOffice')
def __init__(self, cr):
DistributedLevel.DistributedLevel.__init__(self, cr)
LawOfficeBase.LawOfficeBase.__init__(self)
self.suitIds = []
self.suits = []
self.reserveSuits = []
self.joiningReserves = []
self.suitsInitialized = 0
self.goonClipPlanes = {}
def createEntityCreator(self):
return FactoryEntityCreator.FactoryEntityCreator(level=self)
def generate(self):
self.notify.debug('generate')
DistributedLevel.DistributedLevel.generate(self)
self.factoryViews = FactoryCameraViews.FactoryCameraViews(self)
base.localAvatar.chatMgr.chatInputSpeedChat.addFactoryMenu()
if __dev__:
bboard.post(EditorGlobals.EditTargetPostName, self)
self.accept('SOSPanelEnter', self.handleSOSPanel)
def delete(self):
DistributedLevel.DistributedLevel.delete(self)
base.localAvatar.chatMgr.chatInputSpeedChat.removeFactoryMenu()
self.factoryViews.delete()
del self.factoryViews
self.ignore('SOSPanelEnter')
if __dev__:
bboard.removeIfEqual(EditorGlobals.EditTargetPostName, self)
def setLawOfficeId(self, id):
LawOfficeBase.LawOfficeBase.setLawOfficeId(self, id)
def setForemanConfronted(self, avId):
if avId == base.localAvatar.doId:
return
av = base.cr.identifyFriend(avId)
if av is None:
return
base.localAvatar.setSystemMessage(avId, TTLocalizer.ForemanConfrontedMsg % av.getName())
return
def setDefeated(self):
self.notify.info('setDefeated')
messenger.send('FactoryWinEvent')
def levelAnnounceGenerate(self):
self.notify.debug('levelAnnounceGenerate')
DistributedLevel.DistributedLevel.levelAnnounceGenerate(self)
specModule = FactorySpecs.getFactorySpecModule(self.lawOfficeId)
factorySpec = LevelSpec.LevelSpec(specModule)
if __dev__:
typeReg = self.getEntityTypeReg()
factorySpec.setEntityTypeReg(typeReg)
DistributedLevel.DistributedLevel.initializeLevel(self, factorySpec)
def privGotSpec(self, levelSpec):
if __dev__:
if not levelSpec.hasEntityTypeReg():
typeReg = self.getEntityTypeReg()
levelSpec.setEntityTypeReg(typeReg)
firstSetZoneDoneEvent = self.cr.getNextSetZoneDoneEvent()
def handleFirstSetZoneDone():
base.factoryReady = 1
messenger.send('FactoryReady')
self.acceptOnce(firstSetZoneDoneEvent, handleFirstSetZoneDone)
modelCount = len(levelSpec.getAllEntIds())
loader.beginBulkLoad('factory', TTLocalizer.HeadingToFactoryTitle % TTLocalizer.FactoryNames[self.lawOfficeId], modelCount, 1, TTLocalizer.TIP_COGHQ)
DistributedLevel.DistributedLevel.privGotSpec(self, levelSpec)
loader.endBulkLoad('factory')
messenger.send('LawOffice_Spec_Loaded')
def printPos(self = self):
pos = base.localAvatar.getPos(self.getZoneNode(self.lastToonZone))
h = base.localAvatar.getH(self.getZoneNode(self.lastToonZone))
print 'factory pos: %s, h: %s, zone %s' % (repr(pos), h, self.lastToonZone)
posStr = 'X: %.3f' % pos[0] + '\nY: %.3f' % pos[1] + '\nZ: %.3f' % pos[2] + '\nH: %.3f' % h + '\nZone: %s' % str(self.lastToonZone)
base.localAvatar.setChat(posStr, CFThought, 0)
self.accept('f2', printPos)
base.localAvatar.setCameraCollisionsCanMove(1)
self.acceptOnce('leavingFactory', self.announceLeaving)
def handleSOSPanel(self, panel):
avIds = []
for avId in self.avIdList:
if base.cr.doId2do.get(avId):
avIds.append(avId)
panel.setFactoryToonIdList(avIds)
def handleFloorDone(self):
self.sendUpdate('readyForNextFloor')
def disable(self):
self.notify.debug('disable')
base.localAvatar.setCameraCollisionsCanMove(0)
if hasattr(self, 'suits'):
del self.suits
if hasattr(self, 'relatedObjectMgrRequest') and self.relatedObjectMgrRequest:
self.cr.relatedObjectMgr.abortRequest(self.relatedObjectMgrRequest)
del self.relatedObjectMgrRequest
DistributedLevel.DistributedLevel.disable(self)
def setSuits(self, suitIds, reserveSuitIds):
oldSuitIds = list(self.suitIds)
self.suitIds = suitIds
self.reserveSuitIds = reserveSuitIds
newSuitIds = []
for suitId in self.suitIds:
if suitId not in oldSuitIds:
newSuitIds.append(suitId)
if len(newSuitIds):
def bringOutOfReserve(suits):
for suit in suits:
suit.comeOutOfReserve()
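            # Assumption: relatedObjectMgr fires the callback (and thus
            # bringOutOfReserve) only once every requested suit exists on
            # this client, so reserves that join together emerge together.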
self.relatedObjectMgrRequest = self.cr.relatedObjectMgr.requestObjects(newSuitIds, bringOutOfReserve)
def reservesJoining(self):
pass
def getCogSpec(self, cogId):
cogSpecModule = FactorySpecs.getCogSpecModule(self.lawOfficeId)
return cogSpecModule.CogData[cogId]
def getReserveCogSpec(self, cogId):
cogSpecModule = FactorySpecs.getCogSpecModule(self.lawOfficeId)
return cogSpecModule.ReserveCogData[cogId]
def getBattleCellSpec(self, battleCellId):
cogSpecModule = FactorySpecs.getCogSpecModule(self.lawOfficeId)
return cogSpecModule.BattleCells[battleCellId]
def getFloorOuchLevel(self):
return 2
def getGoonPathId(self):
return 'sellbotFactory'
def getTaskZoneId(self):
return self.lawOfficeId
def getBossTaunt(self):
return TTLocalizer.FactoryBossTaunt
def getBossBattleTaunt(self):
return TTLocalizer.FactoryBossBattleTaunt
def placeLocalToon(self):
initialZoneEnt = None
if self.entranceId in self.entranceId2entity:
epEnt = self.entranceId2entity[self.entranceId]
initialZoneEnt = self.getEntity(epEnt.getZoneEntId())
elif self.EmulateEntrancePoint:
self.notify.debug('unknown entranceId %s' % self.entranceId)
self.notify.debug('showing all zones')
self.setColorZones(1)
zoneEntIds = list(self.entType2ids['zone'])
zoneEntIds.remove(LevelConstants.UberZoneEntId)
if len(zoneEntIds):
zoneEntId = random.choice(zoneEntIds)
initialZoneEnt = self.getEntity(zoneEntId)
else:
initialZoneEnt = self.getEntity(LevelConstants.UberZoneEntId)
if initialZoneEnt is not None:
self.enterZone(initialZoneEnt.entId)
return
authors: [email protected]