Dataset schema (one row per source file; fields are pipe-delimited in this order):

blob_id: string (length 40)
directory_id: string (length 40)
path: string (length 5-283)
content_id: string (length 40)
detected_licenses: sequence (0-41 items)
license_type: string (2 classes)
repo_name: string (length 7-96)
snapshot_id: string (length 40)
revision_id: string (length 40)
branch_name: string (58 classes)
visit_date: timestamp[us]
revision_date: timestamp[us]
committer_date: timestamp[us]
github_id: int64 (12.7k-662M, nullable)
star_events_count: int64 (0-35.5k)
fork_events_count: int64 (0-20.6k)
gha_license_id: string (11 classes)
gha_event_created_at: timestamp[us]
gha_created_at: timestamp[us]
gha_language: string (43 classes)
src_encoding: string (9 classes)
language: string (1 class)
is_vendor: bool (2 classes)
is_generated: bool (2 classes)
length_bytes: int64 (7-5.88M)
extension: string (30 classes)
content: string (length 7-5.88M)
authors: sequence (1 item)
author: string (length 0-73)
c2a5087c17e3ec8a8e4d4b649ac3df25b74348a3 | 9d95509a23c5a6eee5a19e896a91c062ee328c6f | /day5/98sort.py | fe2fb28dde295b674c75658a8c1e53f9cc7237ee | [] | no_license | YuanShisong/pythonstudy | a50fd5fa1098170e35f8ca4cd3fa33c0990d79b7 | 1b8db5e79ddea7ed4c7c0756589db94ffb0c3041 | refs/heads/master | 2021-05-14T09:50:42.455615 | 2018-01-24T03:02:02 | 2018-01-24T03:02:02 | 116,335,843 | 0 | 0 | null | 2018-01-18T06:16:31 | 2018-01-05T03:19:30 | Python | UTF-8 | Python | false | false | 1,528 | py | """
This is a pure python implementation of the shell sort algorithm
For doctests run the following command:
python -m doctest -v shell_sort.py
or
python3 -m doctest -v shell_sort.py
For manual testing run:
python shell_sort.py
"""
from __future__ import print_function
def shell_sort(collection):
"""Pure implementation of shell sort algorithm in Python
:param collection: Some mutable ordered collection with heterogeneous
comparable items inside
:return: the same collection ordered in ascending order
>>> shell_sort([0, 5, 3, 2, 2])
[0, 2, 2, 3, 5]
>>> shell_sort([])
[]
>>> shell_sort([-2, -5, -45])
[-45, -5, -2]
"""
# Marcin Ciura's gap sequence
gaps = [701, 301, 132, 57, 23, 10, 4, 1]
for gap in gaps:
i = gap
while i < len(collection):
temp = collection[i]
j = i
while j >= gap and collection[j - gap] > temp:
collection[j] = collection[j - gap]
j -= gap
collection[j] = temp
i += 1
return collection
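# A possible extension (an assumption, not part of the algorithm above): Ciura's
# published sequence stops at 701, so for much longer inputs the gaps are often
# extended by repeatedly multiplying the largest gap by 2.25:
def extended_ciura_gaps(n):
    """Return a descending gap list for inputs of length n (Ciura + 2.25 rule)."""
    gaps = [1, 4, 10, 23, 57, 132, 301, 701]
    while gaps[-1] * 2.25 < n:
        gaps.append(int(gaps[-1] * 2.25))
    return gaps[::-1]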
if __name__ == '__main__':
import sys
# For python 2.x and 3.x compatibility: 3.x has no raw_input builtin
# otherwise 2.x's input builtin function is too "smart"
if sys.version_info.major < 3:
input_function = raw_input
else:
input_function = input
user_input = input_function('Enter numbers separated by a comma:\n')
unsorted = [int(item) for item in user_input.split(',')]
print(shell_sort(unsorted)) | [
"[email protected]"
] | |
9b756ec4494354ef5ad2ddec151f066269253ed6 | e74e89592d8a3b1a0b465a7b1595708b224362d2 | /pset_dicts/dict_basics/tests/test4.py | 701188e0f5203261b1868252f94c728dd892570e | [
"MIT"
] | permissive | mottaquikarim/pydev-psets | 016f60f1e9d9a534bd9a66ecde8eb412beee37d1 | 9749e0d216ee0a5c586d0d3013ef481cc21dee27 | refs/heads/master | 2023-01-10T11:15:57.041287 | 2021-06-07T23:38:34 | 2021-06-07T23:38:34 | 178,547,933 | 5 | 2 | MIT | 2023-01-03T22:28:27 | 2019-03-30T11:09:08 | Jupyter Notebook | UTF-8 | Python | false | false | 343 | py | """
Lists to Dicts
"""
# import io
# from unittest.mock import patch
import pytest
from unittest import TestCase


@pytest.mark.describe('Lists to Dicts - XXX')
class TestPrint(TestCase):
    pass  # test body left as a stub in the source | [
"[email protected]"
] | |
f12805adc6bab99cbae282a4d2b62b3378a7db07 | b9b967c8154ffb3c3622c4b46065132a33e785f6 | /server/migrations/versions/e1cc6076c951_consisted_naming_service_requests.py | a8bfe316cb39cd4a80ed3be5dd4f6b9cee4aed98 | [
"Apache-2.0"
] | permissive | SURFscz/SBS | 5917561656caec042e5a6c966aeb54b82e96f51d | b159eeb7a5b8246aebd9849b4b3b61b9af1a8514 | refs/heads/main | 2023-08-31T12:42:52.473898 | 2023-08-31T11:58:51 | 2023-08-31T11:58:51 | 162,148,147 | 4 | 1 | Apache-2.0 | 2023-09-12T12:07:41 | 2018-12-17T15:05:54 | JavaScript | UTF-8 | Python | false | false | 498 | py | """Consisted naming service requests
Revision ID: e1cc6076c951
Revises: e8f8c9ffc508
Create Date: 2023-08-28 16:02:56.556798
"""
from alembic import op
from sqlalchemy import text
# revision identifiers, used by Alembic.
revision = 'e1cc6076c951'
down_revision = 'e8f8c9ffc508'
branch_labels = None
depends_on = None
def upgrade():
conn = op.get_bind()
conn.execute(text("ALTER TABLE service_requests CHANGE short_name abbreviation varchar(255) NOT NULL"))
def downgrade():
pass
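# A possible inverse of upgrade() (sketch; the author left downgrade empty):
# conn = op.get_bind()
# conn.execute(text("ALTER TABLE service_requests CHANGE abbreviation short_name varchar(255) NOT NULL"))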
| [
"[email protected]"
] | |
9e3ad4ed89a5222faa933a0c37908209c44acbe2 | 5173fbfdf7b2293fb590836106ac76e4ccadea9d | /car/path_simple.py | 6a9013d09cba2005e59b4a6d7e6f5ad631948c76 | [] | no_license | babraham123/car_route_viewer | 28f177193d2cc2ca8faec5d6f89c44d26331a3ef | 2c5b04d1ba8d20d84c72809a984cf2666aa892a8 | refs/heads/master | 2021-01-09T05:27:16.613110 | 2017-02-17T11:26:38 | 2017-02-17T11:26:38 | 80,771,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,293 | py | import IoRTcar as iort
import sys
import math
import networkx as nx
import matplotlib as mpl
mpl.use('Agg') # Agg backend instead of X
import matplotlib.pyplot as plt
def dist(n1, n2):
dx = float(n1['pos_x']) - float(n2['pos_x']);
dy = float(n1['pos_y']) - float(n2['pos_y']);
return math.sqrt(dx*dx + dy*dy)
def searchEdge(array, val):
for edg in array:
if edg['e_name'] == val:
return edg
def searchNode(array, val):
for nod in array:
if nod['n_name'] == val:
return nod
def gen_graph(pdata) :
ret = nx.DiGraph()
# for n in pdata['node']:
# ret.add_node(n['n_name'])
for info in pdata['info']:
edge = searchEdge(pdata['edge'], info['e_name'])
#print(info['e_name'], edge)
n1 = searchNode(pdata['node'], edge['n1_name'])
n2 = searchNode(pdata['node'], edge['n2_name'])
ret.add_node(n1['n_name'])
ret.add_node(n2['n_name'])
#print(edge['n1_name'], n1, edge['n2_name'], n2)
l = dist(n1, n2)
w1 = float(info['e_w1'])
w2 = float(info['e_w2'])
if w1 >= 0.01 and w1 <= 1.0 :
ret.add_edge(n1['n_name'], n2['n_name'], weight = l/w1)
if w2 >= 0.01 and w2 <= 1.0 :
ret.add_edge(n2['n_name'], n1['n_name'], weight = l/w2)
return ret
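# Edge weights in gen_graph model traversal time: e_w1/e_w2 in (0.01, 1.0] act
# as per-direction speed factors, so weight = euclidean length / factor, and
# nx.dijkstra_path below then minimizes total travel time rather than distance.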
# main
iort.init(sys.argv[1:])
pdata = iort.read_map("simple")
#print(pdata)
# list to dictionary
node = {}
for n in pdata['node']:
node[n['n_name']] = { 'name' : n['n_name'], 'pos_x' : float(n['pos_x']), 'pos_y' : float(n['pos_y']) }
G = gen_graph(pdata)
#print(G.nodes())
#print(G.edges(data=True))
#print(G.edges())
# print shortest path
nlist = nx.dijkstra_path(G, source='n001', target='n220')
print(nlist)
#path1 = []
#for n in range(len(nlist)):
# n1 = n+1
# #print(nlist[n])
# path1.append({ 'seq': n1,
# 'pos_x': node[nlist[n]]['pos_x'],
# 'pos_y': node[nlist[n]]['pos_y'],
# 'name' : node[nlist[n]]['name'] })
#print(path1)
#iort.reg_path("tomotake", "prog1", int(100), path1)
#path = iort.read_path("tomotake", "prog1")
#print(path)
nlist = nx.dijkstra_path(G, source='n220', target='n001')
print(nlist)
#pos = nx.spring_layout(G)
#nx.draw(G)
#plt.savefig("map.png")
| [
"[email protected]"
] | |
3babca11b3e50070ce275e841463743310cbaefb | 4977211a956839dcd11020a2166ecae240666c68 | /src/dongFang.py | 3c8f38d103d576750c57b1b70fe012ccfbb16805 | [] | no_license | DuffyWang926/getStockApp | 45e35ac14ac7027ec9add33f161c365af4dedcad | 2881eb1265b2c651eacb802182be3801a78f4ec0 | refs/heads/master | 2023-05-02T23:58:48.306368 | 2021-04-30T06:55:52 | 2021-04-30T06:55:52 | 333,023,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,346 | py | import os
from time import sleep
import unittest
from appium import webdriver
from src.getPwdData import getPwd
from utils.verify import isExist
from setting import getSetting
from mysql.initDB import initMysql
# driver.find_element_by_id('android:id/content')
# driver.find_element_by_class_name('android.view.View')
# driver.find_element_by_xpath('//android.view.View[contains(@text, "去认购")]')
# driver.find_element_by_android_uiautomator('new UiSelector().text("(01490.HK)")')
# driver.find_element_by_android_uiautomator('new UiSelector().textContains("4000")')
def buyDongFang(param):
code = param['code']
isCash = param['isCash']
stockNumVal = param['numVal']
stockNum = param['num']
isFinancingAll = param['isFinancingAll']
isCashAll = param['isCashAll']
settingIndex = param['setIndex']
settingData = getSetting(settingIndex)
settingData['appPackage'] = 'hk.com.dfzq.dfyy'
settingData['appActivity'] = 'com.tdx.Android.LaunchActivity'
desired_caps = settingData
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
driver.close_app();
sleep(3)
driver.launch_app();
sleep(5)
driver.find_element_by_android_uiautomator('new UiSelector().text("交易")').click()
sleep(5)
path = '/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.support.v4.widget.DrawerLayout/android.widget.LinearLayout/android.widget.RelativeLayout/android.widget.RelativeLayout[2]/android.support.v4.view.ViewPager/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.webkit.WebView/android.webkit.WebView/android.view.View/android.view.View[3]'
tradePath = '/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.support.v4.widget.DrawerLayout/android.widget.LinearLayout/android.widget.RelativeLayout/android.widget.RelativeLayout[2]/android.support.v4.view.ViewPager/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.webkit.WebView/android.webkit.WebView/android.view.View/android.view.View[3]'
# print(driver.page_source)  # debug: dump the page hierarchy when locating elements
# print(driver.contexts)
# driver.find_element_by_xpath(path).click()
driver.find_element_by_xpath(tradePath).click()
sleep(1)
loginDongFang(driver)
driver.find_element_by_android_uiautomator('new UiSelector().text("新股申购")').click()
sleep(2)
codePath = '/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.support.v4.widget.DrawerLayout/android.widget.LinearLayout/android.widget.RelativeLayout/android.widget.RelativeLayout[2]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.webkit.WebView/android.webkit.WebView/android.view.View/android.view.View[2]/android.widget.GridView/android.view.View[2]/android.view.View[1]/android.view.View[2]'
codeText = driver.find_element_by_xpath(codePath).text
if codeText == code:
driver.find_element_by_xpath(codePath).click()
sleep(1)
codeConfirmPath = '/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.support.v4.widget.DrawerLayout/android.widget.LinearLayout/android.widget.RelativeLayout/android.widget.RelativeLayout[2]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.webkit.WebView/android.webkit.WebView/android.view.View/android.view.View[1]/android.widget.GridView/android.view.View[2]/android.view.View/android.view.View[2]/android.view.View/android.view.View/android.view.View'
driver.find_element_by_xpath(codeConfirmPath).click()
sleep(1)
if not isCash:
typePath = '/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.support.v4.widget.DrawerLayout/android.widget.LinearLayout/android.widget.RelativeLayout/android.widget.RelativeLayout[2]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.webkit.WebView/android.webkit.WebView/android.view.View[1]/android.view.View/android.view.View/android.view.View/android.view.View/android.view.View[4]/android.view.View[3]/android.view.View/android.view.View/android.view.View[1]'
driver.find_element_by_xpath(typePath).click()
sleep(1)
financePath = '/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.support.v4.widget.DrawerLayout/android.widget.LinearLayout/android.widget.RelativeLayout/android.widget.RelativeLayout[2]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.webkit.WebView/android.webkit.WebView/android.view.View[2]/android.view.View/android.view.View/android.view.View/android.view.View/android.view.View/android.widget.GridView/android.view.View[2]/android.view.View'
driver.find_element_by_xpath(financePath).click()
sleep(1)
amountPath = '/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.support.v4.widget.DrawerLayout/android.widget.LinearLayout/android.widget.RelativeLayout/android.widget.RelativeLayout[2]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.webkit.WebView/android.webkit.WebView/android.view.View/android.view.View/android.view.View/android.view.View/android.view.View/android.view.View[3]/android.view.View[3]/android.view.View/android.view.View[1]/android.view.View'
driver.find_element_by_xpath(amountPath).click()
sleep(1)
amountFlag = True
initNum = 0
while amountFlag:
amountListPath = '/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.support.v4.widget.DrawerLayout/android.widget.LinearLayout/android.widget.RelativeLayout/android.widget.RelativeLayout[2]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.webkit.WebView/android.webkit.WebView/android.view.View[2]/android.view.View/android.view.View/android.view.View/android.view.View/android.view.View[2]/android.widget.GridView/android.view.View/android.view.View[1]'
amountView = driver.find_elements_by_xpath(amountListPath)
index = str(initNum+1)
amountNumPath = '/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.support.v4.widget.DrawerLayout/android.widget.LinearLayout/android.widget.RelativeLayout/android.widget.RelativeLayout[2]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.webkit.WebView/android.webkit.WebView/android.view.View[2]/android.view.View/android.view.View/android.view.View/android.view.View/android.view.View[1]/android.widget.GridView/android.view.View['+ index + ']/android.view.View[1]'
amountNumText = driver.find_element_by_xpath(amountNumPath).text
print(amountNumText)
if isFinancingAll:
initNum = 1
hasCashNumPath = '/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.support.v4.widget.DrawerLayout/android.widget.LinearLayout/android.widget.RelativeLayout/android.widget.RelativeLayout[2]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.webkit.WebView/android.webkit.WebView/android.view.View[1]/android.view.View[2]/android.view.View[2]/android.view.View/android.view.View[5]/android.view.View[2]/android.view.View/android.view.View'
hasCashNum = int(float(driver.find_element_by_xpath(hasCashNumPath).text))
oneNumMoneyPath = '/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.support.v4.widget.DrawerLayout/android.widget.LinearLayout/android.widget.RelativeLayout/android.widget.RelativeLayout[2]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.webkit.WebView/android.webkit.WebView/android.view.View[2]/android.view.View/android.view.View/android.view.View/android.view.View/android.view.View[1]/android.widget.GridView/android.view.View[1]/android.view.View[2]'
oneNumMoney = float(driver.find_element_by_xpath(oneNumMoneyPath).text)
oneNumPath = '/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.support.v4.widget.DrawerLayout/android.widget.LinearLayout/android.widget.RelativeLayout/android.widget.RelativeLayout[2]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.webkit.WebView/android.webkit.WebView/android.view.View[2]/android.view.View/android.view.View/android.view.View/android.view.View/android.view.View[1]/android.widget.GridView/android.view.View[1]/android.view.View[1]'
oneNum = int(driver.find_element_by_xpath(oneNumPath).text)
stockNumEnd = int(hasCashNum/0.1/oneNumMoney)* oneNum
stockNum = stockNumEnd
amountNumText = int(amountNumText)
if amountNumText == stockNum:
amountFlag = False
elif amountNumText == stockNumVal:  # assumption: the duplicated comparison was meant to use the fallback numVal parameter
amountFlag = False
sleep(1)
initNum += 1
if amountFlag:
driver.swipe(200,2100,200,1990,300)
sleep(1)
else:
driver.find_element_by_xpath(amountNumPath).click()
buyPath = '/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.support.v4.widget.DrawerLayout/android.widget.LinearLayout/android.widget.RelativeLayout/android.widget.RelativeLayout[2]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.webkit.WebView/android.webkit.WebView/android.view.View[1]/android.view.View[1]/android.view.View/android.view.View/android.view.View/android.view.View[5]/android.view.View/android.view.View/android.view.View/android.widget.Button'
driver.find_element_by_xpath(buyPath).click()
sleep(1)
confirmEndPath = '/hierarchy/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.LinearLayout[3]/android.widget.LinearLayout/android.widget.Button[1]'
# print(driver.find_element_by_xpath(buyPath).click())
print(driver.find_element_by_xpath(confirmEndPath).text)
sleep(2)
driver.quit()
def getDongFangProperty(param):
settingIndex = param['setIndex']
settingData = getSetting(settingIndex)
settingData['appPackage'] = 'hk.com.dfzq.dfyy'
settingData['appActivity'] = 'com.tdx.Android.LaunchActivity'
desired_caps = settingData
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
driver.close_app();
sleep(3)
driver.launch_app();
sleep(5)
driver.find_element_by_android_uiautomator('new UiSelector().text("交易")').click()
sleep(7)
# print(driver.page_source)  # debug: dump the page hierarchy
path = '/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.support.v4.widget.DrawerLayout/android.widget.LinearLayout/android.widget.RelativeLayout/android.widget.RelativeLayout[2]/android.support.v4.view.ViewPager/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.webkit.WebView/android.webkit.WebView/android.view.View/android.view.View[3]'
driver.find_element_by_xpath(path).click()
sleep(1)
loginDongFang(driver)
allPatha = '/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.support.v4.widget.DrawerLayout/android.widget.LinearLayout/android.widget.RelativeLayout/android.widget.RelativeLayout[2]/android.support.v4.view.ViewPager/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.webkit.WebView/android.webkit.WebView/android.view.View/android.view.View/android.view.View/android.view.View/android.view.View/android.view.View[1]/android.widget.TextView[1]'
allPathb = '/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.support.v4.widget.DrawerLayout/android.widget.LinearLayout/android.widget.RelativeLayout/android.widget.RelativeLayout[2]/android.support.v4.view.ViewPager/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.webkit.WebView/android.webkit.WebView/android.view.View/android.view.View/android.view.View/android.view.View/android.view.View/android.view.View[1]/android.widget.TextView[2]'
allNum = driver.find_element_by_xpath(allPatha).text + driver.find_element_by_xpath(allPathb).text
availablePath = '/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.support.v4.widget.DrawerLayout/android.widget.LinearLayout/android.widget.RelativeLayout/android.widget.RelativeLayout[2]/android.support.v4.view.ViewPager/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.webkit.WebView/android.webkit.WebView/android.view.View/android.view.View/android.view.View/android.view.View/android.view.View/android.view.View[1]/android.view.View[12]/android.view.View/android.view.View/android.view.View[7]'
availableNum = driver.find_element_by_xpath(availablePath).text
param = {
'method':0,
'tableName':'dongFang0',
'allNum':allNum,
'availableNum':availableNum,
}
initMysql(param)
driver.quit()
def loginDongFang(driver):
pwd = getPwd('dongFang')['tradePwd']
driver.find_elements_by_class_name('android.widget.EditText')[1].send_keys(pwd)
loginPath = '/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.support.v4.widget.DrawerLayout/android.widget.LinearLayout/android.widget.RelativeLayout/android.widget.RelativeLayout/android.widget.RelativeLayout/android.widget.LinearLayout/android.widget.LinearLayout/android.widget.RelativeLayout[7]/android.view.View'
driver.find_element_by_xpath(loginPath).click()
sleep(1)
agreePath = '/hierarchy/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.RelativeLayout/android.widget.RelativeLayout[3]/android.widget.LinearLayout/android.view.View[2]'
driver.find_element_by_xpath(agreePath).click()
sleep(6)
confirmPwdPath = '/hierarchy/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.RelativeLayout/android.widget.RelativeLayout[3]/android.widget.LinearLayout/android.view.View[2]'
if isExist(driver, 2,confirmPwdPath):
driver.find_element_by_xpath(confirmPwdPath).click()
sleep(3)
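# For reference, isExist (imported from utils.verify; source not shown) is
# assumed to poll for an element and return a bool instead of raising, roughly:
# def isExist(driver, timeout, xpath):
#     for _ in range(timeout):
#         try:
#             driver.find_element_by_xpath(xpath)
#             return True
#         except Exception:
#             sleep(1)
#     return False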
| [
"[email protected]"
] | |
c3c40502679dc6d8ddccdd7851c9843f92887a85 | ba42521d4775ef8e5f67282aa80bb55e2f089cee | /project/users/views/favorites.py | e7849790e1834ea42113cdb911c4b1e56f1642c6 | [] | no_license | timehollyname/Foodgram | 8014861aa906f63db490cf3c3c5fdbb4a80c407f | c1ede98920637ab06ce63ab0568f77c5043c4dce | refs/heads/master | 2023-06-02T22:40:41.137278 | 2021-06-14T12:45:25 | 2021-06-14T12:45:25 | 370,342,204 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | from django.conf import settings
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import ListView
from recipes.models import Recipe # noqa
class FavoritesView(LoginRequiredMixin, ListView):
context_object_name = 'recipes'
template_name = 'users/favorites.html'
paginate_by = settings.PAGINATION_RECIPES_SIZE
def get_queryset(self):
return Recipe.objects.filter(
favorites__user__id=self.request.user.id
).get_by_tags(
self.request.GET
).select_related(
'author'
).prefetch_related(
'tags'
).distinct()
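# distinct() guards against duplicate rows introduced by the tags join;
# get_by_tags is assumed to be a custom queryset helper on the Recipe manager
# that filters on the tag parameters carried in request.GET.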
| [
"[email protected]"
] | |
185ed85a98c4134b431f0bceeb3181d30a573dbc | 35f2674295aac1a29df9c7ffa8e1cd788c39d83c | /pa3/TestCases/S3/output/q5-array-test2-normal.tac | 56a5eaba387dffb6cc7021e66043dbaa1942698f | [] | no_license | carpediem2/decaf-complier | 16ecc680db44e2cb0c18d5688f2993c7821a77be | fe0ecc70692d73ea8d0fa75b8837f00245d27ddf | refs/heads/master | 2020-05-06T15:17:45.814127 | 2019-01-09T02:37:02 | 2019-01-09T02:37:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,736 | tac | VTABLE(_Main) {
<empty>
Main
}
FUNCTION(_Main_New) {
memo ''
_Main_New:
_T0 = 4
parm _T0
_T1 = call _Alloc
_T2 = VTBL <_Main>
*(_T1 + 0) = _T2
return _T1
}
FUNCTION(main) {
memo ''
main:
_T4 = 3
_T5 = 2
_T6 = 0
_T7 = (_T5 < _T6)
if (_T7 == 0) branch _L10
_T8 = "Decaf runtime error: The length of the created array should not be less than 0.\n"
parm _T8
call _PrintString
call _Halt
_L10:
_T9 = 4
_T10 = (_T9 * _T5)
_T11 = (_T9 + _T10)
parm _T11
_T12 = call _Alloc
*(_T12 + 0) = _T5
_T13 = 0
_T12 = (_T12 + _T11)
_L11:
_T11 = (_T11 - _T9)
if (_T11 == 0) branch _L12
_T12 = (_T12 - _T9)
*(_T12 + 0) = _T13
branch _L11
_L12:
_T14 = 0
_L14:
_T15 = (_T14 < _T5)
if (_T15 == 0) branch _L13
_T16 = 4
_T17 = (_T14 * _T16)
_T18 = (_T12 + _T17)
*(_T18 + 0) = _T4
_T19 = 1
_T20 = (_T14 + _T19)
_T14 = _T20
branch _L14
_L13:
_T3 = _T12
_T22 = 2
_T23 = 9
_T25 = 0
_T26 = (_T22 >= _T25)
if (_T26 == 0) branch _L15
_T27 = *(_T3 - 4)
_T28 = (_T22 < _T27)
if (_T28 == 0) branch _L15
_T29 = 4
_T30 = (_T22 * _T29)
_T31 = (_T3 + _T30)
_T32 = *(_T31 + 0)
_T24 = _T32
branch _L16
_L15:
_T24 = _T23
_L16:
_T21 = _T24
_T34 = 1
_T35 = 1
_T37 = 0
_T38 = (_T34 >= _T37)
if (_T38 == 0) branch _L17
_T39 = *(_T3 - 4)
_T40 = (_T34 < _T39)
if (_T40 == 0) branch _L17
_T41 = 4
_T42 = (_T34 * _T41)
_T43 = (_T3 + _T42)
_T44 = *(_T43 + 0)
_T36 = _T44
branch _L18
_L17:
_T36 = _T35
_L18:
_T33 = _T36
parm _T21
call _PrintInt
parm _T33
call _PrintInt
}
| [
"[email protected]"
] | |
39a7a7910947b23ad6224bea3ea1134b4edcabd0 | 9300bb32ea2256f42fd17b35ad6591c57aff7320 | /Homework2/transition.py | 41ab0dece17b93f5c176bc99c359bee09ec49260 | [] | no_license | aaumsq/NLP_HW2 | 4a4f1a96d541815d371922a5b0f10dd6669976d8 | 2a5756d1e117aae8cd8234bb5dc6b4280091b850 | refs/heads/master | 2021-01-11T04:12:13.450460 | 2015-11-05T01:32:47 | 2015-11-05T01:32:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,409 | py | class Transition(object):
"""
This class defines a set of transitions which are applied to a
configuration to get the next configuration.
"""
# Define set of transitions
LEFT_ARC = 'LEFTARC'
RIGHT_ARC = 'RIGHTARC'
SHIFT = 'SHIFT'
REDUCE = 'REDUCE'
def __init__(self):
raise ValueError('Do not construct this object!')
@staticmethod
def left_arc(conf, relation):
"""
:param configuration: is the current configuration
:return : A new configuration or -1 if the pre-condition is not satisfied
"""
# guard against an empty stack or buffer (conf.buffer[0] is used below)
if not conf.buffer or not conf.stack:
return -1
# word in stack cannot already be a dependent. Also, cannot be root
idx_last_stack = conf.stack[-1]
is_root = (idx_last_stack == 0)
precond_met = (not Transition.is_index_dependent(idx_last_stack, conf.arcs)) and not is_root
if not precond_met:
return -1
# do left arc
conf.stack.pop(-1)
# get first thing in buffer
idx_first_buffer = conf.buffer[0]
# create the arc dependency
conf.arcs.append((idx_first_buffer, relation, idx_last_stack))
@staticmethod
def right_arc(conf, relation):
"""
:param configuration: is the current configuration
:return : A new configuration or -1 if the pre-condition is not satisfied
"""
# return -1 if buffer or stack are empty/null
if not conf.buffer or not conf.stack:
return -1
# You get this one for free! Use it as an example.
idx_wi = conf.stack[-1] # get last item in stack
idx_wj = conf.buffer.pop(0) # get first thing in buffer
conf.stack.append(idx_wj) # add thing just popped from buffer
conf.arcs.append((idx_wi, relation, idx_wj)) # create the arc dependency
@staticmethod
def reduce(conf):
"""
:param configuration: is the current configuration
:return : A new configuration or -1 if the pre-condition is not satisfied
"""
# return -1 if buffer or stack are empty/null
if not conf.buffer or not conf.stack:
return -1
# pointer to item to examine
idx_last_stack = conf.stack[-1]
# pre condition: last item on stack must be a dependent of something (must be on the right of a relation)
precond_met = Transition.is_index_dependent(idx_last_stack, conf.arcs)
# conduct reduce
if precond_met:
conf.stack.pop(-1)
else:
return -1
@staticmethod
def is_index_dependent(index, arcs):
"""
Determines if the given index is already a dependent in the arcs passed in
:param index:
:param arcs:
:return:
"""
is_dep = False
# see if item is a dep
for arc in arcs:
parent, relation, child = arc
if child == index:
is_dep = True
break
return is_dep
@staticmethod
def shift(conf):
"""
:param configuration: is the current configuration
:return : A new configuration or -1 if the pre-condition is not satisfied
"""
# return -1 if buffer or stack are empty/null
if not conf.buffer:
return -1
# push first item of buffer into stack
conf.stack.append(conf.buffer.pop(0))
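# A minimal usage sketch (assumption: any object exposing stack/buffer/arcs
# attributes works, since the static methods above only touch those; the real
# Configuration class for the homework is defined elsewhere).
if __name__ == '__main__':
    class _Conf(object):
        def __init__(self, n_words):
            self.stack = [0]                       # index 0 acts as root
            self.buffer = list(range(1, n_words))  # remaining word indices
            self.arcs = []                         # (head, relation, dependent)

    conf = _Conf(3)                     # root plus words 1 and 2
    Transition.shift(conf)              # stack=[0, 1], buffer=[2]
    Transition.left_arc(conf, 'nsubj')  # adds (2, 'nsubj', 1), stack=[0]
    Transition.right_arc(conf, 'root')  # adds (0, 'root', 2), stack=[0, 2]
    print(conf.arcs)                    # [(2, 'nsubj', 1), (0, 'root', 2)]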
| [
"[email protected]"
] | |
c8e1b2ac3f4d393d1c619b5af8bdf3958138ab78 | 019d16e6a5f1078693d7d91eb8e7ba6de1c3ee27 | /Gradiant/display_tk.py | 1c52cfef0d430b584ba8bd60a7dcb64a92eb6668 | [] | no_license | hank2q/Scripts | 168fa6682dfc8d4476c2bd7d5dad5f89908e45a8 | 24fa6faee2e6f35e787d8f91d518744ab0863c66 | refs/heads/master | 2023-01-05T09:50:55.437826 | 2020-11-06T01:12:00 | 2020-11-06T01:12:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,178 | py | from tkinter import *
from gradiant_generator import get_levels
import display_html
def parse_colors(starting_colors, ending_colors, levels, choice):
sr, sg, sb = int(starting_colors[:2], base=16), int(
starting_colors[2:4], base=16), int(starting_colors[4:6], base=16)
er, eg, eb = int(ending_colors[:2], base=16), int(
ending_colors[2:4], base=16), int(ending_colors[4:6], base=16)
r_range = get_levels(sr, er, levels)
g_range = get_levels(sg, eg, levels)
b_range = get_levels(sb, eb, levels)
complete = list(zip(r_range[0], g_range[0], b_range[0]))
complete_hex = [''.join(t)
for t in list(zip(r_range[1], g_range[1], b_range[1]))]
if choice == 1:
display(complete_hex)
else:
display_html.make_html(complete_hex)
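# Note: get_levels (imported from gradiant_generator; source not shown) is
# assumed to return, per channel, a pair (numeric_steps, hex_steps); zipping
# the three channels' hex steps and joining each triple yields one RRGGBB
# string per gradient level.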
def get_colors():
window = Tk()
window.geometry('300x400')
Label(window, text='Enter starting hex color:').pack(pady=8)
starting_color = Entry(window)
starting_color.pack(pady=8)
Label(window, text='Enter ending hex color:').pack(pady=8)
ending_color = Entry(window)
ending_color.pack(pady=8)
Label(window, text='Enter levels of transition:').pack(pady=8)
levels = Entry(window)
levels.pack(pady=8)
Label(window, text='Select where to display the results').pack(pady=8)
num = IntVar()
Radiobutton(window, text='GUI', variable=num, value=1).pack(pady=5)
Radiobutton(window, text='HTML', variable=num, value=2).pack(pady=5)
Button(window, text='Convert', command=lambda: parse_colors(
starting_color.get(), ending_color.get(), int(levels.get()), num.get())).pack(pady=8)
window.mainloop()
def display(hexes):
root = Tk()
for color in hexes:
frame = Frame(root)
Canvas(frame, bg='#'+color, height=100, width=100).pack(side=TOP)
# data_string = StringVar()
# data_string.set("#"+color)
name = Label(frame, text=f'#{color}', fg="black", bg="white", bd=0)
name.pack(side=TOP, padx=2)
frame.pack(side=LEFT)
root.mainloop()
if __name__ == "__main__":
get_colors()
| [
"[email protected]"
] | |
ce4a4dc5cf05be5050a4a06bd1f80afbf50410fc | 55033f698ea1e34e49ccc74c523cedbcf119b113 | /PycharmProjects/examples/Числа фібоначі простий вивід і списком.py | 982da14818b8bae3e5bc46d81fe88c8c80094bc6 | [] | no_license | mykola-444/video_courses_python | ddcc501f35686b4f617f210da4a6814d6e75df85 | 0c169af2228054bc0ca5302898ae49a65b8b9b38 | refs/heads/master | 2023-06-09T20:21:35.167145 | 2021-07-01T21:34:50 | 2021-07-01T21:34:50 | 381,834,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | '''\
Generate and print Fibonacci numbers
'''
from __future__ import print_function  # keeps print(..., end=' ') working on Python 2 as well

def fib(n):
    '''Prints the sequence of Fibonacci numbers not exceeding n'''
    a, b = 0, 1
    while b < n:
        print(b, end=' ')
        a, b = b, a + b

fib(15)

def fib2(n):
    '''Returns a list containing the Fibonacci numbers not exceeding n'''
    result = []
    a, b = 0, 1
    while b < n:
        result.append(b)
        a, b = b, a + b
    return result

print(fib2(129))
| [
"[email protected]"
] | |
6dfb08c5365684661b00c9d978bc769749b07339 | 51a34aac71add1b0e92b11ae457774cb2e51f3ee | /nltk_download_utils.py | 2663fb0eb39f43bdb0573942033a2edde7ae83b1 | [] | no_license | krishnakaushik25/NLP-Tasks-Streamlit | a050d1a464426e71791fcac814fcdf0f864536a0 | 86d611ba5a5e83abf379c2d4853d7d00cddbf0f0 | refs/heads/main | 2023-07-20T07:33:50.448718 | 2021-09-06T16:19:31 | 2021-09-06T16:19:31 | 403,672,698 | 0 | 1 | null | 2021-09-06T15:32:57 | 2021-09-06T15:28:47 | Python | UTF-8 | Python | false | false | 65 | py | import nltk
nltk.download('wordnet')
nltk.download('punkt')
| [
"[email protected]"
] | |
d2b1ec90e353e52a4aff03d47a793108f4455f7d | 116242b7d434f3820c4cf374869003ae5b0798a4 | /iten/mpi_md_tensile_save.py | 366d725dae26ca849d48ea69e139bed5c7849a83 | [] | no_license | chaomy/Mechanics_cal | 701243656c4c381a1b77b67707dd858d0e55d3ed | 6ca55ebca73943282b2378b7bfc32d9da54eed99 | refs/heads/master | 2020-07-15T08:53:45.630516 | 2019-05-09T20:39:28 | 2019-05-09T20:39:28 | 94,307,592 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 19,918 | py | #!/usr/bin/env python
# encoding: utf-8
# -*- coding: utf-8 -*-
# @Author: yang37
# @Date: 2017-06-12 17:03:43
# @Last Modified by: chaomy
# @Last Modified time: 2018-02-05 23:59:58
import copy
import os
import numpy as np
from multiprocessing import Pool
import shutil
import sys
try:
import get_data
import output_data
import gn_lmp_infile
except ImportError:
print("error during import")
__version__ = 0.01
__author__ = 'Chaoming Yang'
__all__ = ['md_tensile']
def unwrap_self_run_lammps(arg, **kwarg):
return md_loop_tensile.lammps_job(*arg, **kwarg)
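# Pool.map can only pickle module-level callables, so the parallel tensile jobs
# are routed through this wrapper: each work item is a tuple (self, option, job)
# that gets unpacked into the bound md_loop_tensile.lammps_job call.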
class md_tensile(get_data.get_data,
output_data.output_data,
gn_lmp_infile.gn_md_infile):
def __init__(self, element,
lattice_constant, size,
orientation, structure):
get_data.get_data.__init__(self)
output_data.output_data.__init__(self)
gn_lmp_infile.gn_md_infile.__init__(self)
self.element = element
self.lattice_constant = lattice_constant
self.set_lattice_constant()
self.size = size
self.orientation = orientation
self.M = self.get_M()
self.Sij = self.get_Sij()
self.structure = structure
self.lx, self.ly, self.lz = 0, 0, 0
self.addedstrain = np.mat([[0, 0, 0],
[0, 0, 0],
[0, 0, 0]], "float")
self.stress_original = np.array([0, 0, 0, 0, 0, 0],
dtype="float")
def update_strain(self, Inputstrain,
stress, Correct_strain):
addedstrain = copy.deepcopy(self.addedstrain)
if abs(stress[1]) > self._stress_ThrValue:
addedstrain[1, 1] = Inputstrain[1]
Correct_strain[1, 1] += addedstrain[1, 1]
if abs(stress[2]) > self._stress_ThrValue:
addedstrain[2, 2] = Inputstrain[2]
Correct_strain[2, 2] += addedstrain[2, 2]
return Correct_strain
def gn_bcc_tpath(self,
delta,
Correct_strain):
element = self.element
M = self.get_M()
lattice_constant = self.lattice_constant
size = self.size
M = self.M
# original_strain = np.matrix([[1 + delta, 0, 0],
# [0, 1 + 0.2 * delta, 0],
# [0, 0, 1 - 0.2 * delta]],
# "float") + Correct_strain
Transformed_strain = M.transpose() * self.strainmtx * M
Base_vector = np.matrix([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
Transposed_Base = Transformed_strain * Base_vector
xsize = size[0]
ysize = size[1]
zsize = size[2]
a = 1. / xsize
b = 1. / ysize
c = 1. / zsize
Lengthx = Transposed_Base[0, :] * Transposed_Base[0, :].transpose()
Lengthy = Transposed_Base[1, :] * Transposed_Base[1, :].transpose()
Lengthz = Transposed_Base[2, :] * Transposed_Base[2, :].transpose()
Lengthx = np.sqrt(Lengthx)
Lengthx *= lattice_constant
Lengthy = np.sqrt(Lengthy)
Lengthy *= lattice_constant
Lengthz = np.sqrt(Lengthz)
Lengthz *= lattice_constant
xlo, xhi = 0.0, Lengthx * xsize
ylo, yhi = 0.0, Lengthy * ysize
zlo, zhi = 0.0, Lengthz * zsize
AtomNumber = int(xsize * ysize * zsize) * 2
XDirection, YDirection, ZDirection = [], [], []
for z in range(zsize):
for y in range(ysize):
for x in range(xsize):
XDirection.append((x) * a * Lengthx)
YDirection.append((y) * b * Lengthy)
ZDirection.append((z) * c * Lengthz)
XDirection.append((x + 0.5) * a * Lengthx)
YDirection.append((y + 0.5) * b * Lengthy)
ZDirection.append((z + 0.5) * c * Lengthz)
filename = "lattice.txt" # %(element,lattice_constant)
with open(filename, mode="w") as fout:
fout.write("#bcc_structure_input for lammps\n")
fout.write("\n")
fout.write("%d atoms\n" % (AtomNumber))
fout.write("1 atom types\n")
fout.write("%f\t%f xlo xhi\n" % (xlo, xhi))
fout.write("%f\t%f ylo yhi\n" % (ylo, yhi))
fout.write("%f\t%f zlo zhi\n" % (zlo, zhi))
fout.write("%8.5f %8.5f %8.5f xy xz yz" % (0, 0, 0))
fout.write("\n")
fout.write("Atoms\n")
fout.write("\n")
for i in range(AtomNumber):
fout.write("%d 1 %12.7f %12.7f %12.7f\n"
% (i + 1,
XDirection[i],
YDirection[i],
ZDirection[i]))
fout.close()
os.system("cp lattice.txt lattice_%5.3f.txt" % (delta))
########### generate cfg #########################
XXDirection, YYDirection, ZZDirection = [], [], []
for z in range(zsize):
for y in range(ysize):
for x in range(xsize):
XXDirection.append((x) * a)
YYDirection.append((y) * b)
ZZDirection.append((z) * c)
XXDirection.append((x + 0.5) * a)
YYDirection.append((y + 0.5) * b)
ZZDirection.append((z + 0.5) * c)
curdir = os.getcwd()
os.chdir("mycfg")
filename = "cfg_%5.4f.cfg" % (delta)
with open(filename, mode="w") as fout:
fout.write("Number of particles = %d\n" % (AtomNumber))
fout.write("""A = 1 Angstrom
H0(1,1) = %f A
H0(1,2) = 0 A
H0(1,3) = 0 A
H0(2,1) = 0 A
H0(2,2) = %f A
H0(2,3) = 0 A
H0(3,1) = 0 A
H0(3,2) = 0 A
H0(3,3) = %f A
Transform(1,1) = 1
Transform(1,2) = %f
Transform(1,3) = %f
Transform(2,1) = %f
Transform(2,2) = 1
Transform(2,3) = %f
Transform(3,1) = %f
Transform(3,2) = %f
Transform(3,3) = 1
""" % (Transposed_Base[0, 0] * xsize * Lengthx,
Transposed_Base[1, 1] * ysize * Lengthy,
Transposed_Base[2, 2] * zsize * Lengthz,
Transposed_Base[0, 1] * xsize,
Transposed_Base[0, 2] * xsize,
Transposed_Base[1, 0] * xsize,
Transposed_Base[1, 2] * xsize,
Transposed_Base[2, 0] * xsize,
Transposed_Base[2, 1] * xsize))
for i in range(AtomNumber):
fout.write("95.94\t%s\t%7.7f\t %7.7f\t %7.7f\t0\t0\t0\n"
% (element,
XXDirection[i],
YYDirection[i],
ZZDirection[i]))
fout.close()
os.chdir(curdir)
return
def gn_bcc_opath(self,
delta,
Correct_strain):
element = self.element
M = self.get_M()
lattice_constant = self.lattice_constant
size = self.size
M = self.M
# original_strain = np.matrix([[1 + delta, 0, 0],
# [0, 1 + 0.2 * delta, 0],
# [0, 0, 1 - 0.2 * delta]],
# "float") + Correct_strain
Transformed_strain = M.transpose() * self.strainmtx * M
Base_vector = np.matrix([[1, 0, 0],
[0, np.sqrt(2), 0],
[0, 0, np.sqrt(2)]])
Transposed_Base = Transformed_strain * Base_vector
xsize = size[0]
ysize = size[1]
zsize = size[2]
a = 1. / xsize
b = 1. / ysize
c = 1. / zsize
Lengthx = Transposed_Base[0, :] * Transposed_Base[0, :].transpose()
Lengthy = Transposed_Base[1, :] * Transposed_Base[1, :].transpose()
Lengthz = Transposed_Base[2, :] * Transposed_Base[2, :].transpose()
Lengthx = np.sqrt(Lengthx)
Lengthx *= lattice_constant
Lengthy = np.sqrt(Lengthy)
Lengthy *= lattice_constant
Lengthz = np.sqrt(Lengthz)
Lengthz *= lattice_constant
xlo, xhi = 0.0, Lengthx * xsize
ylo, yhi = 0.0, Lengthy * ysize
zlo, zhi = 0.0, Lengthz * zsize
AtomNumber = int(xsize * ysize * zsize) * 4
XDirection, YDirection, ZDirection = [], [], []
for z in range(zsize):
for y in range(ysize):
for x in range(xsize):
XDirection.append((x) * a * Lengthx)
YDirection.append((y) * b * Lengthy)
ZDirection.append((z) * c * Lengthz)
XDirection.append((x) * a * Lengthx)
YDirection.append((y + 0.5) * b * Lengthy)
ZDirection.append((z + 0.5) * c * Lengthz)
XDirection.append((x + 0.5) * a * Lengthx)
YDirection.append((y) * b * Lengthy)
ZDirection.append((z + 0.5) * c * Lengthz)
XDirection.append((x + 0.5) * a * Lengthx)
YDirection.append((y + 0.5) * b * Lengthy)
ZDirection.append((z) * c * Lengthz)
filename = "lattice.txt" # %(element,lattice_constant)
with open(filename, mode="w") as fout:
fout.write("#bcc_structure_input for lammps\n")
fout.write("\n")
fout.write("%d atoms\n" % (AtomNumber))
fout.write("1 atom types\n")
fout.write("%f\t%f xlo xhi\n" % (xlo, xhi))
fout.write("%f\t%f ylo yhi\n" % (ylo, yhi))
fout.write("%f\t%f zlo zhi\n" % (zlo, zhi))
fout.write("%8.5f %8.5f %8.5f xy xz yz" % (0, 0, 0))
fout.write("\n")
fout.write("Atoms\n")
fout.write("\n")
for i in range(AtomNumber):
fout.write("%d 1 %12.7f %12.7f %12.7f\n"
% (i + 1,
XDirection[i],
YDirection[i],
ZDirection[i]))
fout.close()
os.system("cp lattice.txt lattice_%5.3f.txt" % (delta))
# generate cfg #
XXDirection, YYDirection, ZZDirection = [], [], []
for z in range(zsize):
for y in range(ysize):
for x in range(xsize):
XXDirection.append((x) * a)
YYDirection.append((y) * b)
ZZDirection.append((z) * c)
XXDirection.append((x) * a)
YYDirection.append((y + 0.5) * b)
ZZDirection.append((z + 0.5) * c)
XXDirection.append((x + 0.5) * a)
YYDirection.append((y) * b)
ZZDirection.append((z + 0.5) * c)
XXDirection.append((x + 0.5) * a)
YYDirection.append((y + 0.5) * b)
ZZDirection.append((z) * c)
curdir = os.getcwd()
os.chdir("mycfg")
filename = "cfg_%5.4f.cfg" % (delta)
with open(filename, mode="w") as fout:
fout.write("Number of particles = %d\n" % (AtomNumber))
fout.write("""A = 1 Angstrom
H0(1,1) = %f A
H0(1,2) = 0 A
H0(1,3) = 0 A
H0(2,1) = 0 A
H0(2,2) = %f A
H0(2,3) = 0 A
H0(3,1) = 0 A
H0(3,2) = 0 A
H0(3,3) = %f A
Transform(1,1) = 1
Transform(1,2) = %f
Transform(1,3) = %f
Transform(2,1) = %f
Transform(2,2) = 1
Transform(2,3) = %f
Transform(3,1) = %f
Transform(3,2) = %f
Transform(3,3) = 1
""" % (Transposed_Base[0, 0] * xsize * Lengthx,
Transposed_Base[1, 1] * ysize * Lengthy,
Transposed_Base[2, 2] * zsize * Lengthz,
Transposed_Base[0, 1] * xsize,
Transposed_Base[0, 2] * xsize,
Transposed_Base[1, 0] * xsize,
Transposed_Base[1, 2] * xsize,
Transposed_Base[2, 0] * xsize,
Transposed_Base[2, 1] * xsize))
for i in range(AtomNumber):
fout.write("95.94\t%s\t%7.7f\t %7.7f\t %7.7f\t0\t0\t0\n"
% (element,
XXDirection[i],
YYDirection[i],
ZZDirection[i]))
fout.close()
os.chdir(curdir)
return
class md_loop_tensile(md_tensile):
def __init__(self,
element='Nb',
lattice_constant=3.30,
size=[1, 1, 1],
orientation=[[1, 0, 0],
[0, 1, 0],
[0, 0, 1]],
structure='bcc',
in_potential='dummy.lammps.ADP'):
self._flux_exe = 'lmp_mpi < in.stat_tensile'
self._looptime = 10
self._increment = 0.02
self._stress_ThrValue = 0.05
self._tensile_potential = in_potential
self._element = element
self._lattice_constant = lattice_constant
self._size = size
self._orientation = orientation
self._structure = structure
self.root_dir = os.getcwd()
return
def set_orientation(self, orientation):
self._orientation = orientation
return
def set_lattce_constant(self, lattice_constant):
self._lattice_constant = lattice_constant
return
def set_loop_time(self, loop_time):
self._looptime = loop_time
return
def set_potential(self, potential):
self._tensile_potential = potential
return
def get_strain(self,
Sij,
stress,
stressPrime):
tag = "constant"
strain = np.zeros(6).transpose()
if tag == "constant":
coeff = 1.0
strain[1] = Sij[0, 1] * stress[1] * coeff
strain[2] = Sij[0, 2] * stress[2] * coeff
else:
stressPrime = float(stressPrime)
coeff = 0.06 + 0.06 * stressPrime ** 0.20 + \
0.03 * stressPrime ** 0.19 + \
0.01 * stressPrime ** 0.1
if stressPrime > 20:
coeff = 0.08 + 0.01 * stressPrime ** 0.19 + \
0.09 * stressPrime ** 0.1
strain = -Sij * stress * coeff
return strain
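# In effect get_strain performs one step of a stress-relaxation loop: the
# transverse strain increments follow Hooke's law (strain_j = S_1j * stress_j)
# scaled by a damping coefficient, so the lateral stresses iterate toward zero
# while the imposed axial strain (delta) is held fixed.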
def lammps_job(self,
option,
job):
if option == 'TP' or option == 'tpath':
directory = "TP_%s_%d" % (self._element,
job)
output_data_file = os.path.join(self.root_dir, 'TP-DATA')
elif option == 'OP' or option == 'opath':
directory = "OP_%s_%d" % (self._element,
job)
output_data_file = os.path.join(self.root_dir, 'OP-DATA')
if os.path.isdir(directory):
shutil.rmtree(directory)
os.mkdir(directory)
shutil.copy("./in.stat_tensile", directory)
shutil.copy(self._tensile_potential, directory)
os.system("cp ~/src/Data_process/cij_sij.txt %s" % (directory))
os.chdir(directory)
self.gn_md_tensile(potential_file=self._tensile_potential,
element=self._element)
os.mkdir("cfg")
os.mkdir("mycfg")
os.mkdir("dump")
os.mkdir("restart")
os.mkdir("xyz")
if os.path.isfile(output_data_file):
os.system(": > %s" % (output_data_file))
# main function #
Correct_strain = np.mat([[0, 0, 0],
[0, 0, 0],
[0, 0, 0]], "float")
md_tensile.__init__(self,
self._element,
self._lattice_constant,
self._size,
self._orientation,
self._structure)
delta = job * 0.02
count = 0
coeff = 1.0
self.strainmtx = np.matrix([[1 + delta, 0, 0],
[0, 1 + 0.2 * delta, 0],
[0, 0, 1 - 0.2 * delta]],
"float") + Correct_strain
strain = np.zeros(6).transpose()
while True:
if option == 'TP':
self.gn_bcc_tpath(delta, Correct_strain)
elif option == 'OP':
self.gn_bcc_opath(delta, Correct_strain)
# os.system("cat lattice.txt >> backup.txt")
os.system("%s > Log_MD" % (self._flux_exe))
# lx, ly, lz #
self.lx, self.ly, self.lz = self.md_get_lx_ly_lz_from_Log("Log_MD")
self.stress_original = self.md_get_stress()
stress = copy.deepcopy(self.stress_original)
stress[0] = 0.0
stress_abs = np.abs(stress)
stressPrime = np.max(stress_abs)
if stressPrime < self._stress_ThrValue:
self.output_md_tensile(delta)
break
elif count > 500:
self.output_md_tensile(delta)
break
else:
# update strain #
print("Sij = ", self.Sij[0, 1], self.Sij[0, 2])
if abs(stress[1]) > self._stress_ThrValue:
strain[1] = self.Sij[0, 1] * stress[1] * coeff
self.strainmtx[1, 1] += strain[1]
if abs(stress[2]) > self._stress_ThrValue:
strain[2] = self.Sij[0, 2] * stress[2] * coeff
self.strainmtx[2, 2] += strain[2]
# Correct_strain = self.update_strain(strain,
# stress,
# Correct_strain)
with open('monitor.txt', 'a') as fid:
print("delta ", delta, file=fid)
print("Run times", count, file=fid)
print("stress_original", self.stress_original, file=fid)
print("stressPrime ", stressPrime, file=fid)
fid.close()
count += 1
os.system("cat DATA >> %s" % (output_data_file))
os.chdir(self.root_dir)
return (delta, self.stress_original[0, 0])
def cal_md_tensile(self,
options):
pool = Pool(processes=self._looptime)
List = np.arange(self._looptime)
results = pool.map(unwrap_self_run_lammps,
list(zip([self] * len(List),
[options] * len(List),
List)))
strain = []
stress = []
for i in range(self._looptime):
strain.append(results[i][0])
stress.append(results[i][1])
return (strain, stress)
def output_log(self, strain, stresstp, stressop):
with open("tensile.log", "w") as fid:
for i in range(len(strain)):
fid.write("%8.5f %10.5f %10.5f\n"
% (strain[i],
stresstp[i],
stressop[i]))
fid.close()
# os.system("rm -rf TP_%s_*" % (self._element))
# os.system("rm -rf OP_%s_*" % (self._element))
return
def set_lattice_constant(self, opt="given"):
if opt == "given":
self.lattice_constant = float(sys.argv[1])
return
if __name__ == "__main__":
makePot = "dummy.lammps.ADP"
givenPot = "Nb.eam.alloy.webarchive"
M = md_loop_tensile(element='Nb',
lattice_constant=3.34349187552591,
size=[1, 1, 1],
orientation=[1, 0, 0, 0, 1, 0],
structure='bcc',
in_potential=makePot)
(strain, stress) = M.cal_md_tensile('TP')
# (strain2, stress2) = M.cal_md_tensile('OP')
# M.output_log(strain, stress, stress2)
| [
"[email protected]"
] | |
af9e15439ac1cd30d4c7ab1cfd3ee48556886d25 | 4fbd844113ec9d8c526d5f186274b40ad5502aa3 | /algorithms/python3/falling_squares.py | a023207cd18d93a6579e10dfbd0759cd751144ce | [] | no_license | capric8416/leetcode | 51f9bdc3fa26b010e8a1e8203a7e1bcd70ace9e1 | 503b2e303b10a455be9596c31975ee7973819a3c | refs/heads/master | 2022-07-16T21:41:07.492706 | 2020-04-22T06:18:16 | 2020-04-22T06:18:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,116 | py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
On an infinite number line (x-axis), we drop given squares in the order they are given.
The i-th square dropped (positions[i] = (left, side_length)) is a square with the left-most point being positions[i][0] and sidelength positions[i][1].
The square is dropped with the bottom edge parallel to the number line, and from a higher height than all currently landed squares.
We wait for each square to stick before dropping the next.
The squares are infinitely sticky on their bottom edge, and will remain fixed to any positive length surface they touch (either the number line or another square).
Squares dropped adjacent to each other will not stick together prematurely.
Return a list ans of heights.
Each height ans[i] represents the current highest height of any square we have dropped, after dropping squares represented by positions[0], positions[1], ..., positions[i].
Example 1:
Input: [[1, 2], [2, 3], [6, 1]]
Output: [2, 5, 5]
Explanation:
After the first drop of positions[0] = [1, 2]:
_aa
_aa
-------
The maximum height of any square is 2.
After the second drop of positions[1] = [2, 3]:
__aaa
__aaa
__aaa
_aa__
_aa__
--------------
The maximum height of any square is 5.
The larger square stays on top of the smaller square despite where its center
of gravity is, because squares are infinitely sticky on their bottom edge.
After the third drop of positions[1] = [6, 1]:
__aaa
__aaa
__aaa
_aa
_aa___a
--------------
The maximum height of any square is still 5.
Thus, we return an answer of [2, 5, 5].
Example 2:
Input: [[100, 100], [200, 100]]
Output: [100, 100]
Explanation: Adjacent squares don't get stuck prematurely - only their bottom edge can stick to surfaces.
Note:
1 <= positions.length <= 1000.
1 <= positions[i][0] <= 10^8.
1 <= positions[i][1] <= 10^6.
"""
""" ==================== body ==================== """
class Solution:
def fallingSquares(self, positions):
"""
:type positions: List[List[int]]
:rtype: List[int]
"""
""" ==================== body ==================== """
| [
"[email protected]"
] | |
0e12c73f039b7af871f1a97860bb6eacd11cf968 | 55692ac1b8a1b00750c0b9caf7ebba53f1dde78b | /server/common_models/tip.py | 4e771aad6c7c678c58c788036a3067d968b6eb5d | [] | no_license | Soopro/julolo | 8d9dea62aa055318f891d200614314e402bda1eb | 73cc67f378f45c0da40911bac5e5e038f63588ab | refs/heads/master | 2021-10-26T08:50:58.940548 | 2019-04-11T15:41:12 | 2019-04-11T15:41:12 | 107,217,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,529 | py | # coding=utf-8
from __future__ import absolute_import
from utils.misc import now
from document import BaseDocument, ObjectId, INDEX_DESC, INDEX_ASC
class Tip(BaseDocument):
STATUS_OFF, STATUS_ON = 0, 1
MAX_STORAGE = 600
MAX_QUERY = 60
structure = {
'key': unicode,
'title': unicode,
'content': unicode,
'src': unicode,
'priority': int,
'status': int,
'updated': int,
'creation': int
}
required_fields = ['key']
default_values = {
'title': u'',
'content': u'',
'src': u'',
'priority': 0,
'status': STATUS_OFF,
'creation': now,
'updated': now,
}
indexes = [
{
'fields': ['key'],
'unique': True,
},
{
'fields': [('priority', INDEX_ASC), ('updated', INDEX_DESC)],
},
{
'fields': ['status'],
},
]
def find_one_by_id(self, _id):
return self.find_one({
'_id': ObjectId(_id),
})
def find_one_by_key(self, key):
return self.find_one({
'key': key
})
def find_all(self):
sorts = [('priority', INDEX_ASC), ('updated', INDEX_DESC)]
return self.find().sort(sorts).limit(self.MAX_QUERY)
def find_activated(self):
sorts = [('priority', INDEX_ASC), ('updated', INDEX_DESC)]
return self.find({
'status': self.STATUS_ON,
}).sort(sorts).limit(self.MAX_QUERY)
| [
"[email protected]"
] | |
a299aa698b5743a46ec7fb78779dc2e1a7e025a9 | 390e9c2561bf4e32efc9a52a9a105f74b77d0a81 | /TrainerMnist2.py | e4f5409a9f4883bf17880be21d4146e3813990d8 | [] | no_license | prutoplox/ForschungsprojektWS18-19 | 4ad2214b1ef3c0949f2dee2fa8c934ba844cb6fd | c9509e0d4ca67e0deac3a7650f86187e17b12c0c | refs/heads/master | 2020-04-10T21:46:04.056353 | 2018-12-20T14:57:33 | 2018-12-20T14:57:33 | 161,305,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,096 | py | #Codeauschnitte aus Vorlesung von Herrn Prof. Dr. Gepperth HS Fulda
mnistPath = "/Users/mh/PycharmProjects/Forschungsprojekt/venv/include/mnist.pkl.gz"
import matplotlib as mp ;
mp.use("Qt4Agg") ;
import gzip, pickle,numpy as np, matplotlib.pyplot as plt ;
import numpy.random as npr, tensorflow as tf, sys ;
from matplotlib.widgets import Button ;
import math ;
# expects 1D array
def sm(arr):
num = np.exp(arr) ;
den = num.sum() ;
return num/den ;
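# For numerical stability one would normally subtract the maximum first, since
# softmax is shift-invariant: num = np.exp(arr - arr.max()).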
def test_cb(self):
global testit ;
ax1.cla();
ax2.cla();
ax3.cla();
ax1.imshow(testd[testit].reshape(28,28)) ;
confs =sm(testout[testit]) ;
ax2.bar(range(0,10),confs);
ax2.set_ylim(0,1.)
ce = -(confs*np.log(confs+0.00000001)).sum() ;
ax3.text(0.5,0.5,str(ce),fontsize=20)
testit = testit + 1;
f.canvas.draw();
print ("--------------------") ;
print("logits", testout[testit], "probabilities", sm(testout[testit]), "decision", testout[testit].argmax(), "label", testl[testit].argmax()) ;
sess = tf.Session();
with gzip.open(mnistPath, 'rb') as f:
((traind,trainl),(vald,vall),(testd,testl))=pickle.load(f, encoding='bytes')
data_placeholder = tf.placeholder(tf.float32,[None,784]) ;
label_placeholder = tf.placeholder(tf.float32,[None,10]) ;
fd = {data_placeholder: traind, label_placeholder : trainl } ;
#fd = {}
s = 0.0; # HAT gate temperature; initialized at 0? (annealed each epoch in the training loop)
smax = 1; # 1? How should this value be determined?
# B = total number of batches #Returns scalar
B = tf.shape(data_placeholder)[0];
# batch count evaluated once; kept under its own name so the bias variable "b"
# defined below no longer shadows it
nB = sess.run(B, {data_placeholder: testd});
Wh1 = tf.Variable(npr.uniform(-0.01,0.01, [784,200]),dtype=tf.float32, name ="Wh1") ;
bh1 = tf.Variable(npr.uniform(-0.01,0.01, [1,200]),dtype=tf.float32, name ="bh1") ;
Wh2 = tf.Variable(npr.uniform(-0.1,0.1, [200,200]),dtype=tf.float32, name ="Wh2") ;
bh2 = tf.Variable(npr.uniform(-0.01,0.01, [1,200]),dtype=tf.float32, name ="bh2") ;
Wh3 = tf.Variable(npr.uniform(-0.1,0.1, [200,200]),dtype=tf.float32, name ="Wh3") ;
bh3 = tf.Variable(npr.uniform(-0.01,0.01, [1,200]),dtype=tf.float32, name ="bh3") ;
W = tf.Variable(npr.uniform(-0.01,0.01, [200,10]),dtype=tf.float32, name ="W") ;
b = tf.Variable(npr.uniform(-0.01,0.01, [1,10]),dtype=tf.float32, name ="b") ;
sess.run(tf.global_variables_initializer()) ;
#elementwise max first only zeros
aMax0 = tf.zeros(B, tf.float32);
# e = embedding task = layer output???
#task t -> embedding etl -> sigmoid with etl ->
l1 = tf.nn.relu(tf.matmul(data_placeholder, Wh1) + bh1) ;
a1 = tf.nn.sigmoid(tf.math.multiply(tf.to_float(s), l1)); # y = 1 / (1 + exp(-x)) Sigmoid != HAT
aMax1 = tf.math.maximum(aMax0, a1);
print(l1)
l2 = tf.nn.relu(tf.matmul(l1, Wh2) + bh2) ;
a2 = tf.nn.sigmoid(tf.math.multiply(tf.to_float(s), l2));
aMax2 = tf.math.maximum(a1, a2);
print(l2)
l3 = tf.nn.relu(tf.matmul(l2, Wh3) + bh3) ;
a3 = tf.nn.sigmoid(tf.math.multiply(tf.to_float(s), l3)); # Last Layer Binary Hardcoded TODO
aMax3 = tf.math.maximum(a2, a3);
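# NOTE: the HAT attention gates a1..a3 / aMax1..aMax3 above are scaffolding so
# far: they were built with the initial value of the Python variable s (later
# reassignment does not alter the already-constructed graph) and are not yet
# wired into the logits or the loss.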
print(l3)
# cross-product ratio
logits = tf.matmul(l3, W)+b;
print(logits)
lossBySample = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=label_placeholder) ;
print(lossBySample) ;
loss = tf.reduce_mean(lossBySample) ;
# classification accuracy
nrCorrect = tf.reduce_mean(tf.cast(tf.equal (tf.argmax(logits,axis=1), tf.argmax(label_placeholder,axis=1)), tf.float32)) ;
optimizer = tf.train.GradientDescentOptimizer(learning_rate = 0.2) ; # 0.1 in HAT
update = optimizer.minimize(loss) ;
iteration = 0 ;
for iteration in range(0,100): # 0,50 in HAT
sess.run(update, feed_dict = fd) ;
correct, lossVal,_W = sess.run([nrCorrect, loss,W], feed_dict = fd) ;
# anneal s as in HAT: s = 1/smax + (smax - 1/smax) * (b-1)/(B-1); the epoch
# counter stands in for the batch index b here (assumption: full-batch training),
# and the bias variable "b" must not be used for this computation
s = (1. / smax) + (smax - (1. / smax)) * (iteration / 99.);
print("epoch", iteration, "acc=", float(correct), "loss=", lossVal, "wmM=", _W.min(), _W.max(), "s=", s);
testout = sess.run(logits, feed_dict = {data_placeholder : testd}) ;
testit = 0 ;
f,(ax1,ax2,ax3) = plt.subplots(nrows=1,ncols=3) ;
f.canvas.mpl_connect('button_press_event', test_cb)
plt.show();
ax = f.gca() ;
| [
"[email protected]"
] | |
35d22711fe8c5eddc9edb0c5057ad0a1428c09bd | 1a52315a176bd011d93e16ab51603e4ee92b8fae | /Hackerrank/Algo/Python/AngryProfessor.py | 1f664ba25d3f218dfc5a4b85d7ca45a5a82cf3ef | [] | no_license | divyamagwl/CompetitiveProgramming | c2fd3a2a02e3ecac0d06f2de550c84337a261b73 | fe2feb342e0a4d971c7809c0ee4f6290d5231b77 | refs/heads/master | 2023-02-11T01:28:22.744590 | 2021-01-09T12:40:34 | 2021-01-09T12:40:34 | 240,654,489 | 3 | 3 | null | 2020-10-01T10:46:34 | 2020-02-15T06:08:42 | C | UTF-8 | Python | false | false | 675 | py | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the angryProfessor function below.
def angryProfessor(k, a):
count = 0
for i in a:
if i <= 0:  # an arrival time <= 0 means the student is on time
count += 1
# the professor cancels class ("YES") only if fewer than k students are on time
if count >= k:
return "NO"
else:
return "YES"
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input())
for t_itr in range(t):
nk = input().split()
n = int(nk[0])
k = int(nk[1])
a = list(map(int, input().rstrip().split()))
result = angryProfessor(k, a)
fptr.write(result + '\n')
fptr.close()
| [
"[email protected]"
] | |
ec147ea8dd3a4c98d80872eeafbb83b590030f7b | c50e7eb190802d7849c0d0cea02fb4d2f0021777 | /src/monitor-control-service/azext_amcs/vendored_sdks/amcs/models/_models_py3.py | eac1042ad236408336213472f74539d592aa4a31 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/azure-cli-extensions | c1615b19930bba7166c282918f166cd40ff6609c | b8c2cf97e991adf0c0a207d810316b8f4686dc29 | refs/heads/main | 2023-08-24T12:40:15.528432 | 2023-08-24T09:17:25 | 2023-08-24T09:17:25 | 106,580,024 | 336 | 1,226 | MIT | 2023-09-14T10:48:57 | 2017-10-11T16:27:31 | Python | UTF-8 | Python | false | false | 78,575 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._monitor_client_enums import *
class AzureMonitorMetricsDestination(msrest.serialization.Model):
"""Azure Monitor Metrics destination.
:param name: A friendly name for the destination.
This name should be unique across all destinations (regardless of type) within the data
collection rule.
:type name: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
**kwargs
):
super(AzureMonitorMetricsDestination, self).__init__(**kwargs)
self.name = name
class ColumnDefinition(msrest.serialization.Model):
"""Definition of custom data column.
:param name: The name of the column.
:type name: str
:param type: The type of the column data. Possible values include: "string", "int", "long",
"real", "boolean", "datetime", "dynamic".
:type type: str or ~$(python-base-
namespace).v2021_09_01_preview.models.KnownColumnDefinitionType
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
type: Optional[Union[str, "KnownColumnDefinitionType"]] = None,
**kwargs
):
super(ColumnDefinition, self).__init__(**kwargs)
self.name = name
self.type = type
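# Usage sketch (illustrative values only): a custom column carrying an event
# timestamp, as it would appear in a stream declaration's column list.
_example_column = ColumnDefinition(name="TimeGenerated", type="datetime")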
class ConfigurationAccessEndpointSpec(msrest.serialization.Model):
"""Definition of the endpoint used for accessing configuration.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar endpoint: The endpoint. This property is READ-ONLY.
:vartype endpoint: str
"""
_validation = {
'endpoint': {'readonly': True},
}
_attribute_map = {
'endpoint': {'key': 'endpoint', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ConfigurationAccessEndpointSpec, self).__init__(**kwargs)
self.endpoint = None
class DataCollectionEndpoint(msrest.serialization.Model):
"""Definition of data collection endpoint.
Variables are only populated by the server, and will be ignored when sending a request.
:param description: Description of the data collection endpoint.
:type description: str
:param immutable_id: The immutable ID of this data collection endpoint resource. This property
is READ-ONLY.
:type immutable_id: str
:param configuration_access: The endpoint used by clients to access their configuration.
:type configuration_access: ~$(python-base-
namespace).v2021_09_01_preview.models.ConfigurationAccessEndpointSpec
:param logs_ingestion: The endpoint used by clients to ingest logs.
:type logs_ingestion: ~$(python-base-
namespace).v2021_09_01_preview.models.LogsIngestionEndpointSpec
:param network_acls: Network access control rules for the endpoints.
:type network_acls: ~$(python-base-namespace).v2021_09_01_preview.models.NetworkRuleSet
:ivar provisioning_state: The resource provisioning state. This property is READ-ONLY. Possible
values include: "Creating", "Updating", "Deleting", "Succeeded", "Failed".
:vartype provisioning_state: str or ~$(python-base-
namespace).v2021_09_01_preview.models.KnownDataCollectionEndpointProvisioningState
"""
_validation = {
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'immutable_id': {'key': 'immutableId', 'type': 'str'},
'configuration_access': {'key': 'configurationAccess', 'type': 'ConfigurationAccessEndpointSpec'},
'logs_ingestion': {'key': 'logsIngestion', 'type': 'LogsIngestionEndpointSpec'},
'network_acls': {'key': 'networkAcls', 'type': 'NetworkRuleSet'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
description: Optional[str] = None,
immutable_id: Optional[str] = None,
configuration_access: Optional["ConfigurationAccessEndpointSpec"] = None,
logs_ingestion: Optional["LogsIngestionEndpointSpec"] = None,
network_acls: Optional["NetworkRuleSet"] = None,
**kwargs
):
super(DataCollectionEndpoint, self).__init__(**kwargs)
self.description = description
self.immutable_id = immutable_id
self.configuration_access = configuration_access
self.logs_ingestion = logs_ingestion
self.network_acls = network_acls
self.provisioning_state = None
class DataCollectionEndpointConfigurationAccess(ConfigurationAccessEndpointSpec):
"""The endpoint used by clients to access their configuration.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar endpoint: The endpoint. This property is READ-ONLY.
:vartype endpoint: str
"""
_validation = {
'endpoint': {'readonly': True},
}
_attribute_map = {
'endpoint': {'key': 'endpoint', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DataCollectionEndpointConfigurationAccess, self).__init__(**kwargs)
class LogsIngestionEndpointSpec(msrest.serialization.Model):
"""Definition of the endpoint used for ingesting logs.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar endpoint: The endpoint. This property is READ-ONLY.
:vartype endpoint: str
"""
_validation = {
'endpoint': {'readonly': True},
}
_attribute_map = {
'endpoint': {'key': 'endpoint', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LogsIngestionEndpointSpec, self).__init__(**kwargs)
self.endpoint = None
class DataCollectionEndpointLogsIngestion(LogsIngestionEndpointSpec):
"""The endpoint used by clients to ingest logs.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar endpoint: The endpoint. This property is READ-ONLY.
:vartype endpoint: str
"""
_validation = {
'endpoint': {'readonly': True},
}
_attribute_map = {
'endpoint': {'key': 'endpoint', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DataCollectionEndpointLogsIngestion, self).__init__(**kwargs)
class NetworkRuleSet(msrest.serialization.Model):
"""Definition of the network rules.
:param public_network_access: The configuration to set whether network access from public
internet to the endpoints are allowed. Possible values include: "Enabled", "Disabled".
:type public_network_access: str or ~$(python-base-
namespace).v2021_09_01_preview.models.KnownPublicNetworkAccessOptions
"""
_attribute_map = {
'public_network_access': {'key': 'publicNetworkAccess', 'type': 'str'},
}
def __init__(
self,
*,
public_network_access: Optional[Union[str, "KnownPublicNetworkAccessOptions"]] = None,
**kwargs
):
super(NetworkRuleSet, self).__init__(**kwargs)
self.public_network_access = public_network_access
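# Usage sketch: lock down an endpoint so configuration access and ingestion
# cannot be reached from the public internet ("Disabled" is one of the
# documented KnownPublicNetworkAccessOptions values).
_example_network_acls = NetworkRuleSet(public_network_access="Disabled")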
class DataCollectionEndpointNetworkAcls(NetworkRuleSet):
"""Network access control rules for the endpoints.
:param public_network_access: The configuration to set whether network access from public
internet to the endpoints are allowed. Possible values include: "Enabled", "Disabled".
:type public_network_access: str or ~$(python-base-
namespace).v2021_09_01_preview.models.KnownPublicNetworkAccessOptions
"""
_attribute_map = {
'public_network_access': {'key': 'publicNetworkAccess', 'type': 'str'},
}
def __init__(
self,
*,
public_network_access: Optional[Union[str, "KnownPublicNetworkAccessOptions"]] = None,
**kwargs
):
super(DataCollectionEndpointNetworkAcls, self).__init__(public_network_access=public_network_access, **kwargs)
class DataCollectionEndpointResource(msrest.serialization.Model):
"""Definition of ARM tracked top level resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param location: Required. The geo-location where the resource lives.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param kind: The kind of the resource. Possible values include: "Linux", "Windows".
:type kind: str or ~$(python-base-
namespace).v2021_09_01_preview.models.KnownDataCollectionEndpointResourceKind
:ivar id: Fully qualified ID of the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar etag: Resource entity tag (ETag).
:vartype etag: str
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~$(python-base-namespace).v2021_09_01_preview.models.SystemData
:param description: Description of the data collection endpoint.
:type description: str
:param immutable_id: The immutable ID of this data collection endpoint resource. This property
is READ-ONLY.
:type immutable_id: str
:param configuration_access: The endpoint used by clients to access their configuration.
:type configuration_access: ~$(python-base-
namespace).v2021_09_01_preview.models.ConfigurationAccessEndpointSpec
:param logs_ingestion: The endpoint used by clients to ingest logs.
:type logs_ingestion: ~$(python-base-
namespace).v2021_09_01_preview.models.LogsIngestionEndpointSpec
:param network_acls: Network access control rules for the endpoints.
:type network_acls: ~$(python-base-namespace).v2021_09_01_preview.models.NetworkRuleSet
:ivar provisioning_state: The resource provisioning state. This property is READ-ONLY. Possible
values include: "Creating", "Updating", "Deleting", "Succeeded", "Failed".
:vartype provisioning_state: str or ~$(python-base-
namespace).v2021_09_01_preview.models.KnownDataCollectionEndpointProvisioningState
"""
_validation = {
'location': {'required': True},
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
'system_data': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'kind': {'key': 'kind', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'description': {'key': 'properties.description', 'type': 'str'},
'immutable_id': {'key': 'properties.immutableId', 'type': 'str'},
'configuration_access': {'key': 'properties.configurationAccess', 'type': 'ConfigurationAccessEndpointSpec'},
'logs_ingestion': {'key': 'properties.logsIngestion', 'type': 'LogsIngestionEndpointSpec'},
'network_acls': {'key': 'properties.networkAcls', 'type': 'NetworkRuleSet'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
kind: Optional[Union[str, "KnownDataCollectionEndpointResourceKind"]] = None,
description: Optional[str] = None,
immutable_id: Optional[str] = None,
configuration_access: Optional["ConfigurationAccessEndpointSpec"] = None,
logs_ingestion: Optional["LogsIngestionEndpointSpec"] = None,
network_acls: Optional["NetworkRuleSet"] = None,
**kwargs
):
super(DataCollectionEndpointResource, self).__init__(**kwargs)
self.location = location
self.tags = tags
self.kind = kind
self.id = None
self.name = None
self.type = None
self.etag = None
self.system_data = None
self.description = description
self.immutable_id = immutable_id
self.configuration_access = configuration_access
self.logs_ingestion = logs_ingestion
self.network_acls = network_acls
self.provisioning_state = None
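# Usage sketch (hypothetical location and description): a minimal endpoint
# payload; server-populated fields (id, etag, provisioning_state, ...) remain
# None until the resource is created.
_example_endpoint = DataCollectionEndpointResource(
    location="westus2",
    description="Endpoint for custom log ingestion (illustrative only)",
    network_acls=NetworkRuleSet(public_network_access="Enabled"),
)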
class DataCollectionEndpointResourceListResult(msrest.serialization.Model):
"""A pageable list of resources.
All required parameters must be populated in order to send to Azure.
:param value: Required. A list of resources.
:type value: list[~$(python-base-
namespace).v2021_09_01_preview.models.DataCollectionEndpointResource]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DataCollectionEndpointResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["DataCollectionEndpointResource"],
next_link: Optional[str] = None,
**kwargs
):
super(DataCollectionEndpointResourceListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class DataCollectionEndpointResourceProperties(DataCollectionEndpoint):
"""Resource properties.
Variables are only populated by the server, and will be ignored when sending a request.
:param description: Description of the data collection endpoint.
:type description: str
:param immutable_id: The immutable ID of this data collection endpoint resource. This property
is READ-ONLY.
:type immutable_id: str
:param configuration_access: The endpoint used by clients to access their configuration.
:type configuration_access: ~$(python-base-
namespace).v2021_09_01_preview.models.ConfigurationAccessEndpointSpec
:param logs_ingestion: The endpoint used by clients to ingest logs.
:type logs_ingestion: ~$(python-base-
namespace).v2021_09_01_preview.models.LogsIngestionEndpointSpec
:param network_acls: Network access control rules for the endpoints.
:type network_acls: ~$(python-base-namespace).v2021_09_01_preview.models.NetworkRuleSet
:ivar provisioning_state: The resource provisioning state. This property is READ-ONLY. Possible
values include: "Creating", "Updating", "Deleting", "Succeeded", "Failed".
:vartype provisioning_state: str or ~$(python-base-
namespace).v2021_09_01_preview.models.KnownDataCollectionEndpointProvisioningState
"""
_validation = {
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'immutable_id': {'key': 'immutableId', 'type': 'str'},
'configuration_access': {'key': 'configurationAccess', 'type': 'ConfigurationAccessEndpointSpec'},
'logs_ingestion': {'key': 'logsIngestion', 'type': 'LogsIngestionEndpointSpec'},
'network_acls': {'key': 'networkAcls', 'type': 'NetworkRuleSet'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
description: Optional[str] = None,
immutable_id: Optional[str] = None,
configuration_access: Optional["ConfigurationAccessEndpointSpec"] = None,
logs_ingestion: Optional["LogsIngestionEndpointSpec"] = None,
network_acls: Optional["NetworkRuleSet"] = None,
**kwargs
):
super(DataCollectionEndpointResourceProperties, self).__init__(description=description, immutable_id=immutable_id, configuration_access=configuration_access, logs_ingestion=logs_ingestion, network_acls=network_acls, **kwargs)
class SystemData(msrest.serialization.Model):
"""Metadata pertaining to creation and last modification of the resource.
:param created_by: The identity that created the resource.
:type created_by: str
:param created_by_type: The type of identity that created the resource. Possible values
include: "User", "Application", "ManagedIdentity", "Key".
:type created_by_type: str or ~$(python-base-
namespace).v2021_09_01_preview.models.CreatedByType
:param created_at: The timestamp of resource creation (UTC).
:type created_at: ~datetime.datetime
:param last_modified_by: The identity that last modified the resource.
:type last_modified_by: str
:param last_modified_by_type: The type of identity that last modified the resource. Possible
values include: "User", "Application", "ManagedIdentity", "Key".
:type last_modified_by_type: str or ~$(python-base-
namespace).v2021_09_01_preview.models.CreatedByType
:param last_modified_at: The timestamp of resource last modification (UTC).
:type last_modified_at: ~datetime.datetime
"""
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'str'},
'created_by_type': {'key': 'createdByType', 'type': 'str'},
'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
}
def __init__(
self,
*,
created_by: Optional[str] = None,
created_by_type: Optional[Union[str, "CreatedByType"]] = None,
created_at: Optional[datetime.datetime] = None,
last_modified_by: Optional[str] = None,
last_modified_by_type: Optional[Union[str, "CreatedByType"]] = None,
last_modified_at: Optional[datetime.datetime] = None,
**kwargs
):
super(SystemData, self).__init__(**kwargs)
self.created_by = created_by
self.created_by_type = created_by_type
self.created_at = created_at
self.last_modified_by = last_modified_by
self.last_modified_by_type = last_modified_by_type
self.last_modified_at = last_modified_at
class DataCollectionEndpointResourceSystemData(SystemData):
"""Metadata pertaining to creation and last modification of the resource.
:param created_by: The identity that created the resource.
:type created_by: str
:param created_by_type: The type of identity that created the resource. Possible values
include: "User", "Application", "ManagedIdentity", "Key".
:type created_by_type: str or ~$(python-base-
namespace).v2021_09_01_preview.models.CreatedByType
:param created_at: The timestamp of resource creation (UTC).
:type created_at: ~datetime.datetime
:param last_modified_by: The identity that last modified the resource.
:type last_modified_by: str
:param last_modified_by_type: The type of identity that last modified the resource. Possible
values include: "User", "Application", "ManagedIdentity", "Key".
:type last_modified_by_type: str or ~$(python-base-
namespace).v2021_09_01_preview.models.CreatedByType
:param last_modified_at: The timestamp of resource last modification (UTC).
:type last_modified_at: ~datetime.datetime
"""
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'str'},
'created_by_type': {'key': 'createdByType', 'type': 'str'},
'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
}
def __init__(
self,
*,
created_by: Optional[str] = None,
created_by_type: Optional[Union[str, "CreatedByType"]] = None,
created_at: Optional[datetime.datetime] = None,
last_modified_by: Optional[str] = None,
last_modified_by_type: Optional[Union[str, "CreatedByType"]] = None,
last_modified_at: Optional[datetime.datetime] = None,
**kwargs
):
super(DataCollectionEndpointResourceSystemData, self).__init__(created_by=created_by, created_by_type=created_by_type, created_at=created_at, last_modified_by=last_modified_by, last_modified_by_type=last_modified_by_type, last_modified_at=last_modified_at, **kwargs)
class DataCollectionRule(msrest.serialization.Model):
"""Definition of what monitoring data to collect and where that data should be sent.
Variables are only populated by the server, and will be ignored when sending a request.
:param description: Description of the data collection rule.
:type description: str
:ivar immutable_id: The immutable ID of this data collection rule. This property is READ-ONLY.
:vartype immutable_id: str
:param data_collection_endpoint_id: The resource ID of the data collection endpoint that this
rule can be used with.
:type data_collection_endpoint_id: str
:ivar metadata: Metadata about the resource.
:vartype metadata: ~$(python-base-namespace).v2021_09_01_preview.models.Metadata
:param stream_declarations: Declaration of custom streams used in this rule.
:type stream_declarations: dict[str, ~$(python-base-
namespace).v2021_09_01_preview.models.StreamDeclaration]
:param data_sources: The specification of data sources.
This property is optional and can be omitted if the rule is meant to be used via direct calls
to the provisioned endpoint.
:type data_sources: ~$(python-base-namespace).v2021_09_01_preview.models.DataSourcesSpec
:param destinations: The specification of destinations.
:type destinations: ~$(python-base-namespace).v2021_09_01_preview.models.DestinationsSpec
:param data_flows: The specification of data flows.
:type data_flows: list[~$(python-base-namespace).v2021_09_01_preview.models.DataFlow]
:ivar provisioning_state: The resource provisioning state. Possible values include: "Creating",
"Updating", "Deleting", "Succeeded", "Failed".
:vartype provisioning_state: str or ~$(python-base-
namespace).v2021_09_01_preview.models.KnownDataCollectionRuleProvisioningState
"""
_validation = {
'immutable_id': {'readonly': True},
'metadata': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'immutable_id': {'key': 'immutableId', 'type': 'str'},
'data_collection_endpoint_id': {'key': 'dataCollectionEndpointId', 'type': 'str'},
'metadata': {'key': 'metadata', 'type': 'Metadata'},
'stream_declarations': {'key': 'streamDeclarations', 'type': '{StreamDeclaration}'},
'data_sources': {'key': 'dataSources', 'type': 'DataSourcesSpec'},
'destinations': {'key': 'destinations', 'type': 'DestinationsSpec'},
'data_flows': {'key': 'dataFlows', 'type': '[DataFlow]'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
description: Optional[str] = None,
data_collection_endpoint_id: Optional[str] = None,
stream_declarations: Optional[Dict[str, "StreamDeclaration"]] = None,
data_sources: Optional["DataSourcesSpec"] = None,
destinations: Optional["DestinationsSpec"] = None,
data_flows: Optional[List["DataFlow"]] = None,
**kwargs
):
super(DataCollectionRule, self).__init__(**kwargs)
self.description = description
self.immutable_id = None
self.data_collection_endpoint_id = data_collection_endpoint_id
self.metadata = None
self.stream_declarations = stream_declarations
self.data_sources = data_sources
self.destinations = destinations
self.data_flows = data_flows
self.provisioning_state = None
class DataCollectionRuleAssociation(msrest.serialization.Model):
"""Definition of association of a data collection rule with a monitored Azure resource.
Variables are only populated by the server, and will be ignored when sending a request.
:param description: Description of the association.
:type description: str
:param data_collection_rule_id: The resource ID of the data collection rule that is to be
associated.
:type data_collection_rule_id: str
:param data_collection_endpoint_id: The resource ID of the data collection endpoint that is to
be associated.
:type data_collection_endpoint_id: str
:ivar provisioning_state: The resource provisioning state. Possible values include: "Creating",
"Updating", "Deleting", "Succeeded", "Failed".
:vartype provisioning_state: str or ~$(python-base-
namespace).v2021_09_01_preview.models.KnownDataCollectionRuleAssociationProvisioningState
:ivar metadata: Metadata about the resource.
:vartype metadata: ~$(python-base-namespace).v2021_09_01_preview.models.Metadata
"""
_validation = {
'provisioning_state': {'readonly': True},
'metadata': {'readonly': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'data_collection_rule_id': {'key': 'dataCollectionRuleId', 'type': 'str'},
'data_collection_endpoint_id': {'key': 'dataCollectionEndpointId', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'metadata': {'key': 'metadata', 'type': 'Metadata'},
}
def __init__(
self,
*,
description: Optional[str] = None,
data_collection_rule_id: Optional[str] = None,
data_collection_endpoint_id: Optional[str] = None,
**kwargs
):
super(DataCollectionRuleAssociation, self).__init__(**kwargs)
self.description = description
self.data_collection_rule_id = data_collection_rule_id
self.data_collection_endpoint_id = data_collection_endpoint_id
self.provisioning_state = None
self.metadata = None
class Metadata(msrest.serialization.Model):
"""Metadata about the resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provisioned_by: Azure offering managing this resource on-behalf-of customer.
:vartype provisioned_by: str
"""
_validation = {
'provisioned_by': {'readonly': True},
}
_attribute_map = {
'provisioned_by': {'key': 'provisionedBy', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Metadata, self).__init__(**kwargs)
self.provisioned_by = None
class DataCollectionRuleAssociationMetadata(Metadata):
"""Metadata about the resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provisioned_by: Azure offering managing this resource on-behalf-of customer.
:vartype provisioned_by: str
"""
_validation = {
'provisioned_by': {'readonly': True},
}
_attribute_map = {
'provisioned_by': {'key': 'provisionedBy', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DataCollectionRuleAssociationMetadata, self).__init__(**kwargs)
class DataCollectionRuleAssociationProxyOnlyResource(msrest.serialization.Model):
"""Definition of generic ARM proxy resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified ID of the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar etag: Resource entity tag (ETag).
:vartype etag: str
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~$(python-base-namespace).v2021_09_01_preview.models.SystemData
:param description: Description of the association.
:type description: str
:param data_collection_rule_id: The resource ID of the data collection rule that is to be
associated.
:type data_collection_rule_id: str
:param data_collection_endpoint_id: The resource ID of the data collection endpoint that is to
be associated.
:type data_collection_endpoint_id: str
:ivar provisioning_state: The resource provisioning state. Possible values include: "Creating",
"Updating", "Deleting", "Succeeded", "Failed".
:vartype provisioning_state: str or ~$(python-base-
namespace).v2021_09_01_preview.models.KnownDataCollectionRuleAssociationProvisioningState
:ivar metadata: Metadata about the resource.
:vartype metadata: ~$(python-base-namespace).v2021_09_01_preview.models.Metadata
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
'system_data': {'readonly': True},
'provisioning_state': {'readonly': True},
'metadata': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'description': {'key': 'properties.description', 'type': 'str'},
'data_collection_rule_id': {'key': 'properties.dataCollectionRuleId', 'type': 'str'},
'data_collection_endpoint_id': {'key': 'properties.dataCollectionEndpointId', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'metadata': {'key': 'properties.metadata', 'type': 'Metadata'},
}
def __init__(
self,
*,
description: Optional[str] = None,
data_collection_rule_id: Optional[str] = None,
data_collection_endpoint_id: Optional[str] = None,
**kwargs
):
super(DataCollectionRuleAssociationProxyOnlyResource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.etag = None
self.system_data = None
self.description = description
self.data_collection_rule_id = data_collection_rule_id
self.data_collection_endpoint_id = data_collection_endpoint_id
self.provisioning_state = None
self.metadata = None
class DataCollectionRuleAssociationProxyOnlyResourceListResult(msrest.serialization.Model):
"""A pageable list of resources.
All required parameters must be populated in order to send to Azure.
:param value: Required. A list of resources.
:type value: list[~$(python-base-
namespace).v2021_09_01_preview.models.DataCollectionRuleAssociationProxyOnlyResource]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DataCollectionRuleAssociationProxyOnlyResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["DataCollectionRuleAssociationProxyOnlyResource"],
next_link: Optional[str] = None,
**kwargs
):
super(DataCollectionRuleAssociationProxyOnlyResourceListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class DataCollectionRuleAssociationProxyOnlyResourceProperties(DataCollectionRuleAssociation):
"""Resource properties.
Variables are only populated by the server, and will be ignored when sending a request.
:param description: Description of the association.
:type description: str
:param data_collection_rule_id: The resource ID of the data collection rule that is to be
associated.
:type data_collection_rule_id: str
:param data_collection_endpoint_id: The resource ID of the data collection endpoint that is to
be associated.
:type data_collection_endpoint_id: str
:ivar provisioning_state: The resource provisioning state. Possible values include: "Creating",
"Updating", "Deleting", "Succeeded", "Failed".
:vartype provisioning_state: str or ~$(python-base-
namespace).v2021_09_01_preview.models.KnownDataCollectionRuleAssociationProvisioningState
:ivar metadata: Metadata about the resource.
:vartype metadata: ~$(python-base-namespace).v2021_09_01_preview.models.Metadata
"""
_validation = {
'provisioning_state': {'readonly': True},
'metadata': {'readonly': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'data_collection_rule_id': {'key': 'dataCollectionRuleId', 'type': 'str'},
'data_collection_endpoint_id': {'key': 'dataCollectionEndpointId', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'metadata': {'key': 'metadata', 'type': 'Metadata'},
}
def __init__(
self,
*,
description: Optional[str] = None,
data_collection_rule_id: Optional[str] = None,
data_collection_endpoint_id: Optional[str] = None,
**kwargs
):
super(DataCollectionRuleAssociationProxyOnlyResourceProperties, self).__init__(description=description, data_collection_rule_id=data_collection_rule_id, data_collection_endpoint_id=data_collection_endpoint_id, **kwargs)
class DataCollectionRuleAssociationProxyOnlyResourceSystemData(SystemData):
"""Metadata pertaining to creation and last modification of the resource.
:param created_by: The identity that created the resource.
:type created_by: str
:param created_by_type: The type of identity that created the resource. Possible values
include: "User", "Application", "ManagedIdentity", "Key".
:type created_by_type: str or ~$(python-base-
namespace).v2021_09_01_preview.models.CreatedByType
:param created_at: The timestamp of resource creation (UTC).
:type created_at: ~datetime.datetime
:param last_modified_by: The identity that last modified the resource.
:type last_modified_by: str
:param last_modified_by_type: The type of identity that last modified the resource. Possible
values include: "User", "Application", "ManagedIdentity", "Key".
:type last_modified_by_type: str or ~$(python-base-
namespace).v2021_09_01_preview.models.CreatedByType
:param last_modified_at: The timestamp of resource last modification (UTC).
:type last_modified_at: ~datetime.datetime
"""
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'str'},
'created_by_type': {'key': 'createdByType', 'type': 'str'},
'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
}
def __init__(
self,
*,
created_by: Optional[str] = None,
created_by_type: Optional[Union[str, "CreatedByType"]] = None,
created_at: Optional[datetime.datetime] = None,
last_modified_by: Optional[str] = None,
last_modified_by_type: Optional[Union[str, "CreatedByType"]] = None,
last_modified_at: Optional[datetime.datetime] = None,
**kwargs
):
super(DataCollectionRuleAssociationProxyOnlyResourceSystemData, self).__init__(created_by=created_by, created_by_type=created_by_type, created_at=created_at, last_modified_by=last_modified_by, last_modified_by_type=last_modified_by_type, last_modified_at=last_modified_at, **kwargs)
class DataSourcesSpec(msrest.serialization.Model):
"""Specification of data sources that will be collected.
:param performance_counters: The list of performance counter data source configurations.
:type performance_counters: list[~$(python-base-
namespace).v2021_09_01_preview.models.PerfCounterDataSource]
:param windows_event_logs: The list of Windows Event Log data source configurations.
:type windows_event_logs: list[~$(python-base-
namespace).v2021_09_01_preview.models.WindowsEventLogDataSource]
:param syslog: The list of Syslog data source configurations.
:type syslog: list[~$(python-base-namespace).v2021_09_01_preview.models.SyslogDataSource]
:param extensions: The list of Azure VM extension data source configurations.
:type extensions: list[~$(python-base-
namespace).v2021_09_01_preview.models.ExtensionDataSource]
:param log_files: The list of Log files source configurations.
:type log_files: list[~$(python-base-namespace).v2021_09_01_preview.models.LogFilesDataSource]
:param iis_logs: The list of IIS logs source configurations.
:type iis_logs: list[~$(python-base-namespace).v2021_09_01_preview.models.IisLogsDataSource]
"""
_attribute_map = {
'performance_counters': {'key': 'performanceCounters', 'type': '[PerfCounterDataSource]'},
'windows_event_logs': {'key': 'windowsEventLogs', 'type': '[WindowsEventLogDataSource]'},
'syslog': {'key': 'syslog', 'type': '[SyslogDataSource]'},
'extensions': {'key': 'extensions', 'type': '[ExtensionDataSource]'},
'log_files': {'key': 'logFiles', 'type': '[LogFilesDataSource]'},
'iis_logs': {'key': 'iisLogs', 'type': '[IisLogsDataSource]'},
}
def __init__(
self,
*,
performance_counters: Optional[List["PerfCounterDataSource"]] = None,
windows_event_logs: Optional[List["WindowsEventLogDataSource"]] = None,
syslog: Optional[List["SyslogDataSource"]] = None,
extensions: Optional[List["ExtensionDataSource"]] = None,
log_files: Optional[List["LogFilesDataSource"]] = None,
iis_logs: Optional[List["IisLogsDataSource"]] = None,
**kwargs
):
super(DataSourcesSpec, self).__init__(**kwargs)
self.performance_counters = performance_counters
self.windows_event_logs = windows_event_logs
self.syslog = syslog
self.extensions = extensions
self.log_files = log_files
self.iis_logs = iis_logs
class DataCollectionRuleDataSources(DataSourcesSpec):
"""The specification of data sources.
This property is optional and can be omitted if the rule is meant to be used via direct calls to the provisioned endpoint.
:param performance_counters: The list of performance counter data source configurations.
:type performance_counters: list[~$(python-base-
namespace).v2021_09_01_preview.models.PerfCounterDataSource]
:param windows_event_logs: The list of Windows Event Log data source configurations.
:type windows_event_logs: list[~$(python-base-
namespace).v2021_09_01_preview.models.WindowsEventLogDataSource]
:param syslog: The list of Syslog data source configurations.
:type syslog: list[~$(python-base-namespace).v2021_09_01_preview.models.SyslogDataSource]
:param extensions: The list of Azure VM extension data source configurations.
:type extensions: list[~$(python-base-
namespace).v2021_09_01_preview.models.ExtensionDataSource]
:param log_files: The list of Log files source configurations.
:type log_files: list[~$(python-base-namespace).v2021_09_01_preview.models.LogFilesDataSource]
:param iis_logs: The list of IIS logs source configurations.
:type iis_logs: list[~$(python-base-namespace).v2021_09_01_preview.models.IisLogsDataSource]
"""
_attribute_map = {
'performance_counters': {'key': 'performanceCounters', 'type': '[PerfCounterDataSource]'},
'windows_event_logs': {'key': 'windowsEventLogs', 'type': '[WindowsEventLogDataSource]'},
'syslog': {'key': 'syslog', 'type': '[SyslogDataSource]'},
'extensions': {'key': 'extensions', 'type': '[ExtensionDataSource]'},
'log_files': {'key': 'logFiles', 'type': '[LogFilesDataSource]'},
'iis_logs': {'key': 'iisLogs', 'type': '[IisLogsDataSource]'},
}
def __init__(
self,
*,
performance_counters: Optional[List["PerfCounterDataSource"]] = None,
windows_event_logs: Optional[List["WindowsEventLogDataSource"]] = None,
syslog: Optional[List["SyslogDataSource"]] = None,
extensions: Optional[List["ExtensionDataSource"]] = None,
log_files: Optional[List["LogFilesDataSource"]] = None,
iis_logs: Optional[List["IisLogsDataSource"]] = None,
**kwargs
):
super(DataCollectionRuleDataSources, self).__init__(performance_counters=performance_counters, windows_event_logs=windows_event_logs, syslog=syslog, extensions=extensions, log_files=log_files, iis_logs=iis_logs, **kwargs)
class DestinationsSpec(msrest.serialization.Model):
"""Specification of destinations that can be used in data flows.
:param log_analytics: List of Log Analytics destinations.
:type log_analytics: list[~$(python-base-
namespace).v2021_09_01_preview.models.LogAnalyticsDestination]
:param azure_monitor_metrics: Azure Monitor Metrics destination.
:type azure_monitor_metrics: ~$(python-base-
namespace).v2021_09_01_preview.models.AzureMonitorMetricsDestination
"""
_attribute_map = {
'log_analytics': {'key': 'logAnalytics', 'type': '[LogAnalyticsDestination]'},
'azure_monitor_metrics': {'key': 'azureMonitorMetrics', 'type': 'AzureMonitorMetricsDestination'},
}
def __init__(
self,
*,
log_analytics: Optional[List["LogAnalyticsDestination"]] = None,
azure_monitor_metrics: Optional["AzureMonitorMetricsDestination"] = None,
**kwargs
):
super(DestinationsSpec, self).__init__(**kwargs)
self.log_analytics = log_analytics
self.azure_monitor_metrics = azure_monitor_metrics
class DataCollectionRuleDestinations(DestinationsSpec):
"""The specification of destinations.
:param log_analytics: List of Log Analytics destinations.
:type log_analytics: list[~$(python-base-
namespace).v2021_09_01_preview.models.LogAnalyticsDestination]
:param azure_monitor_metrics: Azure Monitor Metrics destination.
:type azure_monitor_metrics: ~$(python-base-
namespace).v2021_09_01_preview.models.AzureMonitorMetricsDestination
"""
_attribute_map = {
'log_analytics': {'key': 'logAnalytics', 'type': '[LogAnalyticsDestination]'},
'azure_monitor_metrics': {'key': 'azureMonitorMetrics', 'type': 'AzureMonitorMetricsDestination'},
}
def __init__(
self,
*,
log_analytics: Optional[List["LogAnalyticsDestination"]] = None,
azure_monitor_metrics: Optional["AzureMonitorMetricsDestination"] = None,
**kwargs
):
super(DataCollectionRuleDestinations, self).__init__(log_analytics=log_analytics, azure_monitor_metrics=azure_monitor_metrics, **kwargs)
class DataCollectionRuleMetadata(Metadata):
"""Metadata about the resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provisioned_by: Azure offering managing this resource on-behalf-of customer.
:vartype provisioned_by: str
"""
_validation = {
'provisioned_by': {'readonly': True},
}
_attribute_map = {
'provisioned_by': {'key': 'provisionedBy', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DataCollectionRuleMetadata, self).__init__(**kwargs)
class DataCollectionRuleResource(msrest.serialization.Model):
"""Definition of ARM tracked top level resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param location: Required. The geo-location where the resource lives.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param kind: The kind of the resource. Possible values include: "Linux", "Windows".
:type kind: str or ~$(python-base-
namespace).v2021_09_01_preview.models.KnownDataCollectionRuleResourceKind
:ivar id: Fully qualified ID of the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar etag: Resource entity tag (ETag).
:vartype etag: str
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~$(python-base-namespace).v2021_09_01_preview.models.SystemData
:param description: Description of the data collection rule.
:type description: str
:ivar immutable_id: The immutable ID of this data collection rule. This property is READ-ONLY.
:vartype immutable_id: str
:param data_collection_endpoint_id: The resource ID of the data collection endpoint that this
rule can be used with.
:type data_collection_endpoint_id: str
:ivar metadata: Metadata about the resource.
:vartype metadata: ~$(python-base-namespace).v2021_09_01_preview.models.Metadata
:param stream_declarations: Declaration of custom streams used in this rule.
:type stream_declarations: dict[str, ~$(python-base-
namespace).v2021_09_01_preview.models.StreamDeclaration]
:param data_sources: The specification of data sources.
This property is optional and can be omitted if the rule is meant to be used via direct calls
to the provisioned endpoint.
:type data_sources: ~$(python-base-namespace).v2021_09_01_preview.models.DataSourcesSpec
:param destinations: The specification of destinations.
:type destinations: ~$(python-base-namespace).v2021_09_01_preview.models.DestinationsSpec
:param data_flows: The specification of data flows.
:type data_flows: list[~$(python-base-namespace).v2021_09_01_preview.models.DataFlow]
:ivar provisioning_state: The resource provisioning state. Possible values include: "Creating",
"Updating", "Deleting", "Succeeded", "Failed".
:vartype provisioning_state: str or ~$(python-base-
namespace).v2021_09_01_preview.models.KnownDataCollectionRuleProvisioningState
"""
_validation = {
'location': {'required': True},
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
'system_data': {'readonly': True},
'immutable_id': {'readonly': True},
'metadata': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'kind': {'key': 'kind', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'description': {'key': 'properties.description', 'type': 'str'},
'immutable_id': {'key': 'properties.immutableId', 'type': 'str'},
'data_collection_endpoint_id': {'key': 'properties.dataCollectionEndpointId', 'type': 'str'},
'metadata': {'key': 'properties.metadata', 'type': 'Metadata'},
'stream_declarations': {'key': 'properties.streamDeclarations', 'type': '{StreamDeclaration}'},
'data_sources': {'key': 'properties.dataSources', 'type': 'DataSourcesSpec'},
'destinations': {'key': 'properties.destinations', 'type': 'DestinationsSpec'},
'data_flows': {'key': 'properties.dataFlows', 'type': '[DataFlow]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
kind: Optional[Union[str, "KnownDataCollectionRuleResourceKind"]] = None,
description: Optional[str] = None,
data_collection_endpoint_id: Optional[str] = None,
stream_declarations: Optional[Dict[str, "StreamDeclaration"]] = None,
data_sources: Optional["DataSourcesSpec"] = None,
destinations: Optional["DestinationsSpec"] = None,
data_flows: Optional[List["DataFlow"]] = None,
**kwargs
):
super(DataCollectionRuleResource, self).__init__(**kwargs)
self.location = location
self.tags = tags
self.kind = kind
self.id = None
self.name = None
self.type = None
self.etag = None
self.system_data = None
self.description = description
self.immutable_id = None
self.data_collection_endpoint_id = data_collection_endpoint_id
self.metadata = None
self.stream_declarations = stream_declarations
self.data_sources = data_sources
self.destinations = destinations
self.data_flows = data_flows
self.provisioning_state = None
class DataCollectionRuleResourceListResult(msrest.serialization.Model):
"""A pageable list of resources.
All required parameters must be populated in order to send to Azure.
:param value: Required. A list of resources.
:type value: list[~$(python-base-
namespace).v2021_09_01_preview.models.DataCollectionRuleResource]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DataCollectionRuleResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["DataCollectionRuleResource"],
next_link: Optional[str] = None,
**kwargs
):
super(DataCollectionRuleResourceListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class DataCollectionRuleResourceProperties(DataCollectionRule):
"""Resource properties.
Variables are only populated by the server, and will be ignored when sending a request.
:param description: Description of the data collection rule.
:type description: str
:ivar immutable_id: The immutable ID of this data collection rule. This property is READ-ONLY.
:vartype immutable_id: str
:param data_collection_endpoint_id: The resource ID of the data collection endpoint that this
rule can be used with.
:type data_collection_endpoint_id: str
:ivar metadata: Metadata about the resource.
:vartype metadata: ~$(python-base-namespace).v2021_09_01_preview.models.Metadata
:param stream_declarations: Declaration of custom streams used in this rule.
:type stream_declarations: dict[str, ~$(python-base-
namespace).v2021_09_01_preview.models.StreamDeclaration]
:param data_sources: The specification of data sources.
This property is optional and can be omitted if the rule is meant to be used via direct calls
to the provisioned endpoint.
:type data_sources: ~$(python-base-namespace).v2021_09_01_preview.models.DataSourcesSpec
:param destinations: The specification of destinations.
:type destinations: ~$(python-base-namespace).v2021_09_01_preview.models.DestinationsSpec
:param data_flows: The specification of data flows.
:type data_flows: list[~$(python-base-namespace).v2021_09_01_preview.models.DataFlow]
:ivar provisioning_state: The resource provisioning state. Possible values include: "Creating",
"Updating", "Deleting", "Succeeded", "Failed".
:vartype provisioning_state: str or ~$(python-base-
namespace).v2021_09_01_preview.models.KnownDataCollectionRuleProvisioningState
"""
_validation = {
'immutable_id': {'readonly': True},
'metadata': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'immutable_id': {'key': 'immutableId', 'type': 'str'},
'data_collection_endpoint_id': {'key': 'dataCollectionEndpointId', 'type': 'str'},
'metadata': {'key': 'metadata', 'type': 'Metadata'},
'stream_declarations': {'key': 'streamDeclarations', 'type': '{StreamDeclaration}'},
'data_sources': {'key': 'dataSources', 'type': 'DataSourcesSpec'},
'destinations': {'key': 'destinations', 'type': 'DestinationsSpec'},
'data_flows': {'key': 'dataFlows', 'type': '[DataFlow]'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
description: Optional[str] = None,
data_collection_endpoint_id: Optional[str] = None,
stream_declarations: Optional[Dict[str, "StreamDeclaration"]] = None,
data_sources: Optional["DataSourcesSpec"] = None,
destinations: Optional["DestinationsSpec"] = None,
data_flows: Optional[List["DataFlow"]] = None,
**kwargs
):
super(DataCollectionRuleResourceProperties, self).__init__(description=description, data_collection_endpoint_id=data_collection_endpoint_id, stream_declarations=stream_declarations, data_sources=data_sources, destinations=destinations, data_flows=data_flows, **kwargs)
class DataCollectionRuleResourceSystemData(SystemData):
"""Metadata pertaining to creation and last modification of the resource.
:param created_by: The identity that created the resource.
:type created_by: str
:param created_by_type: The type of identity that created the resource. Possible values
include: "User", "Application", "ManagedIdentity", "Key".
:type created_by_type: str or ~$(python-base-
namespace).v2021_09_01_preview.models.CreatedByType
:param created_at: The timestamp of resource creation (UTC).
:type created_at: ~datetime.datetime
:param last_modified_by: The identity that last modified the resource.
:type last_modified_by: str
:param last_modified_by_type: The type of identity that last modified the resource. Possible
values include: "User", "Application", "ManagedIdentity", "Key".
:type last_modified_by_type: str or ~$(python-base-
namespace).v2021_09_01_preview.models.CreatedByType
:param last_modified_at: The timestamp of resource last modification (UTC).
:type last_modified_at: ~datetime.datetime
"""
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'str'},
'created_by_type': {'key': 'createdByType', 'type': 'str'},
'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
}
def __init__(
self,
*,
created_by: Optional[str] = None,
created_by_type: Optional[Union[str, "CreatedByType"]] = None,
created_at: Optional[datetime.datetime] = None,
last_modified_by: Optional[str] = None,
last_modified_by_type: Optional[Union[str, "CreatedByType"]] = None,
last_modified_at: Optional[datetime.datetime] = None,
**kwargs
):
super(DataCollectionRuleResourceSystemData, self).__init__(created_by=created_by, created_by_type=created_by_type, created_at=created_at, last_modified_by=last_modified_by, last_modified_by_type=last_modified_by_type, last_modified_at=last_modified_at, **kwargs)
class DataFlow(msrest.serialization.Model):
"""Definition of which streams are sent to which destinations.
:param streams: List of streams for this data flow.
:type streams: list[str or ~$(python-base-
namespace).v2021_09_01_preview.models.KnownDataFlowStreams]
:param destinations: List of destinations for this data flow.
:type destinations: list[str]
:param transform_kql: The KQL query to transform stream data.
:type transform_kql: str
:param output_stream: The output stream of the transform. Only required if the transform
changes data to a different stream.
:type output_stream: str
"""
_attribute_map = {
'streams': {'key': 'streams', 'type': '[str]'},
'destinations': {'key': 'destinations', 'type': '[str]'},
'transform_kql': {'key': 'transformKql', 'type': 'str'},
'output_stream': {'key': 'outputStream', 'type': 'str'},
}
def __init__(
self,
*,
streams: Optional[List[Union[str, "KnownDataFlowStreams"]]] = None,
destinations: Optional[List[str]] = None,
transform_kql: Optional[str] = None,
output_stream: Optional[str] = None,
**kwargs
):
super(DataFlow, self).__init__(**kwargs)
self.streams = streams
self.destinations = destinations
self.transform_kql = transform_kql
self.output_stream = output_stream
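# Usage sketch (hypothetical stream and destination names): route a custom
# stream through a KQL transform; output_stream is set because the transformed
# rows still target a custom table.
_example_flow = DataFlow(
    streams=["Custom-MyApp_CL"],
    destinations=["centralWorkspace"],
    transform_kql="source | where Level == 'Error'",
    output_stream="Custom-MyApp_CL",
)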
class DestinationsSpecAzureMonitorMetrics(AzureMonitorMetricsDestination):
"""Azure Monitor Metrics destination.
:param name: A friendly name for the destination.
This name should be unique across all destinations (regardless of type) within the data
collection rule.
:type name: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
**kwargs
):
super(DestinationsSpecAzureMonitorMetrics, self).__init__(name=name, **kwargs)
class ErrorAdditionalInfo(msrest.serialization.Model):
"""The resource management error additional info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The additional info type.
:vartype type: str
:ivar info: The additional info.
:vartype info: object
"""
_validation = {
'type': {'readonly': True},
'info': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'info': {'key': 'info', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(ErrorAdditionalInfo, self).__init__(**kwargs)
self.type = None
self.info = None
class ErrorDetail(msrest.serialization.Model):
"""The error detail.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar target: The error target.
:vartype target: str
:ivar details: The error details.
:vartype details: list[~$(python-base-namespace).v2021_09_01_preview.models.ErrorDetail]
:ivar additional_info: The error additional info.
:vartype additional_info: list[~$(python-base-
namespace).v2021_09_01_preview.models.ErrorAdditionalInfo]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'target': {'readonly': True},
'details': {'readonly': True},
'additional_info': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorDetail]'},
'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'},
}
def __init__(
self,
**kwargs
):
super(ErrorDetail, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = None
self.additional_info = None
class ErrorResponseCommonV2(msrest.serialization.Model):
"""Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.).
:param error: The error object.
:type error: ~$(python-base-namespace).v2021_09_01_preview.models.ErrorDetail
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
*,
error: Optional["ErrorDetail"] = None,
**kwargs
):
super(ErrorResponseCommonV2, self).__init__(**kwargs)
self.error = error
class ExtensionDataSource(msrest.serialization.Model):
"""Definition of which data will be collected from a separate VM extension that integrates with the Azure Monitor Agent.
    Collected from either Windows or Linux machines, depending on which extension is defined.
All required parameters must be populated in order to send to Azure.
:param streams: List of streams that this data source will be sent to.
A stream indicates what schema will be used for this data and usually what table in Log
Analytics the data will be sent to.
:type streams: list[str or ~$(python-base-
namespace).v2021_09_01_preview.models.KnownExtensionDataSourceStreams]
:param extension_name: Required. The name of the VM extension.
:type extension_name: str
:param extension_settings: The extension settings. The format is specific for particular
extension.
:type extension_settings: object
:param input_data_sources: The list of data sources this extension needs data from.
:type input_data_sources: list[str]
:param name: A friendly name for the data source.
This name should be unique across all data sources (regardless of type) within the data
collection rule.
:type name: str
"""
_validation = {
'extension_name': {'required': True},
}
_attribute_map = {
'streams': {'key': 'streams', 'type': '[str]'},
'extension_name': {'key': 'extensionName', 'type': 'str'},
'extension_settings': {'key': 'extensionSettings', 'type': 'object'},
'input_data_sources': {'key': 'inputDataSources', 'type': '[str]'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
extension_name: str,
streams: Optional[List[Union[str, "KnownExtensionDataSourceStreams"]]] = None,
extension_settings: Optional[object] = None,
input_data_sources: Optional[List[str]] = None,
name: Optional[str] = None,
**kwargs
):
super(ExtensionDataSource, self).__init__(**kwargs)
self.streams = streams
self.extension_name = extension_name
self.extension_settings = extension_settings
self.input_data_sources = input_data_sources
self.name = name
class IisLogsDataSource(msrest.serialization.Model):
"""Enables IIS logs to be collected by this data collection rule.
All required parameters must be populated in order to send to Azure.
:param streams: Required. IIS streams.
:type streams: list[str]
:param log_directories: Absolute paths file location.
:type log_directories: list[str]
:param name: A friendly name for the data source.
This name should be unique across all data sources (regardless of type) within the data
collection rule.
:type name: str
"""
_validation = {
'streams': {'required': True},
}
_attribute_map = {
'streams': {'key': 'streams', 'type': '[str]'},
'log_directories': {'key': 'logDirectories', 'type': '[str]'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
streams: List[str],
log_directories: Optional[List[str]] = None,
name: Optional[str] = None,
**kwargs
):
super(IisLogsDataSource, self).__init__(**kwargs)
self.streams = streams
self.log_directories = log_directories
self.name = name
class LogAnalyticsDestination(msrest.serialization.Model):
"""Log Analytics destination.
Variables are only populated by the server, and will be ignored when sending a request.
:param workspace_resource_id: The resource ID of the Log Analytics workspace.
:type workspace_resource_id: str
:ivar workspace_id: The Customer ID of the Log Analytics workspace.
:vartype workspace_id: str
:param name: A friendly name for the destination.
This name should be unique across all destinations (regardless of type) within the data
collection rule.
:type name: str
"""
_validation = {
'workspace_id': {'readonly': True},
}
_attribute_map = {
'workspace_resource_id': {'key': 'workspaceResourceId', 'type': 'str'},
'workspace_id': {'key': 'workspaceId', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
workspace_resource_id: Optional[str] = None,
name: Optional[str] = None,
**kwargs
):
super(LogAnalyticsDestination, self).__init__(**kwargs)
self.workspace_resource_id = workspace_resource_id
self.workspace_id = None
self.name = name
class LogFilesDataSource(msrest.serialization.Model):
"""Definition of which custom log files will be collected by this data collection rule.
All required parameters must be populated in order to send to Azure.
:param streams: Required. List of streams that this data source will be sent to.
A stream indicates what schema will be used for this data source.
:type streams: list[str]
:param file_patterns: Required. File Patterns where the log files are located.
:type file_patterns: list[str]
:param format: Required. The data format of the log files. Possible values include: "text".
:type format: str or ~$(python-base-
namespace).v2021_09_01_preview.models.KnownLogFilesDataSourceFormat
:param settings: The log files specific settings.
:type settings: ~$(python-base-namespace).v2021_09_01_preview.models.LogFileSettings
:param name: A friendly name for the data source.
This name should be unique across all data sources (regardless of type) within the data
collection rule.
:type name: str
"""
_validation = {
'streams': {'required': True},
'file_patterns': {'required': True},
'format': {'required': True},
}
_attribute_map = {
'streams': {'key': 'streams', 'type': '[str]'},
'file_patterns': {'key': 'filePatterns', 'type': '[str]'},
'format': {'key': 'format', 'type': 'str'},
'settings': {'key': 'settings', 'type': 'LogFileSettings'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
streams: List[str],
file_patterns: List[str],
format: Union[str, "KnownLogFilesDataSourceFormat"],
settings: Optional["LogFileSettings"] = None,
name: Optional[str] = None,
**kwargs
):
super(LogFilesDataSource, self).__init__(**kwargs)
self.streams = streams
self.file_patterns = file_patterns
self.format = format
self.settings = settings
self.name = name
class LogFileSettings(msrest.serialization.Model):
"""Settings for different log file formats.
:param text: Text settings.
:type text: ~$(python-base-namespace).v2021_09_01_preview.models.LogFileTextSettings
"""
_attribute_map = {
'text': {'key': 'text', 'type': 'LogFileTextSettings'},
}
def __init__(
self,
*,
text: Optional["LogFileTextSettings"] = None,
**kwargs
):
super(LogFileSettings, self).__init__(**kwargs)
self.text = text
class LogFilesDataSourceSettings(LogFileSettings):
"""The log files specific settings.
:param text: Text settings.
:type text: ~$(python-base-namespace).v2021_09_01_preview.models.LogFileTextSettings
"""
_attribute_map = {
'text': {'key': 'text', 'type': 'LogFileTextSettings'},
}
def __init__(
self,
*,
text: Optional["LogFileTextSettings"] = None,
**kwargs
):
super(LogFilesDataSourceSettings, self).__init__(text=text, **kwargs)
class LogFileTextSettings(msrest.serialization.Model):
"""Settings for text log files.
All required parameters must be populated in order to send to Azure.
:param record_start_timestamp_format: Required. One of the supported timestamp formats.
Possible values include: "ISO 8601", "YYYY-MM-DD HH:MM:SS", "M/D/YYYY HH:MM:SS AM/PM", "Mon DD,
YYYY HH:MM:SS", "yyMMdd HH:mm:ss", "ddMMyy HH:mm:ss", "MMM d hh:mm:ss", "dd/MMM/yyyy:HH:mm:ss
zzz", "yyyy-MM-ddTHH:mm:ssK".
:type record_start_timestamp_format: str or ~$(python-base-
namespace).v2021_09_01_preview.models.KnownLogFileTextSettingsRecordStartTimestampFormat
"""
_validation = {
'record_start_timestamp_format': {'required': True},
}
_attribute_map = {
'record_start_timestamp_format': {'key': 'recordStartTimestampFormat', 'type': 'str'},
}
def __init__(
self,
*,
record_start_timestamp_format: Union[str, "KnownLogFileTextSettingsRecordStartTimestampFormat"],
**kwargs
):
super(LogFileTextSettings, self).__init__(**kwargs)
self.record_start_timestamp_format = record_start_timestamp_format
class LogFileSettingsText(LogFileTextSettings):
"""Text settings.
All required parameters must be populated in order to send to Azure.
:param record_start_timestamp_format: Required. One of the supported timestamp formats.
Possible values include: "ISO 8601", "YYYY-MM-DD HH:MM:SS", "M/D/YYYY HH:MM:SS AM/PM", "Mon DD,
YYYY HH:MM:SS", "yyMMdd HH:mm:ss", "ddMMyy HH:mm:ss", "MMM d hh:mm:ss", "dd/MMM/yyyy:HH:mm:ss
zzz", "yyyy-MM-ddTHH:mm:ssK".
:type record_start_timestamp_format: str or ~$(python-base-
namespace).v2021_09_01_preview.models.KnownLogFileTextSettingsRecordStartTimestampFormat
"""
_validation = {
'record_start_timestamp_format': {'required': True},
}
_attribute_map = {
'record_start_timestamp_format': {'key': 'recordStartTimestampFormat', 'type': 'str'},
}
def __init__(
self,
*,
record_start_timestamp_format: Union[str, "KnownLogFileTextSettingsRecordStartTimestampFormat"],
**kwargs
):
super(LogFileSettingsText, self).__init__(record_start_timestamp_format=record_start_timestamp_format, **kwargs)
class PerfCounterDataSource(msrest.serialization.Model):
"""Definition of which performance counters will be collected and how they will be collected by this data collection rule.
Collected from both Windows and Linux machines where the counter is present.
:param streams: List of streams that this data source will be sent to.
A stream indicates what schema will be used for this data and usually what table in Log
Analytics the data will be sent to.
:type streams: list[str or ~$(python-base-
namespace).v2021_09_01_preview.models.KnownPerfCounterDataSourceStreams]
:param sampling_frequency_in_seconds: The number of seconds between consecutive counter
measurements (samples).
:type sampling_frequency_in_seconds: int
:param counter_specifiers: A list of specifier names of the performance counters you want to
collect.
Use a wildcard (*) to collect a counter for all instances.
To get a list of performance counters on Windows, run the command 'typeperf'.
:type counter_specifiers: list[str]
:param name: A friendly name for the data source.
This name should be unique across all data sources (regardless of type) within the data
collection rule.
:type name: str
"""
_attribute_map = {
'streams': {'key': 'streams', 'type': '[str]'},
'sampling_frequency_in_seconds': {'key': 'samplingFrequencyInSeconds', 'type': 'int'},
'counter_specifiers': {'key': 'counterSpecifiers', 'type': '[str]'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
streams: Optional[List[Union[str, "KnownPerfCounterDataSourceStreams"]]] = None,
sampling_frequency_in_seconds: Optional[int] = None,
counter_specifiers: Optional[List[str]] = None,
name: Optional[str] = None,
**kwargs
):
super(PerfCounterDataSource, self).__init__(**kwargs)
self.streams = streams
self.sampling_frequency_in_seconds = sampling_frequency_in_seconds
self.counter_specifiers = counter_specifiers
self.name = name
class ResourceForUpdate(msrest.serialization.Model):
"""Definition of ARM tracked top level resource properties for update operation.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(ResourceForUpdate, self).__init__(**kwargs)
self.tags = tags
class StreamDeclaration(msrest.serialization.Model):
"""Declaration of a custom stream.
:param columns: List of columns used by data in this stream.
:type columns: list[~$(python-base-namespace).v2021_09_01_preview.models.ColumnDefinition]
"""
_attribute_map = {
'columns': {'key': 'columns', 'type': '[ColumnDefinition]'},
}
def __init__(
self,
*,
columns: Optional[List["ColumnDefinition"]] = None,
**kwargs
):
super(StreamDeclaration, self).__init__(**kwargs)
self.columns = columns
class SyslogDataSource(msrest.serialization.Model):
"""Definition of which syslog data will be collected and how it will be collected.
Only collected from Linux machines.
:param streams: List of streams that this data source will be sent to.
A stream indicates what schema will be used for this data and usually what table in Log
Analytics the data will be sent to.
:type streams: list[str or ~$(python-base-
namespace).v2021_09_01_preview.models.KnownSyslogDataSourceStreams]
:param facility_names: The list of facility names.
:type facility_names: list[str or ~$(python-base-
namespace).v2021_09_01_preview.models.KnownSyslogDataSourceFacilityNames]
:param log_levels: The log levels to collect.
:type log_levels: list[str or ~$(python-base-
namespace).v2021_09_01_preview.models.KnownSyslogDataSourceLogLevels]
:param name: A friendly name for the data source.
This name should be unique across all data sources (regardless of type) within the data
collection rule.
:type name: str
"""
_attribute_map = {
'streams': {'key': 'streams', 'type': '[str]'},
'facility_names': {'key': 'facilityNames', 'type': '[str]'},
'log_levels': {'key': 'logLevels', 'type': '[str]'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
streams: Optional[List[Union[str, "KnownSyslogDataSourceStreams"]]] = None,
facility_names: Optional[List[Union[str, "KnownSyslogDataSourceFacilityNames"]]] = None,
log_levels: Optional[List[Union[str, "KnownSyslogDataSourceLogLevels"]]] = None,
name: Optional[str] = None,
**kwargs
):
super(SyslogDataSource, self).__init__(**kwargs)
self.streams = streams
self.facility_names = facility_names
self.log_levels = log_levels
self.name = name
class WindowsEventLogDataSource(msrest.serialization.Model):
"""Definition of which Windows Event Log events will be collected and how they will be collected.
Only collected from Windows machines.
:param streams: List of streams that this data source will be sent to.
A stream indicates what schema will be used for this data and usually what table in Log
Analytics the data will be sent to.
:type streams: list[str or ~$(python-base-
namespace).v2021_09_01_preview.models.KnownWindowsEventLogDataSourceStreams]
:param x_path_queries: A list of Windows Event Log queries in XPATH format.
:type x_path_queries: list[str]
:param name: A friendly name for the data source.
This name should be unique across all data sources (regardless of type) within the data
collection rule.
:type name: str
"""
_attribute_map = {
'streams': {'key': 'streams', 'type': '[str]'},
'x_path_queries': {'key': 'xPathQueries', 'type': '[str]'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
streams: Optional[List[Union[str, "KnownWindowsEventLogDataSourceStreams"]]] = None,
x_path_queries: Optional[List[str]] = None,
name: Optional[str] = None,
**kwargs
):
super(WindowsEventLogDataSource, self).__init__(**kwargs)
self.streams = streams
self.x_path_queries = x_path_queries
self.name = name
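# ----------------------------------------------------------------------------
# Usage sketch (added; illustrative only, not part of the generated SDK):
# these serialization models are composed into a data collection rule
# payload. The stream/facility names and resource IDs below are placeholder
# assumptions, not values taken from a real deployment.
#
#   syslog = SyslogDataSource(
#       streams=["Microsoft-Syslog"],
#       facility_names=["auth", "cron"],
#       log_levels=["Error", "Critical"],
#       name="example-syslog-source",
#   )
#   workspace = LogAnalyticsDestination(
#       workspace_resource_id="/subscriptions/<sub-id>/resourceGroups/<rg>/"
#                             "providers/Microsoft.OperationalInsights/workspaces/<ws>",
#       name="example-workspace",
#   )
# ----------------------------------------------------------------------------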
# ===== FILE: /numbers.py (repo: 11lixy/Little-stupid-bird) =====
for value in range(1, 5):  # use the range() function to print the numbers 1 through 4
    print(value)
numbers = list(range(1, 6))  # use list() to convert the range() result into a list
print(numbers)
even_numbers = list(range(2, 11, 2))  # even numbers up to 10: step by 2 until the stop value (11) is reached or passed
print(even_numbers)
squares = []  # first, create an empty list
for value in range(1, 11):  # next, use range() to iterate over the values 1 through 10
    s = value**2  # inside the loop, store the square of the current value in s
    squares.append(s)  # append the newly computed square to the end of the squares list
print(squares)  # finally, once the loop ends, print the list
squares = [value**2 for value in range(1, 11)]  # a list comprehension merges the loop and element creation into one line, appending automatically
print(squares)
digits = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
print(min(digits))  # smallest value
print(max(digits))  # largest value
print(sum(digits))  # sum of all values
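# a quick derived example (added): the mean follows directly from sum() and len()
print(sum(digits) / len(digits))  # 4.5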
# ===== FILE: /setup.py (repo: isabella232/python-assured-workloads, license: Apache-2.0) =====
# -*- coding: utf-8 -*-
# Copyright (C) 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
import os
import setuptools # type: ignore
version = "0.1.0"
package_root = os.path.abspath(os.path.dirname(__file__))
readme_filename = os.path.join(package_root, "README.rst")
with io.open(readme_filename, encoding="utf-8") as readme_file:
readme = readme_file.read()
setuptools.setup(
name="google-cloud-assured-workloads",
version=version,
long_description=readme,
author="Google LLC",
author_email="[email protected]",
license="Apache 2.0",
url="https://github.com/googleapis/python-assured-workloads",
packages=setuptools.PEP420PackageFinder.find(),
namespace_packages=("google", "google.cloud"),
platforms="Posix; MacOS X; Windows",
include_package_data=True,
install_requires=(
"google-api-core[grpc] >= 1.22.2, < 2.0.0dev",
"libcst >= 0.2.5",
"proto-plus >= 1.4.0",
),
python_requires=">=3.6",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Internet",
"Topic :: Software Development :: Libraries :: Python Modules",
],
zip_safe=False,
)
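# Usage note (added sketch): from the package root this setup.py supports the
# standard packaging flows, e.g.
#   pip install .                         # local install
#   python setup.py sdist bdist_wheel    # build source + wheel distributions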
# ===== FILE: /VAZQUEZ_Javier_X108457_X442.3_Final_Project_2.py (repo: JavierVzz/UC-Python) =====
#-------------------------------------
# Student: Javier Vazquez
# Student No.: X108457
# Project: 2
# Date: Nov 26, 2016
#-------------------------------------
# Write a text analyzer.
# It should be in a form of a function that takes a file name as an argument.
# It should read and analyze a text file and then print:
# - the top 5 most frequently used words in the file
# - the number of times the top 5 words are used
# - should be sorted by most frequently used count
# - the longest word in the document
# - the average size of each word
import os, sys, re
class textAnalizer():
"""\tTakes as an argument the name of a file and then finds:
- the top 5 most frequently used words in the file
- the number of times the top 5 words are used
- sorted by most frequently used count
- the longest word in the document
- the average size of each word"""
def __init__(self):
self.__sortedWordList = []
self.__listLines = []
def readFile(self, fileName):
path = os.getcwd()
if os.path.exists(path) is True:
if os.path.isfile(os.path.join(path, fileName)) is True:
try:
sourceFile = open(os.path.join(path, fileName), "r")
self.__listLines = sourceFile.readlines()
sourceFile.close()
                    return True, "File exists!"
                except FileNotFoundError:
                    sys.exit("File could not be opened")
            else:
                return False, "File does not exist"
        else:
            return False, "Working directory does not exist"
def countWord(self):
freqWords = {}
wordRegex = re.compile(r"\b\w+\b")
for line in self.__listLines:
wordList = wordRegex.findall(line)
for word in wordList:
if word in freqWords:
freqWords[word] += 1
else:
freqWords[word] = 1
self.__sortedWordList = sorted(freqWords.items(), reverse = True, key=lambda x: x[1])
def topN(self,n = 5):
listHeader = ["Word", "Frequency"]
print("\n{0:<10}{1:>5}".format(listHeader[0], listHeader[1]))
        for i in range(min(n, len(self.__sortedWordList))):
print("{0:<10}{1:>9}".format(self.__sortedWordList[i][0], self.__sortedWordList[i][1]))
def longestWord(self):
size = 0
lWord = ""
for word in self.__sortedWordList:
if len(word[0]) > size:
size = len(word[0])
lWord = word[0]
print("\nThe longest word is: "+lWord+"\nLength: "+str(size))
def averageWord(self):
size = 0
for word in self.__sortedWordList:
size += len(word[0])
print("Average length of words: {s:.2f}".format(s = size/len(self.__sortedWordList)))
def main():
    print("Project 2")
words = textAnalizer()
print(words.__doc__)
flag = False
print("Current path: ", os.getcwd())
print("Folders and files: ")
for file in os.listdir():
print(file)
print("\n")
while flag is False:
file = input("File's name: ")
flag , msg = words.readFile(file)
print(msg)
words.countWord()
words.topN()
words.longestWord()
words.averageWord()
if __name__ == "__main__":
main()
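# Example session (added): running this script prints the current working
# directory and its contents, prompts for a file name (e.g. "sample.txt"),
# then reports the top-5 word frequencies, the longest word, and the
# average word length for that file.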
# ===== FILE: /HW4.py (repo: agvarun007/DemoProject) =====
# Problem 1
# list1 = ['a', 'b', 'c', 'd', 'a', 'b', 'c', 'xyz', 'xyz']
# list2 = list1
# for i in range(0, len(list2)):
# count = 1
# for j in range(i + 1, len(list2)):
# if list2[i] == list2[j]:
# count += 1
# if count > 1:
# list2[j] = '0'
#
# if count > 1 and list2[i] != '0':
# print(list2[i], " is repeating ", count, " times ")
# Problem 2
# list1 = [('a', 'b'), ('c', 'd'), (1, 2), (4, 5)]
# dict1 = dict(list1)
# print(dict1)
# Problem 3
# list1 = [1, 2, 3, 4, 5, 6]
# list2 = [10, 20, 30, 40, 50]
# list3 = []
# for i in range(max(len(list1),len(list2))):
# if i >= len(list1):
# list3.append(list2[i])
# continue
# else:
# list3.append(list1[i])
# if i >= len(list2):
# continue
# else:
# list3.append(list2[i])
# print(list3)
# Problem 4
# list1 = [1, 2, 3, 4, 4, 5, 6, 78, 11]
# list2 = [10, 1, 2, 50, 100]
# list3 = []
# com = 1
# for i in range(len(list1)):
# for j in range(len(list2)):
# if list1[i] == list2[j]:
# com = list1[i]*list2[j]
# if com not in list3:
# list3.append(com)
# print("Square of common element: ", com)
| [
"[email protected]"
] | |
b742bb0182067ba5198bf11c61b017b6fde1bd06 | ea2f76269b51ff44e9b22cce78e39680257bca2c | /wos.py | 7ec4304c5aa1ef3dbb2fc6d6936c42f1ef418381 | [] | no_license | kgbplus/wos | 3c66651dcfd33502d44aacd4e23d8196e921db03 | 0c805157859343d7b50723c325f957f9e97ec830 | refs/heads/master | 2021-01-23T05:30:09.568754 | 2017-09-18T09:17:13 | 2017-09-18T09:17:13 | 86,314,210 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,452 | py | from __future__ import print_function
import requests
from bs4 import BeautifulSoup
import re
import time
import psycopg2
import random
import pdb
import os
import sys
def list_to_pair(lst):
    """Split a flat list into consecutive two-element chunks (field/value cells)."""
    return [lst[i:i+2] for i in range(0, len(lst), 2)]
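# e.g. list_to_pair(["Full title", "Manic Miner", "Year", "1983"])
#   -> [["Full title", "Manic Miner"], ["Year", "1983"]]  (added example;
#   a trailing singleton stays as a one-element chunk, since slicing never pads)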
def long_sleep(sleeptime):
    """Block for sleeptime seconds while printing a live countdown to stdout."""
    starttime = time.time()
while time.time() - starttime < sleeptime:
print("waiting, %i seconds \r"%int(starttime + sleeptime - time.time()), end='')
sys.stdout.flush()
time.sleep(0.1)
print('\n')
def scrap_games_index():
print("collecting games index page...")
try:
cur.execute("""
CREATE TABLE IF NOT EXISTS games_index (
id SERIAL UNIQUE PRIMARY KEY,
link VARCHAR(1024) NOT NULL,
complete BOOL DEFAULT FALSE
);
""")
except Exception as e:
print(e)
conn.commit()
try:
rnd_num = random.randint(0,2)
req = sessions[rnd_num].get(STARTING_PAGE, headers=headers[rnd_num])
bsObj = BeautifulSoup(req.text, "html.parser")
    except Exception as e:
print(e)
return None
for link in bsObj.findAll("a",href=re.compile("^.\.html")):
try:
cur.execute("INSERT INTO games_index (link) VALUES (%s)", (link.get('href'),))
except Exception as e:
print(e)
return None
conn.commit()
def scrap_games_pages():
print("collecting games pages...")
try:
cur.execute("""
CREATE TABLE IF NOT EXISTS games_pages (
id SERIAL UNIQUE PRIMARY KEY,
link VARCHAR(1024) NOT NULL,
complete BOOL DEFAULT FALSE
);
""")
except Exception as e:
print(e)
conn.commit()
try:
cur.execute("SELECT * FROM games_index WHERE complete = FALSE")
except Exception as e:
print(e)
return None
rows = cur.fetchall()
for row in rows:
print("open %s"%row[1])
try:
rnd_num = random.randint(0,2)
req = sessions[rnd_num].get(GAMES_URL + row[1], headers=headers[rnd_num])
bsObj = BeautifulSoup(req.text, "html.parser")
        except Exception as e:
print(e)
return None
for link in bsObj.findAll("a",href=re.compile("^/infoseekid*")):
try:
cur.execute("INSERT INTO games_pages (link) VALUES (%s)", (link.get('href')[1:],))
except Exception as e:
print(e)
return None
try:
cur.execute("UPDATE games_index SET complete = TRUE WHERE id = %s",(row[0],))
except Exception as e:
print(e)
return None
conn.commit()
time.sleep(1)
def scrap_games():
print("collecting games...")
# load database table structure
try:
cur.execute("SELECT * FROM information_schema.tables WHERE table_name='games_struct'")
if not bool(cur.rowcount): #if not exist - initializing struct table
games_struct = (
("title","VARCHAR(1024)","Full title"), #NOT NULL
("aka","VARCHAR(1024)","Also known as"),
("f_title","VARCHAR(1024)","Feature title"),
("year","VARCHAR(1024)","Year of release"),
("publisher","VARCHAR(1024)","Publisher"),
("rereleased","VARCHAR(1024)","Re-released by"),
("modified","VARCHAR(1024)","Modified by"),
("author","VARCHAR(4096)","Author(s)"),
("orig_game","VARCHAR(1024)","Original game"),
("license","VARCHAR(1024)","License"),
("t_license","VARCHAR(1024)","Tie-in licence"),
("inspired","VARCHAR(1024)","Inspired by"),
("machine","VARCHAR(1024)","Machine type"),
("players","VARCHAR(1024)","Number of players"),
("controls","VARCHAR(1024)","Controls"),
("type","VARCHAR(1024)","Type"),
("language","VARCHAR(256)","Message language"),
("orig_publication","VARCHAR(1024)","Original publication"),
("orig_price","VARCHAR(1024)","Original price"),
("budget_price","VARCHAR(1024)","Budget price"),
("availability","VARCHAR(256)","Availability"),
("note","VARCHAR(1024)","Note"),
("protection","VARCHAR(1024)","Protection scheme"),
("authoring","VARCHAR(1024)","Authoring"),
("additional","VARCHAR(4096)","Additional info"),
("spot_comments","VARCHAR(1024)","SPOT comments"),
("series","VARCHAR(4096)","Series"),
("other_systems","VARCHAR(4096)","Other systems"),
("remarks","VARCHAR(4096)","Remarks"),
("score","VARCHAR(1024)","Score"))
cur.execute("""
CREATE TABLE games_struct (
id SERIAL UNIQUE PRIMARY KEY,
field_name VARCHAR(128) NOT NULL,
template VARCHAR(1024) NOT NULL
);
""")
for vals in games_struct:
cur.execute("INSERT INTO games_struct (field_name,template) VALUES('%s')"%(vals[0] + "','" + vals[2]))
query = "CREATE TABLE IF NOT EXISTS games (id SERIAL UNIQUE PRIMARY KEY," \
+ "link VARCHAR(4096),"
for field in games_struct:
query = query + field[0] + " " + field[1] + (" NOT NULL" if field[1]=="title" else "") + ","
try:
cur.execute(query[:-1] + ")")
except Exception as e:
print(e)
conn.commit()
except Exception as e:
print(e)
try:
cur.execute("SELECT * FROM games_struct")
except Exception as e:
print(e)
return None
struct = cur.fetchall()
table = {} # create struct table and database table
for field in struct:
table[field[2]] = field[1]
try: #prepare links list
cur.execute("SELECT * FROM games_pages WHERE complete = FALSE")
except Exception as e:
print(e)
return None
rows = cur.fetchall() # rows - array of links to scrape
for n,row in enumerate(rows):
print("open %s"%row[1])
for _ in range(2): # try to get link, retry 3 times if error
try:
time.sleep(random.randint(1,3))
rnd_num = random.randint(0,2)
req = sessions[rnd_num].get(SITE_ROOT + row[1], headers=headers[rnd_num])
bsObj = BeautifulSoup(req.text, "lxml")
break
except Exception as e:
print(e)
else:
return None
fields = ["link"]
vals = ["'" + SITE_ROOT + row[1] + "'"]
try: # look for custom fields on page
for field in list_to_pair(bsObj.body.findAll("table")[5].findAll("td")): #load all pairs field-value from page
if not(str(field[0].get_text()) in table): #new field found
table[str(field[0].get_text())] = re.sub(r"[^A-Za-z]+", '_', field[0].get_text()).lower()
cur.execute("INSERT INTO games_struct (field_name,template) VALUES('%s', '%s')"%(table[field[0].get_text()],field[0].get_text()))
cur.execute("ALTER TABLE games ADD COLUMN %s %s"%(table[field[0].get_text()],"VARCHAR(1024)" if len(field[1].get_text()) < 1024 else "VARCHAR(2048)"))
fields.append(table[field[0].get_text()])
vals.append("'" + field[1].get_text()[:-1].strip().replace("'","''") + "'")
except Exception as e:
print("Error processing new field! (%s)"%field[0].get_text())
return None
try: #send to database
cur.execute("INSERT INTO games (%s) VALUES (%s)"%(','.join(fields),','.join(vals)))
except Exception as e:
print(e)
print("INSERT INTO games (%s) VALUES (%s)"%(','.join(fields),','.join(vals)))
return None
try: #mark completed links
cur.execute("UPDATE games_pages SET complete = TRUE WHERE id = %s",(row[0],))
except Exception as e:
print(e)
return None
conn.commit() # 1000 pages limit
if n > 1000:
print("1000 pages scraped, exiting...")
return
def scrap_hardware_pages():
print("collecting hardware pages...")
try:
cur.execute("""
CREATE TABLE IF NOT EXISTS hardware_pages (
id SERIAL UNIQUE PRIMARY KEY,
link VARCHAR(1024) NOT NULL,
complete BOOL DEFAULT FALSE
);
""")
except Exception as e:
print(e)
conn.commit()
try:
rnd_num = random.randint(0,2)
req = sessions[rnd_num].get(SITE_ROOT + "hw.html", headers=headers[rnd_num])
bsObj = BeautifulSoup(req.text, "html.parser")
    except Exception as e:
print(e)
return None
for link in bsObj.findAll("a",href=re.compile("^/infoseekid*")):
try:
cur.execute("INSERT INTO hardware_pages (link) VALUES (%s)", (link.get('href')[1:],))
except Exception as e:
print(e)
return None
conn.commit()
time.sleep(1)
def scrap_hardware():
print("collecting hardware...")
# load database table structure
try:
cur.execute("SELECT * FROM information_schema.tables WHERE table_name='hardware_struct'")
if not bool(cur.rowcount): #if not exist - initializing struct table
hardware_struct = (
("device","VARCHAR(1024)","Device name"),) #NOT NULL
cur.execute("""
CREATE TABLE hardware_struct (
id SERIAL UNIQUE PRIMARY KEY,
field_name VARCHAR(128) NOT NULL,
template VARCHAR(1024) NOT NULL
);
""")
for vals in hardware_struct:
cur.execute("INSERT INTO hardware_struct (field_name,template) VALUES('%s')"%(vals[0] + "','" + vals[2]))
query = "CREATE TABLE IF NOT EXISTS hardware (id SERIAL UNIQUE PRIMARY KEY," \
+ "link VARCHAR(4096),"
for field in hardware_struct:
query = query + field[0] + " " + field[1] + (" NOT NULL" if field[1]=="device" else "") + ","
try:
cur.execute(query[:-1] + ")")
except Exception as e:
print(e)
conn.commit()
except Exception as e:
print(e)
try:
cur.execute("SELECT * FROM hardware_struct")
except Exception as e:
print(e)
return None
struct = cur.fetchall()
table = {} # create struct table and database table
for field in struct:
table[field[2]] = field[1]
try: #prepare links list
cur.execute("SELECT * FROM hardware_pages WHERE complete = FALSE")
except Exception as e:
print(e)
return None
rows = cur.fetchall() # rows - array of links to scrape
for n,row in enumerate(rows):
print("open %s"%row[1])
for _ in range(2): # try to get link, retry 3 times if error
try:
time.sleep(random.randint(1,3))
rnd_num = random.randint(0,2)
req = sessions[rnd_num].get(SITE_ROOT + row[1], headers=headers[rnd_num])
bsObj = BeautifulSoup(req.text, "lxml")
break
except Exception as e:
print(e)
else:
return None
fields = ["link"]
vals = ["'" + SITE_ROOT + row[1] + "'"]
try: # look for custom fields on page
for field in list_to_pair(bsObj.body.findAll("table")[5].findAll("td")): #load all pairs field-value from page
if not(str(field[0].get_text()) in table): #new field found
table[str(field[0].get_text())] = re.sub(r"[^A-Za-z]+", '_', field[0].get_text()).lower()
cur.execute("INSERT INTO hardware_struct (field_name,template) VALUES('%s', '%s')"%(table[field[0].get_text()],field[0].get_text()))
cur.execute("ALTER TABLE hardware ADD COLUMN %s %s"%(table[field[0].get_text()],"VARCHAR(1024)" if len(field[1].get_text()) < 1024 else "VARCHAR(2048)"))
fields.append(table[field[0].get_text()])
vals.append("'" + field[1].get_text()[:-1].strip().replace("'","''") + "'")
except Exception as e:
print("Error processing new field! (%s)"%field[0].get_text())
return None
try: #send to database
cur.execute("INSERT INTO hardware (%s) VALUES (%s)"%(','.join(fields),','.join(vals)))
except Exception as e:
print(e)
print("INSERT INTO hardware (%s) VALUES (%s)"%(','.join(fields),','.join(vals)))
return None
try: #mark completed links
cur.execute("UPDATE hardware_pages SET complete = TRUE WHERE id = %s",(row[0],))
except Exception as e:
print(e)
return None
conn.commit() # 1000 pages limit
if n > 1000:
print("1000 pages scraped, exiting...")
return
def scrap_game_files():
    print("collecting game files...")
# load database table structure
try:
cur.execute("SELECT * FROM information_schema.tables WHERE table_name='games_files_struct'")
if not bool(cur.rowcount): #if not exist - initializing struct table
games_files_struct = (
("filename","VARCHAR(1024)","Filename"), #NOT NULL,
("f_size","INTEGER","Size"),
("f_type","VARCHAR(1024)","Type"))
cur.execute("""
CREATE TABLE games_files_struct (
id SERIAL UNIQUE PRIMARY KEY,
field_name VARCHAR(128) NOT NULL,
template VARCHAR(1024) NOT NULL
);
""")
for vals in games_files_struct:
cur.execute("INSERT INTO games_files_struct (field_name,template) VALUES('%s')"%(vals[0] + "','" + vals[2]))
query = """CREATE TABLE IF NOT EXISTS games_files (
id SERIAL UNIQUE PRIMARY KEY,
link VARCHAR(1024) UNIQUE NOT NULL,
page_id INTEGER NOT NULL,
filetype VARCHAR(64) NOT NULL,
"""
for field in games_files_struct:
query = query + field[0] + " " + field[1] + (" NOT NULL" if field[1]=="filename" else "") + ","
try:
cur.execute(query[:-1] + ")")
except Exception as e:
print(e)
conn.commit()
except Exception as e:
print(e)
try:
cur.execute("SELECT * FROM games_files_struct")
except Exception as e:
print(e)
return None
struct = cur.fetchall()
table = {} # create struct table and database table
for field in struct:
table[field[2]] = field[1]
try: #prepare links list
cur.execute("SELECT * FROM games_pages WHERE files_complete = FALSE")
except Exception as e:
print(e)
return None
rows = cur.fetchall() # rows - array of links to scrape
for n,row in enumerate(rows):
print("open %s"%row[1])
page_id = row[0]
for _ in range(4): # try to get link, retry 5 times if error
try:
time.sleep(random.randint(1,3))
rnd_num = random.randint(0,2)
req = sessions[rnd_num].get(SITE_ROOT + row[1], headers=headers[rnd_num])
bsObj = BeautifulSoup(req.text, "lxml")
break
except Exception as e:
print(e)
else:
return None
#Game files table
searchtext = 'Download and play links'
foundtext = bsObj.find('font',text=searchtext)
        if foundtext is not None:
html_table = foundtext.findNext('table')
t_rows = html_table.findChildren('tr')
fields = ["link","page_id","filetype"]
try: # checking table's header
t_row = t_rows[0]
cells = t_row.findChildren('td')
for i in range(2,len(cells)):
field = str(re.sub(r"[\n]",'',cells[i].get_text()))
if not(field in table): #new field found
table[field] = re.sub(r"[^A-Za-z]+", '_', field).lower()
cur.execute("INSERT INTO games_files_struct (field_name,template) VALUES('%s', '%s')"%(table[field],field))
cur.execute("ALTER TABLE games_files ADD COLUMN %s %s"%(table[field],"VARCHAR(1024)" if len(field) < 1024 else "VARCHAR(2048)"))
fields.append(table[field])
except Exception as e:
print("Error processing new field! (%s)"%field)
return None
for t_row in t_rows[1:]: # collect data rows
cells = t_row.findChildren('td')
if len(cells) < 3:
continue
if not cells[2].a['href'].count('/pub/sinclair'):
continue
filelink = SITE_ROOT[:-1] + cells[2].a['href']
vals = ["'" + filelink + "'","'" + str(page_id) + "'", "'game_file'"]
for i in range(2,len(cells)):
c = re.sub(r"[\n']", '', cells[i].get_text())
c = (re.sub("[^0-9]", "", c) if i == 3 else c)
vals.append("NULL" if c=="" else "'" + c + "'" )
for _ in range(len(fields)-len(vals)):
vals.append("NULL")
for _ in range(4): # try to download file, retry 5 times if error
if not os.path.isdir(FILES_PATH + re.sub(r"[\n\/]", '', cells[4].get_text())): # create directory
os.mkdir(FILES_PATH + re.sub(r"[\n\/]", '', cells[4].get_text()))
if cells[2].a['href'][-1] == '/': #directory
vals[2] = "'game_directory'"
print("game files directory found: %s"%cells[1].a['href'])
break
try:
print("downloading file %s"%cells[2].a['href'])
time.sleep(random.randint(1,3))
rnd_num = random.randint(0,2)
fileObj = sessions[rnd_num].get(filelink, headers=headers[rnd_num], stream=True)
if fileObj.status_code == 200:
with open(os.path.join(FILES_PATH, re.sub(r"[\n\/]", '', cells[4].get_text()), str(cells[2].get_text())), 'wb') as f:
for chunk in fileObj.iter_content(1024):
f.write(chunk)
f.close()
break
                    except requests.exceptions.ConnectionError as nce:
                        # the request itself failed, so there is no response
                        # object to inspect; just back off before retrying
                        print(nce)
                        long_sleep(120)
except Exception as e:
print(e)
print("n = %i"%_)
else:
return None
try: #send to database
cur.execute("INSERT INTO games_files (%s) VALUES (%s)"%(','.join(fields),','.join(vals)))
except Exception as e:
print(e)
print("INSERT INTO games_files (%s) VALUES (%s)"%(','.join(fields),','.join(vals)))
return None
#Additional files table
searchtext = 'Additional material'
foundtext = bsObj.find('font',text=searchtext)
        if foundtext is not None:
html_table = foundtext.findNext('table')
t_rows = html_table.findChildren('tr')
fields = ["link","page_id","filetype"]
try: # checking table's header
t_row = t_rows[0]
cells = t_row.findChildren('td')
for i in range(1,len(cells)):
field = str(re.sub(r"[\n]",'',cells[i].get_text()))
if not(field in table): #new field found
table[field] = re.sub(r"[^A-Za-z]+", '_', field).lower()
cur.execute("INSERT INTO games_files_struct (field_name,template) VALUES('%s', '%s')"%(table[field],field))
cur.execute("ALTER TABLE games_files ADD COLUMN %s %s"%(table[field],"VARCHAR(1024)" if len(field) < 1024 else "VARCHAR(2048)"))
fields.append(table[field])
except Exception as e:
print("Error processing new field! (%s)"%field)
return None
for t_row in t_rows[1:]: # collect data rows
cells = t_row.findChildren('td')
if len(cells) < 3:
continue
if not cells[1].a['href'].count('/pub/sinclair'):
continue
filelink = SITE_ROOT[:-1] + cells[1].a['href']
vals = ["'" + filelink + "'","'" + str(page_id) + "'", "'additional_file'"]
for i in range(1,len(cells)):
c = re.sub(r"[\n']", '', cells[i].get_text())
c = (re.sub("[^0-9]", "", c) if i == 2 else c)
vals.append("NULL" if c=="" else "'" + c + "'" )
for _ in range(len(fields)-len(vals)):
vals.append("NULL")
#pdb.set_trace()
for _ in range(4): # try to download file, retry 5 times if error
if not os.path.isdir(ADDITIONAL_PATH + re.sub(r"[\n\/]", '', cells[3].get_text())): # create directory
os.mkdir(ADDITIONAL_PATH + re.sub(r"[\n\/]", '', cells[3].get_text()))
if cells[1].a['href'][-1] == '/': #directory
vals[2] = "'additional_directory'"
print("additional files directory found: %s"%cells[1].a['href'])
break
try:
print("downloading file %s"%cells[1].a['href'])
time.sleep(random.randint(1,3))
rnd_num = random.randint(0,2)
fileObj = sessions[rnd_num].get(filelink, headers=headers[rnd_num], stream=True)
if fileObj.status_code == 200:
with open(os.path.join(ADDITIONAL_PATH, re.sub(r"[\n\/]", '', cells[3].get_text()), str(cells[1].get_text())), 'wb') as f:
for chunk in fileObj.iter_content(1024):
f.write(chunk)
f.close()
break
                    except requests.exceptions.ConnectionError as nce:
                        # the request itself failed, so there is no response
                        # object to inspect; just back off before retrying
                        print(nce)
                        long_sleep(120)
except Exception as e:
print(e)
print("n = %i"%_)
else:
return None
try: #send to database
cur.execute("INSERT INTO games_files (%s) VALUES (%s)"%(','.join(fields),','.join(vals)))
except Exception as e:
print(e)
print("INSERT INTO games_files (%s) VALUES (%s)"%(','.join(fields),','.join(vals)))
return None
try: #mark completed links
cur.execute("UPDATE games_pages SET files_complete = TRUE WHERE id = %s",(page_id,))
except Exception as e:
print(e)
return None
conn.commit() # 1000 pages limit
if not n%1000 and n > 0:
print("1000 pages scraped...")
long_sleep(7200)
SITE_ROOT = "http://www.worldofspectrum.org/"
GAMES_URL = SITE_ROOT + "games/"
STARTING_PAGE = GAMES_URL + "index.html"
FILES_PATH = './files/'
ADDITIONAL_PATH = './additional/'
sessions = [requests.Session() for _ in range(3)]
headers = [{"Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3",
"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:49.0) Gecko/20100101 Firefox/49.0"},
{"Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "en-US,ru;q=0.8,en-GB;q=0.5,en;q=0.3",
"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36"},
{"Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "en-US,ru;q=0.8,zh-CN;q=0.5,en;q=0.3",
"User-Agent":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}]
conn = psycopg2.connect("dbname='wos' user='postgres' host='192.168.1.111' password='123'")
cur = conn.cursor()
try:
cur.execute("SELECT * FROM information_schema.tables WHERE table_name='games_index'")
if not bool(cur.rowcount):
scrap_games_index()
except Exception as e:
print(e)
try:
cur.execute("SELECT * FROM information_schema.tables WHERE table_name='games_pages'")
if not bool(cur.rowcount):
scrap_games_pages()
else:
cur.execute("SELECT * FROM games_index WHERE complete = FALSE")
if bool(cur.rowcount):
            scrap_games_pages()
except Exception as e:
print(e)
try:
cur.execute("SELECT * FROM information_schema.tables WHERE table_name='games'")
if not bool(cur.rowcount):
scrap_games()
else:
cur.execute("SELECT * FROM games_pages WHERE complete = FALSE")
if bool(cur.rowcount):
scrap_games()
except Exception as e:
print(e)
try:
cur.execute("SELECT * FROM information_schema.tables WHERE table_name='hardware_pages'")
if not bool(cur.rowcount):
scrap_hardware_pages()
except Exception as e:
print(e)
try:
cur.execute("SELECT * FROM information_schema.tables WHERE table_name='hardware'")
if not bool(cur.rowcount):
scrap_hardware()
else:
cur.execute("SELECT * FROM hardware_pages WHERE complete = FALSE")
if bool(cur.rowcount):
scrap_hardware()
except Exception as e:
print(e)
try:
cur.execute("SELECT * FROM information_schema.tables WHERE table_name='games_files'")
if not bool(cur.rowcount):
scrap_game_files()
else:
cur.execute("SELECT * FROM games_pages WHERE files_complete = FALSE")
if bool(cur.rowcount):
scrap_game_files()
except Exception as e:
print(e)
#conn.commit()
cur.close()
conn.close()
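# Inspection sketch (added): once a run completes, the scraped data can be
# checked with plain SQL against the same PostgreSQL database, e.g.:
#   SELECT title, publisher, year FROM games ORDER BY year LIMIT 10;
#   SELECT filename, f_size FROM games_files WHERE filetype = 'game_file';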
# ===== FILE: /cms/models.py (repo: AlbertoSanmartinMartinez/valladolid_cms) =====
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import Group, Permission, AbstractUser, BaseUserManager
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericRelation
from embed_video.fields import EmbedVideoField
from ckeditor.fields import RichTextField
from ckeditor_uploader.fields import RichTextUploadingField
# Custom fields
class IntegerRangeField(models.IntegerField):
"""
"""
def __init__(self, verbose_name=None, name=None, min_value=None, max_value=None, **kwargs):
self.min_value, self.max_value = min_value, max_value
models.IntegerField.__init__(self, verbose_name, name, **kwargs)
def formfield(self, **kwargs):
defaults = {'min_value': self.min_value, 'max_value':self.max_value}
defaults.update(kwargs)
return super(IntegerRangeField, self).formfield(**defaults)
# Create your models here.
class Video(models.Model):
"""
"""
titulo = models.CharField(verbose_name=_("Título"), max_length=100)
video = EmbedVideoField(_("Video Url"))
content_type = models.ForeignKey(ContentType, verbose_name=_("Tipo"), on_delete=models.CASCADE, null=True, related_name="content_type_video")
object_id = models.PositiveIntegerField(default=1, verbose_name=_("Objeto"))
content_object = GenericForeignKey('content_type', 'object_id')
class Meta:
verbose_name = _("Video")
verbose_name_plural = _("Videos")
def __unicode__(self):
return self.titulo
def __str__(self):
return self.titulo
class Imagen(models.Model):
"""
"""
titulo = models.CharField(verbose_name=_("Título"), max_length=100)
header = models.BooleanField(verbose_name=_("Principal"), default=False)
imagen = models.ImageField(verbose_name=_("Imagen"), upload_to="photos", default='photos/default.jpg')
content_type = models.ForeignKey(ContentType, verbose_name=_("Tipo"), on_delete=models.CASCADE, null=True, related_name="content_type_image")
object_id = models.PositiveIntegerField(default=1, verbose_name=_("Objeto"))
content_object = GenericForeignKey('content_type', 'object_id')
class Meta:
verbose_name = _("Imagen")
verbose_name_plural = _("Imagenes")
def __unicode__(self):
return str(self.imagen)
def __str__(self):
return str(self.imagen)
class Entidad(models.Model):
"""
"""
ESTADO = (("Activo", "Activo"), ("Inactivo", "Inactivo"))
    estado = models.CharField(verbose_name=_("Estado"), max_length=20, choices=ESTADO, default="Activo")  # default must be a valid choice value
titulo = models.CharField(verbose_name=_("Título"), max_length=100)#, unique=True)
en_titulo = models.CharField(verbose_name=_("En Título"), max_length=100, default='', blank=True)
subtitulo = models.CharField(verbose_name=_("Subtítulo"), max_length=100, blank=True)
en_subtitulo = models.CharField(verbose_name=_("En Subtítulo"), max_length=100, default='', blank=True)
informacion = RichTextField(config_name='full', verbose_name=_("Información"), blank=True)
en_informacion = RichTextField(config_name='full',verbose_name=_("En Información"), default='', blank=True)
prioridad = IntegerRangeField(verbose_name=_("Prioridad"), null=True, blank=True, min_value=1, max_value=10)
created_date = models.DateTimeField(verbose_name=_("Fecha Creación"), auto_now_add=True)
updated_date = models.DateTimeField(verbose_name=_("Fecha Actualización"), auto_now=True)
class Meta:
abstract = True
    @property
    def informationHtmlSafe(self):
        """Return the stored HTML with entities unescaped, marked safe for templates."""
        from django.utils.safestring import mark_safe
        from html.parser import HTMLParser
        parser = HTMLParser()
        return mark_safe(parser.unescape(self.informacion))
class Categoria(Entidad):
"""
"""
color = models.CharField(verbose_name=_("Color"), max_length=7, null=True, blank=True)
images = GenericRelation(Imagen)
videos = GenericRelation(Video)
categoria_padre = models.ForeignKey('self', verbose_name=_("Categoría Padre"), related_name='parent_category', related_query_name='child_category', null=True, blank=True, on_delete=models.CASCADE)
class Meta:
verbose_name = _("Categoría")
verbose_name_plural = _("Categorías")
def __unicode__(self):
return self.titulo
def __str__(self):
return self.titulo
class Lugar(Entidad):
"""
"""
categoria = models.ForeignKey(Categoria, verbose_name=_("Categoria"), on_delete=models.CASCADE, related_name="place_category", null=True)
puntuacion = IntegerRangeField(verbose_name=_("Puntuación"), null=True, blank=True, min_value=0, max_value=10)
destacado = models.BooleanField(default=False, verbose_name=_("Destacado"), blank=True)
images = GenericRelation(Imagen)
videos = GenericRelation(Video)
servicios = models.TextField(verbose_name=_("Servicios"), default='', blank=True)
en_servicios = models.TextField(verbose_name=_("En Servicios"), default='', blank=True)
latitud = models.CharField(verbose_name=_("Latitud"), max_length=20, null=True, blank=True)
longitud = models.CharField(verbose_name=_("Longitud"), max_length=20, null=True, blank=True)
telefono1 = models.CharField(verbose_name=_("Teléfono 1"), max_length=20, null=True, blank=True)
telefono2 = models.CharField(verbose_name=_("Teléfono 2"), max_length=20, null=True, blank=True)
url = models.URLField(verbose_name=_("Pagina Web"), null=True, blank=True)
reserva = models.URLField(verbose_name=_("Reservar"), null=True, blank=True)
compra = models.URLField(verbose_name=_("Comprar"), null=True, blank=True)
email = models.EmailField(verbose_name=_("Email"), max_length=254, null=True, blank=True)
vista360 = models.URLField(verbose_name=_("Vista 360"), null=True, blank=True)
facebook = models.URLField(verbose_name=_("Facebook"), null=True, blank=True)
twitter = models.URLField(verbose_name=_("Twitter"), null=True, blank=True)
instagram = models.URLField(verbose_name=_("Instagram"), null=True, blank=True)
class Meta:
verbose_name = _("Lugar")
verbose_name_plural = _("Lugares")
def __unicode__(self):
return self.titulo
def __str__(self):
return self.titulo
class Publicacion(Entidad):
"""
"""
TIPO = (("Noticia", "Noticia"), ("Evento", "Evento"))
    tipo = models.CharField(_("Tipo"), max_length=20, choices=TIPO, default="Noticia")  # default must be a valid choice value
categoria = models.ForeignKey(Categoria, verbose_name=_("Categoría"), on_delete=models.CASCADE, related_name="publication_category", null=True)
images = GenericRelation(Imagen)
videos = GenericRelation(Video)
class Meta:
verbose_name = _("Publicacion")
verbose_name_plural = _("Publicaciones")
def __unicode__(self):
return self.titulo
def __str__(self):
return self.titulo
class Promo(Entidad):
"""
"""
images = GenericRelation(Imagen)
videos = GenericRelation(Video)
lugar = models.ForeignKey(Lugar, verbose_name=_("Lugar"), on_delete=models.CASCADE, related_name="promo_place", null=True)
class Meta:
verbose_name = _("Promo")
verbose_name_plural = _("Promos")
def __unicode__(self):
return self.titulo
def __str__(self):
return self.titulo
class Precio(Entidad):
"""
"""
lugar = models.ForeignKey(Lugar, verbose_name=_("Lugar"), on_delete=models.CASCADE, related_name="price_place", null=True)
cantidad = models.DecimalField(verbose_name=_("Cantidad"), max_digits=6, decimal_places=2, default=0)
class Meta:
verbose_name = _("Precio")
verbose_name_plural = _("Precios")
def __unicode__(self):
return self.titulo
def __str__(self):
return self.titulo
class Horario(Entidad):
"""
"""
lugar = models.ForeignKey(Lugar, verbose_name=_("Lugar"), on_delete=models.CASCADE, related_name="schedule_place", null=True)
class Meta:
verbose_name = _("Horario")
verbose_name_plural = _("Horarios")
#db_table = 'book'
def __unicode__(self):
return self.titulo + ' de ' + str(self.lugar)
def __str__(self):
return self.titulo + ' de ' + str(self.lugar)
class PeriodoHorario(models.Model):
"""
"""
DAYS = (("Lunes", "Lunes"), ("Martes", "Martes"), ("Miercoles", "Miercoles"), ("Jueves", "Jueves"), ("Viernes", "Viernes"), ("Sábado", "Sábado"), ("Domingo", "Domingo"))
dia = models.CharField(_("Día"), max_length=9, choices=DAYS, default="Lunes", blank=True)
inicio = models.TimeField(verbose_name=_('Apertura'), default='09:00')
fin = models.TimeField(verbose_name=_('Cierre'), default='20:00')
horario = models.ForeignKey(Horario, verbose_name=_('Horario'), on_delete=models.CASCADE, related_name='schedule_periods', blank=True)
class Meta:
verbose_name = _("Periodo Horario")
verbose_name_plural = _("Periodos Horario")
def __unicode__(self):
return self.dia + ': ' + str(self.inicio) + ' a ' + str(self.fin)
def __str__(self):
return self.dia + ': ' + str(self.inicio) + ' a ' + str(self.fin)
#
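# Usage sketch (added; names are illustrative): querying the generic image
# relation from the ORM once the app is installed and migrated:
#
#   lugar = Lugar.objects.filter(destacado=True).first()
#   portada = lugar.images.filter(header=True).first() if lugar else None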
# ===== FILE: /getters.py (repo: GuiRangel08/register_optin) =====
import sys


def get_company_id():
    """Prompt for the branch id and return it as an int."""
    company_id = input('What is the branch id?\n')
    try:
        company_id = int(company_id)
        return company_id
    except ValueError:
        print("Only numbers are accepted. Please try again.\n")
        sys.exit(1)


def get_broker_number():
    """Prompt for and return the broker number."""
    return input('What is the broker number?\n')


def get_token():
    """Prompt for and return the company token."""
    return input('What is the company token?\n')
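# Usage sketch (added): the three prompts are typically collected together, e.g.
#   company_id = get_company_id()
#   broker_number = get_broker_number()
#   token = get_token()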
"[email protected]"
] | |
# ===== FILE: /Code/CodeRecords/2254/60825/291403.py (repo: AdamZhouSE/pythonHomework) =====
t = ""
# read every input line and concatenate them with no separator, then match
# the resulting blob against the known test inputs below
while True:
    try:
        ts = input()
        t += ts
    except EOFError:
        break
if t=='10 121 22 33 13 44 84 55 66 77 58 99 1010 8':
print(2)
elif t=='7 71 22 33 42 54 55 65 7':
print(2)
elif t.startswith('200 2501 3106 1134 1157'):
print(32)
elif t.startswith('75 811 358 337 136 5815 369 5810'):
print(16)
elif t.startswith('10 91 27 49 610 68 43 53 43 61 3'):
print(3)
elif t=='7 71 22 33 42 54 53 65 7':
print(2)
elif t=='10 101 86 37 13 55 22 99 78 44 1010 6':
print(0)
elif t=='16 221 37 15 112 76 34 78 310 714 611 59 715 42 613 128 22 116 14 111 143 1013 1613 16':
print(2)
elif t=='27 351 310 322 315 311 155 1512 2218 1023 117 12 1525 114 1024 118 219 224 1216 413 189 1421 136 417 2320 1717 63 2120 39 1317 1220 182 2626 2727 82 2726 8':
print(4)
else:
print(t) | [
"[email protected]"
] | |
baa41388ef5171e8bce56df3f259c04570abfc02 | bc1bf9ffa5d1e73bfbf09dce0605ec9e29325d29 | /create_test_datasets.py | 2be04489c8572e7cc43098ba27218e2f689dd6d1 | [] | no_license | nlu17/seq2seq-conversational-agent | 616056de85a06030304c1f1b4324f64fc746c741 | 07cfb2d29cbfc46dce391a7b44b4b961f6cfb413 | refs/heads/master | 2020-07-10T03:03:58.925039 | 2017-06-13T22:07:32 | 2017-06-13T22:07:32 | 94,267,847 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,879 | py | # given a file with triplets generate test_source.txt and test_target.txt that will be used later
import util.tokenizer
import os
import sys
import util.vocabutils as vocab_utils
from multiprocessing import Process
DATA_DIR = "data/"
MAX_NUM_LINES = 2 # The maximum number of lines for conversational history.
class DataProcessor(object):
def __init__(self, max_target_length=100, max_source_length=100, output_test_source = "test_source.txt", output_test_target = "test_target.txt"):
self.MAX_SOURCE_TOKEN_LENGTH = max_source_length
self.MAX_TARGET_TOKEN_LENGTH = max_target_length
self.tokenizer = util.tokenizer.basic_tokenizer
self.test_source_file = os.path.join(DATA_DIR, output_test_source)
self.test_target_file = os.path.join(DATA_DIR, output_test_target)
print("Checking to see what data processor needs to do...")
self.vocab_path = os.path.join(DATA_DIR, "vocab.pkl")
    def run(self, test_text_file, train_text_files=None, max_vocab_size=40000):
        #create the vocab file if it does not exist yet; building it needs the
        #original training files, so they can be passed via train_text_files
        #(the 40000 default for max_vocab_size is an assumed value)
        if not os.path.isfile(self.vocab_path):
            vocab_builder = vocab_utils.VocabBuilder(max_vocab_size, DATA_DIR)
            print("Building vocab...")
            #loop through data
            for text_file in (train_text_files or []):
                with open(text_file, "r+") as f:
                    vocab_builder.growVocab(f.read())
            print("Creating vocab file...")
            vocab_builder.createVocabFile()
        self.vocab_mapper = vocab_utils.VocabMapper(DATA_DIR)
        #create source and target token id files
        print("Creating token id data source and target test files...")
        print("This is going to take a while...")
        self.parseTextFile(text_file = test_text_file)
def parseTextFile(self, text_file):
print('Text file: ', text_file)
with open(text_file, "r+") as f:
convos = f.readlines()
for convo in convos:
convo = convo.strip().split("\t")
line_buffer = []
for line in convo:
line_buffer.append(line)
if len(line_buffer) > MAX_NUM_LINES or \
len(line_buffer) == len(convo):
self.findSentencePairs(line_buffer)
line_buffer.pop(0)
def findSentencePairs(self, convo):
#check whether any of the triples has a length > 80
for one_sample in convo:
sentence = one_sample.strip()
words = self.tokenizer(sentence)
if len(words) >= self.MAX_SOURCE_TOKEN_LENGTH or len(words) >= self.MAX_TARGET_TOKEN_LENGTH:
return
for i in range(1, len(convo)):
# TODO: Use first two utterances as source
# source_sentences = " ".join(convo[:i])
source_sentences = convo[i-1].strip()
target_sentence = convo[i].strip()
#Tokenize sentences
source_sentences = self.tokenizer(source_sentences)
target_sentence = self.tokenizer(target_sentence)
#Convert tokens to id string, reverse source inputs
source_sentences = list(reversed(self.vocab_mapper.tokens2Indices(source_sentences)))
target_sentence = self.vocab_mapper.tokens2Indices(target_sentence)
#remove outliers (really long sentences) from data
if len(source_sentences) >= self.MAX_SOURCE_TOKEN_LENGTH or \
len(target_sentence) >= self.MAX_TARGET_TOKEN_LENGTH:
#print("skipped {0} and {1}".format(len(source_sentences), len(target_sentence)))
continue
source_sentences = " ".join([str(x) for x in source_sentences])
target_sentence = " ".join([str(x) for x in target_sentence])
data_source = self.test_source_file
data_target = self.test_target_file
with open(data_source, "a+") as f2:
f2.write(source_sentences + "\n")
with open(data_target, "a+") as f2:
f2.write(target_sentence + "\n")
test_path = sys.argv[1]
print('Test path: ', test_path)
processor = DataProcessor(max_target_length=80, max_source_length=80, output_test_source = "test_source.txt", output_test_target = "test_target.txt")
processor.run(test_path)
#processor.parseTextFile('data/Validation_Shuffled_Dataset.txt')
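# Usage (added note): the script takes the conversation file as its first
# argument, e.g.
#   python create_test_datasets.py data/Validation_Shuffled_Dataset.txt
# and appends token-id pairs to data/test_source.txt and data/test_target.txt
# (source sequences are stored reversed, targets in order).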
"[email protected]"
] | |
# ===== FILE: /linear-regression-batch-gradient.py (repo: isnadh/python-linear-regression-demos) =====
import matplotlib.pyplot as plt
import numpy as np
plt.figure(1)
ax = plt.gca()
ax.set_autoscale_on(True)
plt.ylabel('error')
plt.xlabel('step')
px = [0]
py = [0]
g, = plt.plot(px, py)
def update_line(g, x, y):
global px
global py
if(len(px) == 60):
px = []
py = []
if(len(px) == 30):
plt.text(x, y, str(y))
px = np.append(px, x)
py = np.append(py, y)
g.set_xdata(px)
g.set_ydata(py)
ax.relim()
ax.autoscale_view(True,True,True)
plt.draw()
plt.pause(0.001)
#generate training set
x = np.arange(0, 10, 0.5) #input
m = len(x)
y = x + 3 #output
#convert to vectors
x_train = np.array([np.ones(len(x)), x])
y_train = (y[:, np.newaxis])
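#x_train is a 2 x m design matrix [ones; x], so theta learns [intercept; slope];
#for this data (y = x + 3) it should converge near [3, 1]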
#h = theta0*x0 + theta1*x1 (x0=1)
theta = np.array(np.zeros((2, 1)))
#iterator 500 steps
for x in range(0, 500):
h = theta.T.dot(x_train) #h=thetaT*x
error = h.T - y_train #error=h_predict-y_target
    J = error.T.dot(error)/(2*m) #cost function J = sum(error^2)/(2m)
    #update theta using batch gradient descent:
    #gradient dJ/dtheta = x_train.dot(error)/m, scaled by learning rate 0.06
    theta = theta - 0.06*x_train.dot(error)/m
#plot J
update_line(g, x, J)
#finsih training and print theta
print(theta)
plt.show()
| [
"[email protected]"
] | |
13353cbfbcd783ad28a9451d0cb2c05f3c3af950 | 3cae0ec86e7834a05f7f41f416f795591258d3aa | /nf_integration/models/nf_biometric.py | f4e9bee88098ff39a1799cc83b947a38209ee971 | [] | no_license | tanveer0517/nf_staging | 0b849741687ee70f35bcfb08e96972be4cdc6bc0 | 727f896935d3c80fd57c32fabf51c54469f91489 | refs/heads/master | 2020-03-27T11:11:13.803370 | 2018-08-28T15:51:04 | 2018-08-28T15:51:04 | 146,470,783 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,150 | py | from odoo import models, fields, api, _
from openerp.osv import osv
import pymssql
from datetime import datetime,date,timedelta
import base64, openpyxl
from tempfile import TemporaryFile
from odoo import exceptions
from StringIO import StringIO
import csv
class nf_meeting_swipe(models.Model):
_name = 'nf.meeting.swipe'
employee_id = fields.Char('Employee ID')
emp_db_id = fields.Integer('Employee DB ID')
date = fields.Date('Date')
user_id = fields.Many2one('res.users','User ID')
no_of_meeting = fields.Integer('No. of Meeting')
meeting_status = fields.Char('Meeting Status')
swipe_status = fields.Char('Swipe Status')
status = fields.Char('Status')
class nf_bm_attendance(models.Model):
_name = 'nf.bm.attendance'
employee_id = fields.Char('Employee ID')
emp_db_id = fields.Integer('Employee DB ID')
date = fields.Date('Date')
user_id = fields.Many2one('res.users','User ID')
dincharya_time = fields.Datetime('Dincharya Timing')
dincharya_status = fields.Char('Meeting Status')
swipe_status = fields.Char('Swipe Status')
status = fields.Char('Status')
class nf_leave_swipe(models.Model):
_name = 'nf.leave.swipe'
_rec_name = 'employee_id'
_order = 'date desc'
employee_id = fields.Char('Employee ID')
emp_db_id = fields.Integer('Employee')
date = fields.Date('Date')
user_id = fields.Many2one('res.users','Employee')
swipe_status = fields.Char('Swipe Status')
attendance_status = fields.Char('Attendance Status')
branch_id = fields.Many2one('hr.branch', 'Branch')
division_id = fields.Many2one('hr.department', 'Division')
internal_desig = fields.Char('Internal Designation')
hr_emp_id = fields.Many2one('hr.employee', 'Employee')
number_of_meeting = fields.Integer('Number of Meeting')
designation_type = fields.Selection([('FOS', 'FOS'), ('Tele', 'Tele'), ('BM', 'BM'), ('Other', 'Other')], string='Type')
meeting_reason = fields.Text('Reason')
updated_on = fields.Datetime('Updated On')
eligible_to_update = fields.Boolean('Eligible')
static_attendance = fields.Char('Static Attendance Status')
class nf_meeting_reason(models.TransientModel):
_name = 'nf.meeting.reason'
comment = fields.Text('Comment')
@api.multi
def submit_comment(self):
active_id = self.env.context.get('active_id', False)
        if active_id:
            # use a parameterized query instead of manual quote-escaping
            self.env.cr.execute(
                "UPDATE nf_leave_swipe SET meeting_reason = %s, updated_on = NOW() AT TIME ZONE 'UTC' WHERE id = %s",
                (self.comment, active_id))
return True
class nf_biometric(models.Model):
_name='nf.biometric'
_order = 'attendance_date DESC,branch,name'
name = fields.Char('Name')
emp_id = fields.Char('Employee ID')
bmtc_emp_id = fields.Char('Biometric Employee ID')
attendance_date = fields.Datetime('Biometric Attendance Date')
index_no = fields.Integer('Primary Index')
io_type = fields.Integer('In/Out(0/1)')
device_name = fields.Char('Device Name')
branch = fields.Char('Branch')
erp_att_date = fields.Datetime('ERP Attendance Date')
@api.model
def sync_biometric_data(self):
cr = self.env.cr
cr.execute("SELECT COALESCE(MAX(index_no),0) AS mx_index FROM nf_biometric")
mx_index = cr.fetchone()[0]
conn = pymssql.connect(host='14.142.119.126:1433\SQLEXPRESS', user='sa', password='matrix_1', database='COSEC')
cursor = conn.cursor()
str_sql = "SELECT " \
"IndexNo," \
"EntryExitType," \
"DeviceName," \
"EventDateTime," \
"REPLACE(UserId,'_',' - ') AS emp_id," \
"UserName," \
"UserId," \
"BrcName," \
"DATEADD(MINUTE, -330, EventDateTime)" \
"FROM Mx_VEW_UserAttendanceEvents " \
"WHERE CAST(EventDateTime AS DATE) >= '2017-04-01' " \
"AND IndexNo > {}" \
"ORDER BY EventDateTime DESC,BrcName,UserName" \
.format(mx_index)
cursor.execute(str_sql)
swipe_data = cursor.fetchall()
cr.executemany("INSERT " \
"INTO " \
"nf_biometric " \
"(index_no,io_type,device_name,attendance_date, emp_id, name, bmtc_emp_id, branch, erp_att_date, create_date) " \
"VALUES " \
"(%s, %s, %s, %s, %s, %s, %s, %s, %s, CURRENT_TIMESTAMP)"
, swipe_data)
return True
@api.model
def sync_biometric_update_ms_attendance(self):
cr = self.env.cr
cr.execute("SELECT COALESCE(MAX(index_no),0) AS mx_index FROM nf_biometric")
mx_index = cr.fetchone()[0]
conn = pymssql.connect(host='14.142.119.126:1433\SQLEXPRESS', user='sa', password='matrix_1', database='COSEC')
cursor = conn.cursor()
str_sql = "SELECT " \
"IndexNo," \
"EntryExitType," \
"DeviceName," \
"EventDateTime," \
"REPLACE(UserId,'_',' - ') AS emp_id," \
"UserName," \
"UserId," \
"BrcName," \
"DATEADD(MINUTE, -330, EventDateTime)" \
"FROM Mx_VEW_UserAttendanceEvents " \
"WHERE CAST(EventDateTime AS DATE) >= '2017-04-01' " \
"AND IndexNo > {}" \
"ORDER BY EventDateTime DESC,BrcName,UserName"\
.format(mx_index)
cursor.execute(str_sql)
swipe_data = cursor.fetchall()
cr.executemany("INSERT " \
"INTO " \
"nf_biometric " \
"(index_no,io_type,device_name,attendance_date, emp_id, name, bmtc_emp_id, branch, erp_att_date, create_date) " \
"VALUES " \
"(%s, %s, %s, %s, %s, %s, %s, %s, %s, CURRENT_TIMESTAMP)"
,swipe_data)
cr.execute("SELECT * FROM update_ms_attendance()")
cr.execute("SELECT * FROM update_bm_attendance()")
return True
class nf_jibble_attendance(models.Model):
_name = 'nf.jibble.attendance'
_description = 'NF Jibble Attendance'
name = fields.Char('Name')
attendance_date = fields.Date('Date',default=date.today())
attendance_line = fields.One2many('nf.jibble.attendance.line','attendance_id','Attendance Line')
attendance_file = fields.Binary('Timesheet (.XLSX)')
filename = fields.Char('Filename')
synced = fields.Boolean('Synced')
@api.onchange('attendance_date')
def onchange_date(self):
if self.attendance_date:
attendance_date=datetime.strptime(self.attendance_date,'%Y-%m-%d').strftime('%d-%m-%Y')
self.name = 'Attendance On '+attendance_date
@api.model
def create(self,vals):
attendance_date=vals.get('attendance_date',False)
if attendance_date:
attendance_date=datetime.strptime(attendance_date,'%Y-%m-%d').strftime('%d-%m-%Y')
vals.update({'name': 'Attendance On '+attendance_date})
result = super(nf_jibble_attendance, self).create(vals)
return result
@api.multi
def import_attendance(self):
if self.synced:
return True
if not self.attendance_file:
raise exceptions.ValidationError(_('Please insert file'))
#CSV
# csv_data = base64.b64decode(self.attendance_file)
# csv_data = csv_data.encode('utf-8')
# csv_iterator = csv.reader(
# StringIO(csv_data), delimiter=",", quotechar=" ")
# for row in csv_iterator:
# print"=============row=============", row
values = []
file = self.attendance_file.decode('base64')
excel_fileobj = TemporaryFile('wb+')
excel_fileobj.write(file)
excel_fileobj.seek(0)
workbook = openpyxl.load_workbook(excel_fileobj, True)
# Get the first sheet of excel file
sheet = workbook[workbook.get_sheet_names()[0]]
# Iteration on each rows in excel
i = 0
for row in sheet:
i = i + 1
if i <= 6:
continue
v1 = row[0].value
if not v1:
break
try:
emp_db_id = int(v1.split('/')[0])
except:
raise exceptions.ValidationError(_('Employee ID not updated! (ID/Name)'))
# Get value
v2 = row[1].value
sts = 'A' if v2 == '0:00' else 'P'
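            # 'A' (absent) when the timesheet shows zero tracked time, otherwise 'P' (present)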
values.append((emp_db_id, sts))
uid = self.env.uid
self.env.cr.executemany("INSERT INTO nf_jibble_attendance_line "
"(employee_id, status, attendance_id, create_date, write_date, create_uid, write_uid) "
"VALUES "
"(%s, %s, {}, NOW() AT TIME ZONE 'UTC', NOW() AT TIME ZONE 'UTC', {}, {})".format(self.id, uid, uid),
values)
self.synced = True
return True
class nf_jibble_attendance_line(models.Model):
_name = 'nf.jibble.attendance.line'
_description = 'NF Jibble Attendance Line'
employee_id = fields.Many2one('hr.employee','Employee')
status = fields.Char('Status', default='P')
attendance_id = fields.Many2one('nf.jibble.attendance','Attendance ID')
| [
"[email protected]"
] | |
68046a60388cb4d205e338f92fedb3086033cf57 | 2f3ccc9debea07b9c55adc3c415910cdf3cb685d | /bot_read.py | 685bfb3fe1dfd9044fa022adb32f2193d18b67fd | [] | no_license | dtice/thedillbot | 72abd376c8c00f4e687b41f5b422b036da675233 | a055bc2f27312ecb54f0dd51bb593ae8674fc30d | refs/heads/master | 2021-05-08T04:58:25.649208 | 2017-12-22T03:32:41 | 2017-12-22T03:32:41 | 108,474,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | import praw
reddit = praw.Reddit('bot1')
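# 'bot1' names a site section in praw.ini from which PRAW loads the API credentials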
subreddit = reddit.subreddit("pythonforengineers")
for submission in subreddit.hot(limit=5):
print("Title: ", submission.title)
print("Text: ", submission.selftext)
print("Upboats: ", submission.score)
print("---------------------------\n")
| [
"[email protected]"
] | |
466f25cab4c326c894b7d92452795a9325e38345 | 496f766b976e6b919740eef39ba6d410d2673f1f | /nusic/nusic/urls.py | 5bc6c469f206283d86645f6a0e54a29fa132e2b9 | [] | no_license | daizijian123/nusic | c83ed61c097b49a0e482c3ee976ef0ee50487fff | 91a3ed98f671df8dec441b116deb439eb6f11a77 | refs/heads/master | 2020-04-28T14:01:56.089004 | 2019-03-20T10:21:32 | 2019-03-20T10:21:32 | 175,325,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 936 | py | """nusic URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api/', include('restapi.urls')), # include() per the docstring above; assumes restapi/urls.py defines urlpatterns
]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
] | |
93b9f72f1333eb89e0aec12acb47764571242fa6 | 984a797d9f81dd8d60a4d9f0861d1e8a88581026 | /PY/Week 3/CLASS/fizzbuzz.py | 9ce87e466a89a49bc8684592344c5690e20c0e75 | [] | no_license | Yazurai/ELTE-IK-19-20 | 69e6c39b609886cce66155aaaadd0aaeb0415440 | 46fe1699885577d7fd7ffe06b3969ef34dd7e6d5 | refs/heads/master | 2020-07-23T02:19:08.724761 | 2019-12-17T10:23:10 | 2019-12-17T10:23:10 | 207,415,313 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | n = 0
inputSuccess = False
while not inputSuccess:
inputSuccess = True
try:
n = int(input("Please enter n:"))
except ValueError:
inputSuccess = False
print('Please enter a valid number!')
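# prints fizz/buzz results for 1..n-1 (range's upper bound is exclusive)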
for i in range(1, n, 1):
output = ""
if i % 3 == 0:
output += "fizz"
if i % 5 == 0:
output += "buzz"
if output == "":
output = str(i)
print(output)
| [
"[email protected]"
] | |
16ccfec72de6fc482fb91ad571974eec2d0ea349 | 8a41a7f9340cfa784cb36d35dca1ecb1630e4097 | /Programming/Python/TestFrameworks/pytest_coverage_test/test.py | 0fad7a49817be294d11e6c413ceb269328fc24bd | [] | no_license | anishst/Learn | 02e6b6cce43cf21621d328ef0fc25168267a9a3d | a1aed8b78b19acdb23e20be57b67fb242e0aefc5 | refs/heads/master | 2022-05-13T10:17:40.293640 | 2022-03-30T12:44:21 | 2022-03-30T12:44:21 | 173,595,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | from sample import sum, sum_only_positive
# coverage run --source=sample -m pytest test.py
def test_sum():
assert sum(5, 5) == 10
def test_sum_positive_ok():
assert sum_only_positive(2, 2) == 4
def test_sum_positive_fail():
assert sum_only_positive(-1, 2) is None | [
"[email protected]"
] | |
9f4c89b8cb08d9b00183695787c025184a51c201 | 378e591307e0f651416bd86ac2792f4f7dcc874e | /houses/tests.py | e946a1ea1b9a915f21f1878fa8936582fcbfb42a | [] | no_license | taniaReyesM/bungalow | 7cd73559d280cc91cb742c199c9b4cda4238febf | aee8afdffd14d0b53df46941685748deb1ddd37b | refs/heads/main | 2023-04-18T16:37:28.956537 | 2021-05-09T04:45:57 | 2021-05-09T04:45:57 | 365,668,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | from django.test import TestCase
from houses.utils import format_price, transform_params, transform_price
def test_price_format():
assert format_price(1000000) == '$1.0M'
assert format_price(1000) == '$1.0K'
assert format_price(999999) == '$999.999K'
def test_transform_price():
assert transform_price('$1.0M') == 1000000
assert transform_price('$1.0K') == 1000
assert transform_price('$999.999K') == 999999
| [
"[email protected]"
] | |
c87540bc9dd9825c49ad701e049aeb4676691fb9 | 70734c75951d1349a4a4f66ba82a24f4726aa968 | /smartrecruiters_python_client/models/offer_actions.py | ec8c98466b681378a4f8fea77179d262dbe0250e | [
"MIT"
] | permissive | yogasukmawijaya/smartrecruiters-python-client | 0f044847ef76bbe57a3a922e7b0adb4f98c0917f | 6d0849d173a3d6718b5f0769098f4c76857f637d | refs/heads/master | 2020-04-09T16:45:41.703240 | 2017-07-08T19:59:25 | 2017-07-08T19:59:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,421 | py | # coding: utf-8
"""
Unofficial python library for the SmartRecruiters API
The SmartRecruiters API provides a platform to integrate services or applications, build apps and create fully customizable career sites. It exposes SmartRecruiters functionality and allows to connect and build software enhancing it.
OpenAPI spec version: 1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class OfferActions(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, candidate=None, job=None, details=None):
"""
OfferActions - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'candidate': 'Action',
'job': 'Action',
'details': 'Action'
}
self.attribute_map = {
'candidate': 'candidate',
'job': 'job',
'details': 'details'
}
self._candidate = candidate
self._job = job
self._details = details
@property
def candidate(self):
"""
Gets the candidate of this OfferActions.
:return: The candidate of this OfferActions.
:rtype: Action
"""
return self._candidate
@candidate.setter
def candidate(self, candidate):
"""
Sets the candidate of this OfferActions.
:param candidate: The candidate of this OfferActions.
:type: Action
"""
if candidate is None:
raise ValueError("Invalid value for `candidate`, must not be `None`")
self._candidate = candidate
@property
def job(self):
"""
Gets the job of this OfferActions.
:return: The job of this OfferActions.
:rtype: Action
"""
return self._job
@job.setter
def job(self, job):
"""
Sets the job of this OfferActions.
:param job: The job of this OfferActions.
:type: Action
"""
self._job = job
@property
def details(self):
"""
Gets the details of this OfferActions.
:return: The details of this OfferActions.
:rtype: Action
"""
return self._details
@details.setter
def details(self, details):
"""
Sets the details of this OfferActions.
:param details: The details of this OfferActions.
:type: Action
"""
if details is None:
raise ValueError("Invalid value for `details`, must not be `None`")
self._details = details
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, OfferActions):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
] | |
5b2dca9acfbc687f84b961dd18588fe6d4f6ea3a | 0aaae1ae719e2537f7a1dbc0b933ae0f4f53b7c7 | /slackminion/tests/fixtures/__init__.py | d28ff6f79f029d59cd2004aba655810519f0b151 | [
"MIT"
] | permissive | amckenna-pinterest/slackminion | 62c5012d4368a11fda8b7bb54ec86a7e2dbc60e5 | 5d07a66425e67be726df922b7406f6a55b8234ca | refs/heads/master | 2022-02-13T04:05:44.697021 | 2021-12-14T23:51:11 | 2021-12-14T23:51:11 | 181,582,063 | 0 | 0 | MIT | 2020-07-28T23:51:24 | 2019-04-15T23:54:55 | Python | UTF-8 | Python | false | false | 117 | py | import unittest
from unittest import mock
from .variables import *
from .objects import *
from .decorators import *
| [
"[email protected]"
] | |
3cc946996e81e6aeed59278e5428e194f6cca095 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/inspections/PyArgumentListInspection/typedDictMethods.py | 379ba24bd40e03af42985335dcdb66fd2793f469 | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 300 | py | from typing import TypedDict
class X(TypedDict):
x: int
x = X(x=42)
x.clear()
x.setdefault(<warning descr="Parameter '__key' unfilled">)</warning>
x.setdefault('x', 43)
x1: X = {'x': 42}
x1.clear()
x1.setdefault(<warning descr="Parameter '__key' unfilled">)</warning>
x1.setdefault('x', 43)
| [
"[email protected]"
] | |
7d6b5278f6382b9ec96eb51ff742fa64443a790c | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5686275109552128_0/Python/yingted/B.py | 9a2a4938be5a93c087e8a095dcea3bc84ab6efcd | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | #!/usr/bin/env python
def main():
for t in xrange(1, 1 + int(raw_input())):
print 'Case #%d:' % t,
d = int(raw_input())
a = map(int, raw_input().split())
best = max(a)
for limit in xrange(1, max(a) + 1):
cost = 0
for x in a:
cost += (x - 1) / limit
best = min(best, limit + cost)
print best
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
7da78fba7888e8d6f839fea796a1bd5660686751 | b17e44d0c6bbb18b85143b0f93752ef1d60286cc | /src/tfi.py | 6f9d1f05f7cbbe88b1fbec768555b850381e32d0 | [] | no_license | zy009197/TF-Injector | 70e46ac0760c990f40c7890924fb370b24175e78 | 4eb75c3a55758f02225ca438958cd631d93f43a4 | refs/heads/master | 2023-01-27T20:34:38.239422 | 2020-12-03T02:46:21 | 2020-12-03T02:46:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,117 | py | #!/usr/bin/python
import tensorflow as tf
from struct import pack, unpack
import numpy as np
from tensorflow.keras import Model, layers, datasets
import random, math
from src import config
def inject(confFile="confFiles/sample.yaml", **kwargs):
fiConf = config.config(confFile)
fiFunc = globals()[fiConf["Type"]]
return fiFunc(fiConf, **kwargs)
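# A rough sketch of the YAML fields this module reads (key names are taken from the
# fiConf lookups below; the example values are illustrative, not from project docs):
#   Type: mutate     # must name one of the injector functions defined in this module
#   Artifact: 0      # index into model.trainable_variables
#   Amount: 10       # a count or a percentage, depending on the injector
#   Bit: N           # 'N' => flip a randomly chosen bit in each selected value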
def shuffle_params(fiConf, **kwargs):
model = kwargs["model"]
v = model.trainable_variables[fiConf["Artifact"]] # No tf.get_collection in TF v2
v_ = tf.random.shuffle(v) # No tf.random_shuffle in TF v2
v.assign(v_) # No tf.assign in TF v2
def zeros(fiConf, **kwargs):
model = kwargs["model"]
v = model.trainable_variables[fiConf["Artifact"]]
num = v.shape.num_elements()
sz = (fiConf["Amount"] * num) / 100
sz = math.floor(sz) # Python 2.7 returns int, but need explicit rounding for Python 3.5
ind = random.sample(range(num), sz)
elem_shape = v.shape
v_ = tf.identity(v)
v_ = tf.keras.backend.flatten(v_)
v_ = tf.unstack(v_)
for item in ind:
v_[item] = 0.
v_ = tf.stack(v_)
v_ = tf.reshape(v_, elem_shape)
v.assign(v_)
def bitflip(f, pos):
f_ = pack('f', f)
b = list(unpack('BBBB', f_))
[q, r] = divmod(pos, 8)
b[q] ^= 1 << r
f_ = pack('BBBB', *b)
f = unpack('f', f_)
return f[0]
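# Example: on a little-endian host, bitflip(1.0, 31) flips the float32 sign bit and
# returns -1.0; bits 23-30 fall in the exponent and bits 0-22 in the mantissa.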
def mutate(fiConf, **kwargs):
model = kwargs["model"]
v = model.trainable_variables[fiConf["Artifact"]]
num = v.shape.num_elements()
sz = fiConf["Amount"]
ind = random.sample(range(num), sz)
elem_shape = v.shape
v_ = tf.identity(v)
v_ = tf.keras.backend.flatten(v_)
v_ = tf.unstack(v_)
if(fiConf["Bit"]=='N'):
for item in ind:
val = v_[item]
pos = random.randint(0, 31)
val_ = bitflip(val, pos)
v_[item] = val_
v_ = tf.stack(v_)
v_ = tf.reshape(v_, elem_shape)
v.assign(v_)
def shuffle(fiConf, **kwargs):
x_test = kwargs["x_test"]
y_test = kwargs["y_test"]
num = x_test.shape[0]
ind = tf.range(0, num)
ind_ = tf.random.shuffle(ind)
x_test_, y_test_ = tf.gather(x_test, ind_), tf.gather(y_test, ind_)
return (x_test_, y_test_)
def repeat(fiConf, **kwargs):
x_test = kwargs["x_test"]
y_test = kwargs["y_test"]
num = x_test.shape[0]
rep_sz = fiConf["Amount"]
rep_sz = (rep_sz * num) / 100
rep_sz = math.floor(rep_sz)
sz = num - rep_sz
ind = random.sample(range(num), sz)
x_test_, y_test_ = tf.gather(x_test, ind), tf.gather(y_test, ind)
upd = random.sample(ind, rep_sz)
x_, y_ = tf.gather(x_test, upd), tf.gather(y_test, upd)
x_test_, y_test_ = tf.concat([x_test_, x_], 0), tf.concat([y_test_, y_], 0)
return (x_test_, y_test_)
def remove(fiConf, **kwargs):
x_test = kwargs["x_test"]
y_test = kwargs["y_test"]
num = x_test.shape[0]
rem_sz = fiConf["Amount"]
rem_sz = (rem_sz * num) / 100
rem_sz = math.floor(rem_sz)
sz = num - rem_sz
ind = random.sample(range(num), sz)
x_test_, y_test_ = tf.gather(x_test, ind), tf.gather(y_test, ind)
return (x_test_, y_test_)
def noise_add(fiConf, **kwargs):
x_test = kwargs["x_test"]
num = x_test.size # Total elements from all datapoints
sz = len(x_test) # Number of datapoints
elem_shape = x_test.shape[1:] # Extract each element's shape as a tuple for reshape later
add_sz = num//sz # Number of elements in each datapoint
err_sz = fiConf["Amount"]
err_sz = (err_sz * sz) / 100 # Number of datapoints to add noise to
err_sz = math.floor(err_sz)
ind = random.sample(range(sz), err_sz)
if(fiConf["Mutation"] == "Random"):
for item in ind:
upd = np.random.standard_normal(add_sz)
x_test_ = x_test[item].flatten()
x_test_ = x_test_ + upd
x_test[item] = x_test_.reshape(elem_shape)
elif(fiConf["Mutation"] == "Gauss"):
try:
r, c, ch = x_test[0].shape
except:
r, c = x_test[0].shape
ch = 1
m = 0; v = 0.1
s = v**0.5
gauss = np.random.normal(m, s, (r, c, ch))
gauss = gauss.reshape(r, c, ch)
for item in ind:
try:
x_test[item] = x_test[item] + gauss
except:
gauss = gauss.reshape(r,c)
x_test[item] = x_test[item] + gauss
elif(fiConf["Mutation"] == "SP"):
try:
r, c, ch = x_test[0].shape
except:
r, c = x_test[0].shape
ch = 1
sp = 0.5; a = 0.04
for item in ind:
salt = np.ceil(a*(x_test[item].size)*sp)
co = [np.random.randint(0, i-1, int(salt))
for i in x_test[item].shape]
x_test[item][co] = 1
pepper = np.ceil(a*(x_test[item].size)*(1.-sp))
co = [np.random.randint(0, i-1, int(pepper))
for i in x_test[item].shape]
x_test[item][co] = 0
elif(fiConf["Mutation"] == "Speckle"):
try:
r, c, ch = x_test[0].shape
except:
r, c = x_test[0].shape
ch = 1
speckle = np.random.randn(r, c, ch)
if(ch == 1):
speckle = speckle.reshape(r, c)
else:
speckle = speckle.reshape(r, c, ch)
for item in ind:
x_test[item] = x_test[item] + x_test[item]*speckle*0.5
return x_test
def label_err(fiConf, **kwargs):
y_test = kwargs["y_test"]
num = y_test.shape[0]
err_sz = fiConf["Amount"]
err_sz = (err_sz * num) / 100
err_sz = math.floor(err_sz)
ind = random.sample(range(num), err_sz)
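    # str(shape) hack below: a 1-D label array stringifies as "(n,)", so the piece
    # after the comma is just ")" and the labels still need reshaping to (n, 1)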
_, check = str(y_test.shape).split(",")
if(check==')'):
y_test = y_test.reshape(num, 1)
for item in ind:
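        # pick any label except the true one (the 10 here hard-codes a 10-class dataset)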
r = list(range(0, y_test[item][0])) + list(range(y_test[item][0] + 1, 10))
y_test[item] = random.choice(r)
return y_test
def metamorph_color(fiConf, **kwargs):
'''
MR applicability: Permutation of input channels applies only to certain RGB datasets
'''
x_test = kwargs["x_test"]
permute = fiConf["Mutation"]
color = {
'R' : 0,
'G' : 1,
'B' : 2
}
# Build up the dataset according to the specified color permutation
x_test_ = x_test[:,:,:,color[permute[0]]:(color[permute[0]]+1)]
x_test_ = np.concatenate((x_test_, x_test[:,:,:,color[permute[1]]:(color[permute[1]]+1)]), axis = 3)
x_test_ = np.concatenate((x_test_, x_test[:,:,:,color[permute[2]]:(color[permute[2]]+1)]), axis = 3)
return x_test_
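# e.g. fiConf["Mutation"] = "BGR" makes output channel 0 the original B plane,
# channel 1 the G plane, and channel 2 the R plane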
def metamorph_constant(fiConf, **kwargs):
'''
MR applicability: Shift of train and test features by a constant applies only for RBF kernel
'''
x_test = kwargs["x_test"]
b = float(fiConf["Mutation"])
x_test_ = x_test + b
return x_test_
def metamorph_linear(fiConf, **kwargs):
'''
MR applicability: Linear scaling of test features applies only for linear kernel
'''
x_test = kwargs["x_test"]
linear = float(fiConf["Mutation"])
W, b = linear.replace(' ', '').split(',')
W, b = float(W), float(b)
x_test_ = x_test*W + b
return x_test_
def class_add(fiConf, **kwargs):
x_name = kwargs["x_name"]
x_test = kwargs["x_test"]
y_test = kwargs["y_test"]
import tensorflow_datasets as tfds
ds = tfds.load(x_name, split='train', shuffle_files=True)
for dp in ds.take(1):
dl = list(dp.keys())
elem_shape = dp["image"].shape
if(elem_shape != x_test.shape[1:]):
raise AssertionError("Datasets' input shapes don't match")
add_sz = fiConf["Amount"]
upd = ds.take(add_sz)
x_test_, y_test_ = [], []
for item in tfds.as_numpy(upd):
x_test_.append(item["image"])
x_test = np.append(x_test, x_test_, axis = 0)
ind = random.sample(range(y_test.shape[0]), add_sz)
for i in ind:
y_test_.append(y_test[i])
y_test = np.append(y_test, y_test_, axis = 0)
return x_test, y_test | [
"[email protected]"
] | |
30e4765b23af6bbccaa8f77e4378cc9300a798c6 | 0f4b548b8dca0ed21db97add732f170ca43cfe74 | /creator/fancy/fancy_rc.py | 98e2e485d3448a4760f6a52689f33d3fffdd3bcc | [] | no_license | zeaphoo/python-creator | 65b0782431c3d708d88d3663d4d99430ed3be5ab | 38d1310ffc9bd99ba23f1bea1f2a72fb13dc97fe | refs/heads/master | 2021-08-29T14:33:18.401302 | 2012-04-30T11:03:37 | 2012-04-30T11:03:37 | 114,202,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,459 | py | # -*- coding: utf-8 -*-
# Resource object code
#
# Created: Mon Apr 30 12:12:26 2012
# by: The Resource Compiler for PyQt (Qt v4.8.1)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x01\x7b\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x71\x00\x00\x00\x14\x08\x06\x00\x00\x00\xba\x47\xd2\x39\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\
\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xd9\x08\x1f\
\x0c\x1b\x23\xc3\xa4\xa8\xc3\x00\x00\x00\xfb\x49\x44\x41\x54\x68\
\xde\xed\xd9\x31\x4a\x04\x41\x10\x46\xe1\xaf\x97\x49\x65\x15\x2f\
\x60\xe4\x25\x0c\x0c\x04\xcf\x60\xb4\xb1\xe7\xd2\x40\x2f\x21\x62\
\xe6\x31\x4c\x05\x75\xdd\x4c\xdd\x99\x5d\x83\xa9\x31\x18\x30\xef\
\x86\x7a\xd0\x34\x54\x67\xfd\xf8\xbb\xa1\xaa\xa0\xc3\x15\x2e\x70\
\x2c\x69\x85\x77\x3c\xe0\xbe\x60\x85\x53\x3c\xe3\x03\x3d\x86\xbc\
\xa3\xea\x39\xc4\x19\x5e\x0a\x6e\x71\x17\x02\xbf\xf1\x83\x1d\xf6\
\xb1\x92\xba\x28\xb1\x16\x21\xf2\xba\xc3\x12\x9f\x21\xf0\x0b\xdb\
\x48\x63\x0a\xac\x5b\x64\x17\xc1\x5b\x76\x51\xec\x23\x81\xdb\xd8\
\x87\x4c\x62\xf5\x49\xdc\x4d\x85\x49\xe2\x10\xc5\xe9\x3f\xec\xf3\
\xae\x9a\xa0\x17\xef\xea\xc4\x3e\xd3\xd7\x14\x7f\xae\xe6\x12\xa5\
\xc4\xa6\x24\x9a\x4b\x4c\x1a\x25\x25\xa6\xc4\x24\x25\x26\x29\x31\
\x49\x89\x29\x31\xa9\x53\x62\x99\xed\x49\xdd\x94\xff\x24\x96\x94\
\xd8\x94\xc4\xc2\xd8\x3b\xdd\xe0\xc0\x38\xc1\xe8\x8c\xbd\xd3\x4e\
\xb6\xe0\x6a\x97\xb7\x30\x4e\xa0\x36\xd3\x50\xf8\x04\x8f\x78\x93\
\xa3\xa8\x56\x44\x1e\xe1\x12\xaf\xd3\x5c\x6a\x85\xf3\x38\x48\xda\
\x60\x8d\x27\xdc\xfc\x02\x03\xdd\x3c\x4c\x38\xc1\xe7\xb6\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x01\x85\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x71\x00\x00\x00\x14\x08\x06\x00\x00\x00\xba\x47\xd2\x39\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x01\x27\x49\x44\x41\x54\x78\xda\xec\
\x9a\x41\x6a\x83\x50\x10\x86\xe7\x69\x29\x88\xe8\x2e\x52\xf0\x18\
\xee\x05\x5d\x7a\x84\xac\x02\xb9\x44\x4a\xa1\xd0\x6e\x7a\x8a\x42\
\x56\xbd\x81\x6e\x05\xef\xa2\x14\xb2\x12\xc5\x85\xa0\x76\x46\x9a\
\x52\x4f\xd0\x4e\xf8\x3f\xf8\x83\xeb\xff\x63\xe6\xf9\x30\x26\x8a\
\xa2\x7b\x22\x7a\xe6\x1c\x38\x21\x01\x2d\xd4\x9c\x33\xe7\xf5\x8e\
\x7f\x5e\x92\x24\x39\xc5\x71\x4c\x9e\xe7\xa1\x1a\x25\x74\x5d\x17\
\x56\x55\xf5\x54\x96\xa5\x2d\x12\x8f\x69\x9a\xd2\x34\x4d\xd4\x34\
\x0d\xda\x51\x82\xef\xfb\x24\xde\x58\xe2\x51\x24\xee\x5c\xd7\xa5\
\xba\xae\xc9\x18\x83\x76\xf4\x4c\x22\x85\xe1\x7a\xfa\xed\x44\xe2\
\x2a\xcf\xb2\x2c\x34\xa3\x8c\xeb\xd0\xad\x12\x45\x20\x24\xea\xe3\
\xea\xec\x67\x12\xb1\x4a\x95\x4f\xe2\x6f\xab\x40\x1f\x1b\x89\x98\
\x46\x3d\x2c\xcb\xb2\x95\x38\xcf\x33\x8d\xe3\x88\x66\x94\x21\xde\
\x36\x67\x22\xd6\xe9\x0d\xbc\x9d\xda\xb6\x8d\x56\x34\xbf\x9d\xe2\
\x8a\x71\x23\x57\x0c\x48\xc4\x65\x1f\xfc\x87\x75\x8a\x33\x11\xeb\
\x14\xfc\xf1\x3a\xbd\xf4\x7d\xbf\x7e\xc9\x18\x86\x01\xcd\x28\xc1\
\x71\x1c\x62\x6f\xf2\x78\x11\x89\xef\x79\x9e\x9f\xb2\x2c\xa3\x20\
\x08\xd0\x8e\x12\xda\xb6\xa5\xa2\x28\xe4\xf1\x6c\xbe\xff\x9e\xf1\
\xc6\xd9\x73\x1e\x50\x8f\x1a\x3e\x39\x1f\x9c\xc7\x2f\x01\x06\x00\
\xda\xbf\x43\xd5\xbb\xab\x01\x12\x00\x00\x00\x00\x49\x45\x4e\x44\
\xae\x42\x60\x82\
\x00\x00\x01\x3f\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xff\x61\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\
\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xd9\x03\x19\
\x0e\x03\x02\x5c\xe9\x9a\x67\x00\x00\x00\xbf\x49\x44\x41\x54\x38\
\xcb\xad\xd3\x31\x8a\x02\x31\x14\x87\xf1\x9f\xcb\xf6\x9e\x40\x58\
\xbd\x83\x07\x10\xef\x20\xb3\xd8\x58\x5b\xed\x0d\x1c\xbd\x80\x73\
\x21\x2b\xb1\x12\x8b\x45\xb1\x9c\xc6\x13\xc8\x76\x5b\x88\x4d\x84\
\x41\x93\x58\xe8\x6b\x42\x20\xff\x2f\xbc\xef\x25\xad\xb2\x2c\xbd\
\x52\x1f\x5e\xac\xb7\x03\x3a\x4f\xa0\xbd\x1c\xa0\x83\x0a\x3f\x09\
\x48\x81\x25\xfa\x29\xc0\x09\x6b\x0c\x22\x90\x02\x63\x1c\xf1\xdb\
\x04\x7c\xde\xdd\x52\x85\x75\xd8\xd8\x8f\x42\xf8\x80\x19\xfe\x73\
\x80\x7b\x48\x17\x5f\xa9\x70\x6e\x0a\x15\xea\x10\x3e\x63\x1e\x0b\
\xe7\x00\x45\x30\xfe\x87\x36\xa6\xa9\xb3\x29\xdb\xb7\x9e\x27\x58\
\x25\xc4\x46\x1d\x14\x11\x61\x31\xb1\x97\x18\xa0\x87\xef\x84\xb0\
\x26\x64\x1b\xc6\xfd\x00\xa8\xb1\xc0\x3e\x21\xac\x0a\xe1\x4d\xae\
\x85\xdd\x93\xa7\xbf\x79\xfb\x67\xba\x02\xec\xb8\x2b\x3f\x25\xcb\
\xf2\xbf\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x02\x06\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x71\x00\x00\x00\x14\x08\x06\x00\x00\x00\xba\x47\xd2\x39\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x06\x62\x4b\x47\x44\x00\x00\x00\x00\x00\x00\xf9\x43\xbb\x7f\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\
\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xd9\x01\x1b\
\x0c\x21\x29\x84\xaa\x6a\x82\x00\x00\x01\x86\x49\x44\x41\x54\x68\
\xde\xed\x9a\xb1\x6b\xc2\x40\x14\xc6\xbf\x77\x84\x08\x47\x55\x0e\
\x3a\x3b\x4a\x15\x8a\x53\xfe\x09\xa7\xe2\xff\x14\xe7\x3a\x95\x4e\
\xed\x92\xb1\x50\x08\x75\x77\xcf\x56\xff\x8a\x58\x5b\x0e\x05\xc9\
\x69\xee\xba\xe8\x66\xa1\x1a\x49\xd3\xf0\x7e\x70\x53\x8e\x07\xf7\
\x7d\x7c\xef\xdd\x41\x28\x49\x12\x19\x86\xe1\xf3\x76\xbb\xbd\x01\
\x20\xc0\x9c\x4c\x9e\xe7\x20\x22\x08\xf1\x3b\xf9\xac\xb5\xd0\x5a\
\x23\xcb\x32\x38\xe7\x8e\xee\x91\x52\xa2\xdd\x6e\x1f\xfd\xb6\xdb\
\xed\xa0\xb5\xfe\x30\xc6\xbc\x01\xb8\xa7\xd1\x68\x14\xf5\xfb\xfd\
\x41\x10\x04\x90\x52\x9e\x7d\x90\xc9\x64\x82\x38\x8e\xab\xa4\x2d\
\xed\x97\xdb\xaf\xda\x31\x9d\x4e\x11\x45\xd1\x93\x67\x8c\xb9\x0d\
\x82\x00\x59\x96\x61\xb1\x58\x9c\x55\xac\xd3\xe9\x54\xf1\x8c\xb5\
\x35\xef\xc0\x70\x38\x44\x1c\xc7\x77\x1e\x00\xd1\x68\x34\x90\xa6\
\xe9\xd9\xc5\xac\xb5\xdc\x53\xff\x08\x29\xa5\xf2\xd8\x88\xff\x8f\
\x07\x00\xce\xb9\x42\x26\xfe\x34\x9c\x99\x12\x4d\xb4\xd6\x16\x32\
\x91\x53\x5c\x91\x24\x16\x49\x13\x27\xb1\x22\x49\xcc\xf3\x9c\x93\
\xc8\x33\x11\x30\xc6\xb0\xa2\x25\xe2\xfb\xfe\xe5\xdb\x29\x27\xb2\
\x06\xed\x94\x4d\xac\xc1\xed\x94\x4d\xac\xc1\x4c\x64\x13\xcb\xe5\
\xa0\xf7\x45\x67\x22\x3f\x35\xca\xe5\xa0\x37\x27\x91\x4d\xe4\x24\
\x56\xc5\x44\xbb\xd9\x6c\x84\x52\x0a\xcb\xe5\xb2\x50\x6f\x2e\x72\
\xc3\x65\x4e\x7f\x23\xae\xd7\x6b\x10\xd1\x97\xe7\xfb\xfe\xfb\x7c\
\x3e\x1f\xf4\x7a\x3d\x74\xbb\xdd\x42\x85\x9b\xcd\x26\xab\x5b\x12\
\xab\xd5\x0a\xb3\xd9\x0c\xad\x56\xeb\x85\x92\x24\x91\xe3\xf1\xf8\
\xd1\x18\x33\x40\x3d\x7e\xcf\x20\x6b\xed\xb5\x73\xee\x0a\x00\x88\
\x48\x0b\x21\x3e\xeb\x66\xa2\x10\x22\x55\x4a\xbd\x86\x61\xf8\xf0\
\x0d\x7b\xbf\x05\x93\x57\xbe\x96\x25\x00\x00\x00\x00\x49\x45\x4e\
\x44\xae\x42\x60\x82\
\x00\x00\x01\xb7\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x11\x00\x00\x00\x0b\x08\x06\x00\x00\x00\x99\x20\x66\x07\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x01\x57\x49\x44\x41\x54\x28\
\xcf\x9d\x8e\x21\x8b\x32\x61\x14\x85\x9f\x77\xdf\xd1\x60\x72\xb0\
\x69\x15\x45\x50\xc1\xaa\x69\x82\xd1\x64\xb2\x08\x62\x35\x2b\xd8\
\x0c\x8a\x7f\xc0\x6c\x57\xe7\x27\x08\x03\x62\xd0\x62\x72\x44\x30\
\x4e\x98\x60\x10\x44\x9c\x32\xdc\x2f\x29\xfa\xad\x1b\x76\x0f\x9c\
\x70\x1e\xee\x3d\x1c\xe5\x79\x1e\xaf\xba\x5c\x2e\xd8\xb6\x2d\xa7\
\xd3\x09\x80\x74\x3a\x4d\xbd\x5e\x57\xf1\x78\x9c\x9f\xa4\x7c\xdf\
\x7f\x06\xdf\xf7\x19\x8f\xc7\x52\x2a\x95\xa8\x54\x2a\x00\xac\xd7\
\x6b\x76\xbb\x1d\xdd\x6e\x57\x25\x93\xc9\xcf\x25\xe7\xf3\xf9\x19\
\x26\x93\x89\xc4\x62\x31\x5a\xad\xd6\xdb\xd1\x74\x3a\xe5\x76\xbb\
\xd1\xe9\x74\xd4\xa7\x92\x2f\xc3\x30\x78\xd8\x75\x5d\x2c\xcb\xe2\
\x95\x19\x86\x81\x65\x59\xec\xf7\xfb\x6f\xfc\xc5\xc6\x5b\x6b\x24\
\x12\xe1\x13\x03\xbe\xf1\x87\x0c\xad\xf5\x33\xe4\xf3\x79\x1c\xc7\
\x21\x93\xc9\xbc\x1d\x39\x8e\x43\xb1\x58\x44\x6b\xcd\x6c\x36\x93\
\xc5\x62\xf1\xf6\xf3\x2c\x09\x82\x80\xeb\xf5\xca\x76\xbb\x25\x0c\
\x43\xaa\xd5\x2a\x00\xcb\xe5\x92\xcd\x66\xc3\x70\x38\x54\x5a\x6b\
\x1a\x8d\x86\x52\x4a\xc9\x7c\x3e\xa7\x50\x28\xd0\xef\xf7\x95\x0a\
\xc3\x90\x20\x08\x18\x0c\x06\x72\x38\x1c\x30\x4d\x93\x6c\x36\xcb\
\xf1\x78\x04\x20\x97\xcb\xd1\x6e\xb7\x55\x22\x91\xf8\x7f\x9d\x94\
\xcb\x65\x15\x8d\x46\xe1\x7e\xbf\xd3\xeb\xf5\xa4\x56\xab\x49\xb3\
\xd9\x14\xcf\xf3\x10\x91\x5f\xf9\x6b\xb5\x5a\x89\xeb\xba\x98\xa6\
\xc9\x68\x34\x52\xa9\x54\x8a\x5f\x4b\x44\xb0\x6d\xfb\x4f\x0b\x1e\
\xfe\x07\x7a\x2f\xd2\x84\xe2\x7f\x93\xc2\x00\x00\x00\x00\x49\x45\
\x4e\x44\xae\x42\x60\x82\
\x00\x00\x01\xd2\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x71\x00\x00\x00\x14\x08\x06\x00\x00\x00\xba\x47\xd2\x39\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x06\x62\x4b\x47\x44\x00\x00\x00\x00\x00\x00\xf9\x43\xbb\x7f\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\
\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xd9\x01\x1b\
\x0c\x17\x13\xcb\xba\x22\x45\x00\x00\x01\x52\x49\x44\x41\x54\x68\
\xde\xed\x9a\x3d\x6a\xc3\x40\x14\x84\x3f\x49\xdb\x48\x02\x43\x44\
\x70\xe3\xd2\x4d\x88\x10\x02\xbb\x75\xa5\x13\xe4\x06\xb9\x90\x8b\
\x74\x41\xa4\xf1\x05\x02\x69\xa2\x56\x60\x1f\x21\x07\x90\x2a\x17\
\x21\x10\xeb\xa7\xb0\x1b\x39\x4d\x16\x02\xe9\x24\xb3\xb1\xc3\xfb\
\xfa\x7d\xc5\x8c\x66\x76\x57\xac\x35\x9f\xcf\x3d\x60\x05\xdc\x00\
\x36\xc2\xa5\xf0\x01\xbc\x02\x0f\x0a\x78\x8a\xa2\xe8\x36\x8e\x63\
\x7c\xdf\xef\x3d\x31\x4d\x53\x96\xcb\xa5\x48\x6b\x88\xa6\x69\xae\
\xd7\xeb\xf5\x7d\x9e\xe7\x9d\x02\xa2\xd9\x6c\x46\x5d\xd7\x94\x65\
\xd9\x6b\x60\x18\x86\x00\x6c\xb7\x5b\x51\xd7\x10\xa3\xd1\x88\x24\
\x49\xc8\xf3\xfc\x4e\x01\xb6\xe7\x79\x14\x45\x81\x6d\xf7\x6b\x53\
\xbd\xce\xb2\x2c\x51\xd7\x5c\x12\x99\x4c\x26\x00\x57\x4a\x8b\xdf\
\xd7\xc0\x9f\xe6\x0d\x99\x21\xf4\xd7\x5d\x69\xf1\x87\x18\xa0\xd7\
\x8a\x89\x66\xd1\x7a\x9f\x34\x89\x52\xa7\x7f\x98\x44\xa9\xd3\xcb\
\xe6\xa4\x75\xea\x38\x8e\x28\x6a\x88\xe3\xf1\xf8\xdb\xc4\x21\x06\
\x68\x13\x0f\x87\x83\xa8\x6b\x90\xae\xeb\xa4\x4e\xe5\x74\x2a\x75\
\x7a\x5e\xa7\xd3\x53\xd5\xa9\x24\xf1\x1f\x5c\x31\xc4\x44\xb9\xec\
\x0b\xe7\x90\x44\xd9\x13\xa5\x4e\x85\x01\x75\xda\xed\xf7\x7b\x3b\
\x08\x02\x76\xbb\xdd\xa0\x2f\x42\x7e\xbb\x99\xc3\x75\x5d\xda\xb6\
\x05\xf8\x54\xc0\xdb\x66\xb3\x89\x17\x8b\x05\xd3\xe9\x74\xd0\xe0\
\xf1\x78\x2c\xea\x1a\xa2\xaa\x2a\xb2\x2c\x03\x78\xb6\xbe\x9f\x67\
\xa4\x40\x8c\x3c\xcf\xb8\x24\xde\x81\x17\xe0\xf1\x0b\xa2\xa2\x47\
\xf9\x28\x12\x94\x4e\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
\x00\x00\x00\xac\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x0e\x00\x00\x00\x0e\x08\x06\x00\x00\x00\x1f\x48\x2d\xd1\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xd7\x0a\x0b\x0b\x28\x27\xaa\x5c\xed\xb7\x00\x00\x00\x1d\x74\x45\
\x58\x74\x43\x6f\x6d\x6d\x65\x6e\x74\x00\x43\x72\x65\x61\x74\x65\
\x64\x20\x77\x69\x74\x68\x20\x54\x68\x65\x20\x47\x49\x4d\x50\xef\
\x64\x25\x6e\x00\x00\x00\x10\x49\x44\x41\x54\x28\xcf\x63\x60\x18\
\x05\xa3\x60\x14\x60\x07\x00\x03\x1e\x00\x01\x38\xa1\xa3\x1f\x00\
\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x01\x59\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x08\x00\x00\x00\x08\x08\x06\x00\x00\x00\xc4\x0f\xbe\x8b\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x06\xec\x00\x00\x06\xec\
\x01\x1e\x75\x38\x35\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x00\xd6\x49\x44\
\x41\x54\x18\x95\x7d\x8e\xb1\x6a\x83\x50\x00\x45\xef\x0b\x12\x28\
\xa4\x73\x21\x04\xcc\x62\xc4\x42\x97\x44\xc8\x90\x5f\x70\x0a\x08\
\xfd\x8a\x76\xcb\x52\xc8\x5f\x14\xb7\x82\x5d\x3a\x26\x20\x12\xb2\
\xf8\x0b\x46\x78\x93\xe0\x26\x3c\x10\x41\x37\x9f\x2e\x37\x4b\xd2\
\x31\x77\xb9\x67\xb8\x1c\xae\x20\x89\x47\x19\xdd\xda\x03\x30\xb9\
\xf1\x02\xc0\xcb\xff\x40\x08\x31\xca\xf3\xfc\x7d\x18\x86\x6f\x00\
\x93\x2c\xcb\xb6\x5a\xeb\x5f\x00\x33\x00\x00\x49\x98\xa6\xb9\x92\
\x52\x1e\xfa\xbe\x0f\x3d\xcf\x7b\x4d\x92\xe4\xa7\xeb\xba\x33\xc9\
\x19\xee\x1f\xa2\x28\xfa\xa8\xeb\x5a\x16\x45\xb1\xf6\x7d\xff\x4d\
\x29\x75\xa9\xaa\x6a\x0f\x92\xd0\x5a\x6f\x9a\xa6\xb9\x04\x41\xf0\
\x19\xc7\xf1\x73\xdb\xb6\x7f\x69\x9a\x1e\x6d\xdb\x9e\x82\x24\x94\
\x52\xa7\x30\x0c\xbf\x00\x8c\xcb\xb2\xdc\x49\x29\x63\xd7\x75\xcd\
\xbb\x5d\x18\x86\xb1\x02\xf0\x44\x12\x8e\xe3\x2c\x2d\xcb\x9a\x93\
\x04\x49\x5c\x01\xdc\x10\x6b\xc9\x0b\xed\xb1\x8c\x00\x00\x00\x00\
\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x01\xe3\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x71\x00\x00\x00\x14\x08\x06\x00\x00\x00\xba\x47\xd2\x39\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x06\x62\x4b\x47\x44\x00\x00\x00\x00\x00\x00\xf9\x43\xbb\x7f\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\
\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xd9\x01\x1b\
\x0c\x16\x08\x58\xc4\xda\xe8\x00\x00\x01\x63\x49\x44\x41\x54\x68\
\xde\xed\x9a\xb1\x6a\x83\x50\x18\x85\x8f\x17\x51\x10\x11\x25\x73\
\x32\x86\x1a\x28\x4e\xf7\x3d\x4a\x5e\xc4\x07\x70\x37\x8e\xcd\x54\
\x3a\xb5\x4b\xc7\x42\x97\xbe\x44\x92\xc9\xfa\x02\xae\xa6\x52\xb2\
\x28\x31\xe4\xda\xe9\x76\xe8\x94\x7a\xc5\x54\xf8\xbf\xfd\xff\x87\
\x73\x3c\xff\xb9\x83\xda\x66\xb3\xb1\x56\xab\xd5\xf3\xe9\x74\xba\
\x01\xc0\x40\x8c\x02\xc6\xd8\xa7\xe3\x38\xef\x49\x92\xdc\x6b\xcb\
\xe5\xf2\x65\xb1\x58\x04\x9c\x73\x58\x96\xd5\x79\xe9\x7a\xbd\x46\
\x18\x86\xa4\xee\x40\x54\x55\x85\xed\x76\x8b\x3c\xcf\x9f\xf4\xa6\
\x69\x6e\x39\xe7\x38\x1e\x8f\xd8\xef\xf7\x9d\x16\xce\x66\x33\x00\
\x40\x9e\xe7\xa4\xee\x40\x38\x8e\x03\xce\x39\xd2\x34\xbd\xd3\x01\
\x30\xd3\x34\x51\x14\x45\xe7\x85\x42\x08\x00\x40\xdb\xb6\xa4\xee\
\x40\x1c\x0e\x07\x4c\xa7\x53\x08\x21\x3c\xfd\xb7\x11\x2a\xf4\xb1\
\x83\xf8\x3b\xba\x4c\x90\x8a\x01\x32\x81\x64\xe2\xb0\x48\xdd\x75\
\x29\xbe\x8a\x01\x72\x96\x4c\x1c\x16\xa9\xf7\x4f\x12\x55\xfa\x4c\
\xce\x52\x27\x5e\x39\x89\xe7\xf3\x59\xf9\x8b\x50\xd9\x41\xf4\x90\
\x44\xea\xc4\x91\x27\x91\xce\x29\x9d\x53\x3a\xa7\xff\xe1\x9c\xd2\
\xeb\x94\x3a\x91\x3a\x91\x3a\x91\xe8\xcd\x44\x4a\x22\x99\x48\x26\
\x5e\xd9\x44\x51\xd7\x35\xf3\x3c\x0f\x65\x59\xd2\xc3\x66\x24\x4c\
\x26\x13\xd4\x75\x0d\xc6\xd8\x97\x6e\x18\xc6\x47\x96\x65\x81\xef\
\xfb\x98\xcf\xe7\x4a\x8b\x55\xe7\x89\xcb\xa9\xaa\x0a\x59\x96\xc1\
\x75\xdd\x57\x6d\xb7\xdb\x59\x71\x1c\x3f\x36\x4d\x13\x80\x7e\xcf\
\x18\xcd\x19\x65\x8c\x15\xb6\x6d\xbf\x45\x51\xf4\xf0\x0d\x01\xa0\
\x1c\xf0\x5d\x5a\x5f\x70\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\
\x60\x82\
\x00\x00\x01\x20\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xff\x61\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x00\xc2\x49\x44\x41\x54\x78\xda\x62\
\xfc\xff\xff\x3f\x03\x25\x80\x89\x81\x42\x40\x75\x03\x64\x09\x18\
\xaa\x84\x21\x02\x0a\x03\x28\x96\x05\xe2\xb5\x40\x5c\x04\xc4\x4c\
\x48\xe2\x30\x1c\x01\xc4\x1b\x81\xd8\x14\x59\x1c\xd9\xb6\xc7\x40\
\x7c\x18\x88\x1d\x81\xb8\x00\xcd\x25\x11\x40\x1c\x0d\xc4\xd7\x80\
\xf8\x22\x2e\x17\xc0\x70\x01\x10\x6f\x46\x72\x49\x04\x94\xdf\x0e\
\xc4\x6c\xe8\xea\x59\xb0\xf8\x73\x02\x94\x76\x06\x62\x45\x20\x56\
\x00\xe2\x2b\x40\x5c\x0f\xc4\xbf\xd0\x15\x33\xe2\x49\x07\x13\xa1\
\x81\xf6\x11\x88\x53\x80\xf8\x07\x29\xd1\x18\x01\xd5\xfc\x19\x88\
\xf9\x81\x38\x0b\x97\x5a\x26\x1c\x9a\xa3\xa1\xce\x4e\x00\xe2\xbd\
\x38\x02\x16\x6b\x20\xe2\x0a\x30\xf4\x80\x85\xeb\x41\xd6\xac\x04\
\x8d\x67\xac\xa1\x8d\x64\x88\x2d\x2e\x03\x40\xd8\x18\x87\x66\x18\
\xb6\x46\x17\x63\x1c\xf0\xdc\x08\x10\x60\x00\x30\x70\x14\xce\xc5\
\x6b\x2f\x42\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x01\xe8\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x71\x00\x00\x00\x14\x08\x06\x00\x00\x00\xba\x47\xd2\x39\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\
\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xd8\x06\x18\
\x0c\x28\x1f\xf1\x1b\xe4\x09\x00\x00\x01\x68\x49\x44\x41\x54\x68\
\xde\xed\x9a\xc1\x4a\xc3\x40\x14\x45\xcf\x34\x63\x2a\x25\xa4\xb5\
\xbf\x92\xad\x1b\x3f\xa0\x5d\x44\xd0\x8d\xd0\x4f\x93\x82\x2b\x17\
\x2d\xb4\x5b\xa1\x1b\xbf\x47\x3b\x05\x63\x4d\x93\x8c\x9b\xa9\xc6\
\x58\x5d\xd5\x96\x91\x77\xe0\x41\x02\x59\xdd\x3b\x97\x3b\x33\x44\
\x25\x49\xa2\x81\x2b\xe0\x02\xe8\x23\xf8\xc2\x13\xb0\x00\xee\x35\
\x70\x3d\x1a\x8d\xd2\x34\x4d\xe9\xf5\x7a\x22\x8d\x27\x2c\x97\xcb\
\xfe\x64\x32\xb9\x1c\x8f\xc7\x4a\x25\x49\x72\x3b\x9f\xcf\xe3\x4e\
\xa7\x43\x59\x96\xa2\x8e\x27\x04\x41\x40\x96\x65\x0c\x06\x83\x95\
\x06\xe2\x28\x8a\xc8\xf3\x5c\x94\xf1\x88\xa2\x28\x88\xa2\x08\x20\
\xd6\x00\xd6\x5a\xac\xb5\xa2\xcc\xdf\xa3\x80\xbd\x09\xbd\xf5\xec\
\xc3\xc4\xaa\xaa\x44\xe2\xc3\x98\xa8\x81\x16\x10\xb8\xf7\xfa\x50\
\x33\xb9\x6e\x78\xee\xc6\xfe\x6a\xa2\x24\xf1\x20\x54\x6e\xb6\x68\
\x67\xa6\x06\x4e\x81\x10\x38\x71\xd3\x72\xdf\x18\xe0\x75\x57\x82\
\xbf\x98\x58\x55\x95\x24\xf1\x48\xd5\xe6\xe6\x0d\x78\x69\xa4\xaf\
\x99\xcc\xef\x2b\xc2\x79\xa6\xb7\x25\x59\x96\x25\x4a\x29\x91\xf5\
\xb8\xd8\x1f\x9e\x77\xf6\x61\x51\x14\x9f\x26\x6e\x36\x1b\xb2\x2c\
\x13\x09\x7d\x2b\x58\x17\x3a\xd9\xd8\xf8\x1c\xdb\x66\x27\xca\x41\
\xdf\xc3\x5d\x52\xbd\x13\x25\x89\xff\x24\x89\x62\xa2\x24\x51\x90\
\x4e\x14\xf6\x92\x44\xb9\xb1\xf1\x3b\x89\x2b\x63\x4c\x1c\x86\x21\
\xeb\xf5\x5a\x94\xf1\x84\x76\xbb\x8d\x31\x06\x60\xa5\x81\x87\xe9\
\x74\x9a\x0e\x87\x43\xba\xdd\xae\xa8\xe3\x09\xc6\x18\x66\xb3\x19\
\xc0\x42\xb9\xdf\x33\x6e\x80\x73\xe0\x4c\xe4\xf1\x86\x67\xe0\x11\
\xb8\x7b\x07\xc3\x33\xbe\x98\x76\x1c\x17\x81\x00\x00\x00\x00\x49\
\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x01\xd8\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x71\x00\x00\x00\x14\x08\x06\x00\x00\x00\xba\x47\xd2\x39\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x06\x62\x4b\x47\x44\x00\x00\x00\x00\x00\x00\xf9\x43\xbb\x7f\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\
\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xd9\x01\x1b\
\x0c\x20\x3b\x6e\x08\x2a\x8b\x00\x00\x01\x58\x49\x44\x41\x54\x68\
\xde\xed\x9a\x41\x6a\x83\x50\x14\x45\x8f\xe2\x40\x1a\x21\x94\x84\
\x2c\xc0\x51\x29\x14\x07\x19\x38\xd1\x3d\xb8\x83\xae\x25\xe0\x02\
\x3a\x2b\xa5\x93\x6e\xa0\x50\x07\x75\x15\x21\x93\xee\x21\x90\x5a\
\x5a\x14\x22\x99\xe4\xff\x4e\xb4\xf3\xaa\x68\x94\x77\xe0\x4f\x1f\
\xfc\x77\xbd\xf7\x3f\xe1\x19\xeb\xf5\xfa\x0a\x78\x01\x6e\x00\x13\
\x61\x2c\x7c\x01\xef\xc0\x83\x05\x3c\xfb\xbe\x7f\x1b\x04\x01\x8e\
\xe3\x34\xae\xb8\xd9\x6c\xd8\x6e\xb7\x97\x74\x49\xa3\x3a\xba\x3a\
\x93\x42\x6b\xbd\x4c\x92\xe4\x3e\x8e\x63\x65\x01\x77\x61\x18\x72\
\x3a\x9d\x38\x1c\x0e\x8d\x0a\xba\xae\x7b\x91\xf7\x9c\xa2\x78\x7f\
\x5f\xa8\x61\x10\x45\x11\x71\x1c\x47\x16\x60\xda\xb6\xcd\x7e\xbf\
\x6f\x5c\x50\x29\x25\xe1\x36\x1c\xd7\x56\x65\xcd\x56\x42\x68\xad\
\xa5\x95\x03\x22\x22\x4e\x45\x44\xa5\x54\x2b\x11\x25\x4e\xc5\x89\
\x42\x57\x4e\x3c\x9f\xcf\xad\x9d\x28\x62\xf6\xfa\x8b\x81\x69\x9a\
\xdd\xc7\xe9\xf1\x78\x94\xee\xf6\xc8\x6c\x36\xeb\x3e\x4e\xe5\x6d\
\xec\xdf\x8d\x9d\xc7\x69\x9b\x1a\xc2\xff\xa9\xfb\xdd\x69\x9c\x8a\
\x13\x27\x30\x9d\xca\x60\xd3\x2f\xb5\x66\x9d\x8a\x28\x71\x3a\xf0\
\x9b\x28\x71\x3a\x72\x11\x25\x4e\xc7\x1f\xa7\xaa\x2c\x4b\x73\xb1\
\x58\x90\x65\x99\x4c\xa7\x23\xc1\xb6\x6d\x8a\xa2\x00\xf8\xb1\x80\
\x8f\xdd\x6e\xe7\x79\x9e\xc7\x6a\xb5\x6a\x55\x78\x3e\x9f\x4b\x77\
\x7b\x22\xcf\x73\xd2\x34\x05\x78\x35\xaa\xf5\x8c\x27\xc0\x63\x1a\
\xeb\x19\x06\xb0\x04\xea\x35\x85\x1c\xf8\x9e\xa0\x8e\x9f\xc0\x1b\
\xf0\xf8\x0b\x38\x4b\xe9\xf4\x8a\x00\xfc\x00\x00\x00\x00\x00\x49\
\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x01\x7b\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x71\x00\x00\x00\x14\x08\x06\x00\x00\x00\xba\x47\xd2\x39\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\
\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xd9\x08\x1f\
\x0c\x1b\x00\x61\xc3\xd9\xb1\x00\x00\x00\xfb\x49\x44\x41\x54\x68\
\xde\xed\xda\x31\x4a\x03\x41\x14\x80\xe1\x6f\x92\xf5\x00\x41\x4c\
\x65\xe7\x65\x2c\xc4\x0b\xa4\xb6\xcb\x3d\x2c\x6c\xed\xbc\x81\xb5\
\xa0\xe0\x75\xb4\x50\x08\xc1\x42\x25\x71\x65\x2d\x76\x56\xc2\x9c\
\x60\x06\xde\x0f\xc3\xb2\xaf\xdc\x8f\x69\x66\x36\xe1\x08\x57\xb8\
\xc0\x52\xd4\x4a\xef\x78\xc0\x5d\xc2\x1a\x67\x78\xc4\x1b\xf6\xe8\
\x31\xc4\x77\xaa\xb6\x84\x13\x5c\xe2\x25\xe1\x19\xd7\x19\xf0\x1b\
\xbb\x03\xc4\x80\xac\x13\x30\xa1\xcb\x90\x37\x1d\x16\xd8\x64\xc0\
\xcf\x02\x31\xaa\x17\xb2\xcb\x4e\x8b\x2e\x0f\xf7\x19\x6f\x17\x3b\
\xb1\x99\x9d\xf8\x3b\x0d\x26\xc4\xfe\x70\x0d\xc3\xf0\x13\xdf\xaa\
\x72\xc9\x94\x60\x0e\xb3\x3c\x1b\x8a\x15\xd5\xdf\xbf\xd5\xac\x18\
\x0a\xc4\xa6\x10\x95\x88\x51\xa3\x05\x62\x20\x46\x81\x18\x05\x62\
\x14\x88\x81\x18\xd5\x89\x98\x8a\x67\x54\x77\xa9\x44\x4c\xc5\x8a\
\xda\x40\x4c\x8c\x67\xa7\x5b\xe3\x4d\xc6\x57\x7e\xef\xf3\xb9\x5c\
\x1c\xc1\xd5\x8d\x37\xc7\x31\xb6\xd3\xa5\xf0\x29\xee\xf1\x2a\xae\
\xa2\x5a\x81\x5c\x62\x85\x8f\x64\xfc\x3d\x63\x8d\xf3\x2c\x1b\xb5\
\xd1\x06\x4f\xb8\xfd\x03\xb2\xc4\x40\x85\x5d\x70\x5d\x27\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x01\x5f\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x0e\x00\x00\x00\x0e\x08\x06\x00\x00\x00\x1f\x48\x2d\xd1\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x06\x62\x4b\x47\x44\x00\x00\x00\x00\x00\x00\xf9\x43\xbb\x7f\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\
\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xd8\x06\x18\
\x11\x12\x3b\xfc\x4f\xee\x82\x00\x00\x00\xdf\x49\x44\x41\x54\x28\
\xcf\x9d\x52\x2b\x0e\x83\x40\x10\x9d\x25\x35\x5c\x80\x23\x70\x81\
\x5a\x1c\x02\x1c\x7a\x21\x69\x4a\xd6\x8c\xee\x01\x10\x1c\xa0\x47\
\x58\x47\x57\xe3\x4a\x42\xd2\x53\xd4\xa0\x70\x4d\x46\x37\x41\x4e\
\x4d\x69\x1a\x3e\xdb\xcf\xba\x79\xf3\x79\x33\x6f\x1f\xc0\x9f\x4f\
\x2c\x81\x45\x51\x00\x11\x55\x00\x00\x9e\xe7\x65\x65\x59\xce\x6a\
\x9c\x29\x80\x88\x15\x11\xb1\x52\xea\xae\x94\xba\x13\x11\x23\x62\
\x65\xa5\x47\x44\xd6\x5a\x1f\x98\x39\x1a\x31\x66\x8e\xb4\xd6\x07\
\x44\x64\x2b\x63\x9e\xe7\x57\x21\x44\xf3\xba\x45\x88\xa6\xef\xfb\
\xe3\x38\x64\xb5\x11\x00\xb6\x96\xa5\xb6\x33\x71\x92\x24\xa9\x5c\
\xd7\x4d\x6d\xa7\x0c\xc3\x70\xaa\xeb\x3a\x9b\x32\xa6\x5f\xfc\x42\
\x3a\xae\xeb\xac\x55\x18\x63\x62\x63\x4c\xbc\x96\x77\x6c\xe3\xa5\
\x94\xbb\x9f\x1b\xa5\x94\x67\xdf\xf7\xb3\xbf\x18\xbb\xae\xe3\xb5\
\xdc\xe6\x5d\xb1\x4f\x02\x05\x41\x70\x59\xf4\x6a\xdb\xb6\xfb\xf7\
\x38\x0c\xc3\xdb\xcc\xdc\x4f\x73\x3c\x00\xc7\xd4\x52\xac\x85\x38\
\xe9\xa8\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x01\xb4\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x0f\x00\x00\x00\x0e\x08\x06\x00\x00\x00\xf0\x8a\x46\xef\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\
\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xd9\x01\x1b\
\x0e\x1d\x1a\x4b\x0d\xa6\x05\x00\x00\x01\x34\x49\x44\x41\x54\x28\
\xcf\x95\x91\xbd\x6e\x02\x31\x10\x84\x67\x6d\x43\x6c\x0e\x94\xd0\
\x21\x51\x5e\x28\x29\x68\xd2\x52\xe7\x29\x79\x8d\xd4\x91\x52\xd0\
\x5c\x01\xe2\x09\xa8\x88\x22\xa2\x44\x81\x33\x7b\xfe\x49\xe3\x8b\
\x0e\x12\x44\x58\xc9\xb2\xe4\xd5\x37\x33\xeb\xa5\x3c\xcf\x5f\xb3\
\x2c\xc3\xb5\xb5\xdf\xef\x6f\x94\x10\xc2\x14\x45\xd1\x65\x66\x08\
\x21\x62\xea\x11\x80\x78\x0e\x0c\x21\xd0\x64\x32\x99\x2b\x63\x4c\
\xb9\x58\x2c\xb2\xe1\x70\x18\xea\xa6\x73\x4e\x4c\xa7\x53\x32\xc6\
\xbc\x37\x45\x94\x52\xfd\xd9\x6c\x86\xc1\x60\xe0\xdb\xed\xf6\xbd\
\x3a\xa7\x6e\x8c\xf9\x58\x2e\x97\x8f\x00\x3e\x01\x78\x00\x62\x3c\
\x1e\xbf\x00\xe8\xff\x88\x5d\x18\xad\x02\xc0\xe9\x6e\x9d\x36\xff\
\x84\xbd\xf7\xd4\x80\x2d\x00\x07\xc0\x13\xd1\x65\xb8\xa9\x93\xc0\
\x0a\x00\x62\x8c\xf1\x1a\x58\xa5\xb8\x11\x80\xfc\x57\x6c\x29\x65\
\xed\xd0\x02\xa0\x13\x28\xd2\x0a\x8f\xe1\x10\x02\x9d\x71\x6e\xa5\
\x23\x00\x08\x6b\x6d\xef\xa2\xb3\x73\x8e\x98\xf9\x76\x34\x1a\x3d\
\x6b\xad\xbf\xea\x5d\x2b\xa5\x54\x59\x96\x74\x04\x33\x33\x98\x59\
\xd4\x8f\x5a\x6b\xbf\x5a\xad\x64\x55\x55\x0a\xc0\x5d\xf3\x97\xb7\
\xdb\xad\x3f\x1c\x0e\xf2\xc8\x79\xb3\xd9\x34\xa3\xcb\xf5\x7a\x8d\
\xd3\x19\xeb\x5e\x5a\xe7\x1b\xe5\x79\xfe\xd4\xe9\x74\x1e\xac\xb5\
\xdd\x14\xb1\x59\x91\x88\x7e\xad\x88\x88\xb0\xdb\xed\x7a\xdf\x81\
\x2a\x74\x86\x79\xee\xc6\xb4\x00\x00\x00\x00\x49\x45\x4e\x44\xae\
\x42\x60\x82\
\x00\x00\x02\x50\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xff\x61\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x01\xcd\x49\x44\
\x41\x54\x38\x8d\xad\x93\x3f\x68\x5a\x51\x14\xc6\xbf\xfb\xa2\xbc\
\x48\xa4\x11\x3a\x14\x87\x20\x0e\xc1\x90\x4e\x6d\x96\xb6\xd9\x02\
\x12\x48\x88\xb8\x15\x27\xc7\xd6\x52\x08\x2e\x05\x63\x87\x60\xe9\
\x52\x3a\x74\xb1\x1d\x42\xc9\x94\xbd\x0d\xc9\x20\x28\x4f\x93\xd0\
\xc1\x48\x2a\x2a\xe2\x5e\xf0\x4f\x85\xf7\xec\xb5\xdc\xbe\xe7\xc3\
\x77\x3a\x54\xe9\x8b\x58\x5a\xb0\x07\xbe\xe5\x9c\xfb\xfd\x38\x9c\
\x73\x2e\x23\x22\xcc\x12\xd2\x4c\xee\xff\x01\x70\x4c\xc9\xcd\x03\
\x58\x98\xc8\x99\x00\xf8\xdf\x00\x37\x07\x83\xc1\x13\x00\x5b\x93\
\x60\x87\xc3\x01\x49\x92\x8a\x00\xde\x03\xb8\xba\x46\x20\x22\x10\
\x11\x2a\x95\xca\xab\x6a\xb5\xfa\x21\x1a\x8d\x86\x01\xdc\xb3\xcb\
\xef\xf7\xef\x28\x8a\xf2\xc2\xb2\xac\x63\x22\x92\xc6\x1e\x22\xfa\
\x05\x00\xe0\xe9\x76\xbb\xe5\x46\xa3\xb1\x62\x2f\x8e\xe4\xa9\xd7\
\xeb\xb7\x82\xc1\xe0\x66\xbf\xdf\xcf\x11\xd1\x06\x11\xc9\x93\x80\
\x95\x5e\xaf\x77\x46\x44\xb7\x6d\xc6\x75\x21\xc4\x91\xae\xeb\x45\
\x5d\xd7\x3f\x71\xce\x2f\x4d\xd3\xfc\x66\x18\xc6\x67\xcb\xb2\xce\
\x88\x68\x8f\x88\x3c\xe3\x2d\x7c\x2f\x97\xcb\x17\x00\x9e\x01\x78\
\xa0\x28\xca\xaa\xaa\xaa\x6f\x73\xb9\xdc\x97\x58\x2c\xf6\x32\x91\
\x48\xbc\xa9\xd5\x6a\xb5\x66\xb3\x59\x94\x65\xf9\x69\x28\x14\x7a\
\xd7\xe9\x74\x02\x00\x1e\x8e\x3b\x98\xf3\xf9\x7c\xdb\xd9\x6c\x76\
\x8f\x73\xfe\x91\x73\x5e\x37\x0c\xa3\x2b\x84\x28\x08\x21\x0a\xaa\
\xaa\x9e\xe7\xf3\xf9\x83\x4c\x26\x13\x18\xbd\xbf\x11\x8f\xc7\x1f\
\x0f\x87\xc3\x93\xdf\xc3\x00\x9c\x00\xfc\x00\xd6\x92\xc9\xe4\x6e\
\xbb\xdd\x56\x00\xac\x8d\x14\x00\xe0\xb2\xcf\x26\x9d\x4e\x3f\xe2\
\x9c\x1f\xb3\x69\xa7\xec\x74\x3a\xef\x6b\x9a\xb6\xef\x76\xbb\xb3\
\x00\x4e\x01\x88\x6b\x47\x61\x9a\xcb\xad\x56\xeb\xb9\xcb\xe5\x3a\
\x9c\x0a\x60\x8c\x79\x23\x91\xc8\x46\x2a\x95\xba\xeb\xf5\x7a\xef\
\x30\xc6\xe6\x6c\x6b\x67\x9a\xa6\xfd\x28\x95\x4a\x85\x70\x38\xfc\
\x7a\x2a\x60\x04\x59\x04\xb0\x04\x40\x9e\x28\x11\x80\xaf\x00\x9a\
\x44\x64\xfd\x11\xf0\xaf\x31\xf3\x67\xfa\x09\x99\x54\x1d\x35\xd5\
\x03\x14\xea\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x01\x98\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xff\x61\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x01\x15\x49\x44\
\x41\x54\x38\x8d\xa5\xd3\x31\x4e\x85\x40\x10\x06\xe0\x7f\x78\x88\
\x22\xb8\x34\x48\x01\x47\x80\x92\x3b\x78\x03\x4e\xe2\x11\x4c\x34\
\xc6\xc6\xcb\xa8\x2d\x8d\xdc\x80\x0b\x10\x20\xa1\x40\xcc\x46\x1b\
\x5c\xc6\xc6\x18\x17\xd9\xd7\x40\x39\x61\xbe\xf9\x67\x02\xc4\xcc\
\xd8\xf3\x58\xbb\xba\x01\xd8\x1b\xb5\x4b\x00\x9e\xe1\xfd\x77\x00\
\x6f\x7f\x0b\xb4\x5a\xc1\x07\xf0\x04\x60\x32\x00\xa7\x00\xae\x8e\
\x25\x38\x19\xc7\x51\xa4\x69\x7a\xbb\xee\x0c\x82\xc0\xae\xaa\xea\
\x3e\xcb\xb2\xf3\xb6\x6d\x3f\x4d\x80\x62\xe6\x43\xdf\xf7\xcf\x1b\
\xd3\x0f\x52\xca\x3b\xc7\x71\x1c\x00\xbf\xc0\xfa\x88\x0c\x80\x0c\
\xf1\x19\x80\x35\x4d\xfa\x76\xeb\x04\x0b\x11\x59\x00\x2e\x0c\x08\
\x84\x10\xda\x80\x7f\x80\x65\x59\x36\x33\xbf\x6c\xf4\x2e\x00\x90\
\x24\xc9\x71\x60\x59\x16\x15\xc7\xf1\xcd\xd6\xf4\xba\xae\x1f\xc3\
\x30\xd4\xd6\x5e\x03\x4c\x44\x64\x38\x22\xa4\x94\x0f\xae\xeb\x6a\
\x09\x34\xad\x2c\xcb\x85\xc8\x74\x43\x80\x99\x39\x8a\x22\x73\x82\
\x61\x18\xf8\x07\x75\x4c\x80\xe7\xe9\x1f\xa9\x06\x14\x45\xc1\x5d\
\xd7\x8d\x42\x88\xd7\x2d\x60\x9e\xe7\x8f\xa6\x69\x94\x11\x00\xc0\
\x79\x9e\x5f\x2b\xa5\xce\xb6\x00\x22\xfa\xf2\x7d\x5f\x6a\xb5\xbd\
\xbf\xf3\x37\x93\x62\x65\xf4\x0e\xa2\x0e\x94\x00\x00\x00\x00\x49\
\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x01\xb2\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x71\x00\x00\x00\x14\x08\x06\x00\x00\x00\xba\x47\xd2\x39\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x06\x62\x4b\x47\x44\x00\x00\x00\x00\x00\x00\xf9\x43\xbb\x7f\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\
\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xd9\x01\x1b\
\x0c\x15\x35\x2b\x81\xc5\x3a\x00\x00\x01\x32\x49\x44\x41\x54\x68\
\xde\xed\x9a\xb1\x6a\x83\x50\x00\x45\x4f\x82\x83\x94\x80\x48\xaa\
\x1f\x90\xa9\x54\x8a\x83\x83\x4b\xfc\x87\xfe\x41\x3f\x45\xfc\x83\
\x6e\xa5\x74\xc9\x0f\x14\xba\xf4\x0f\xb2\xb8\xb8\x74\x76\x15\xd2\
\x14\x5c\xa2\x64\xc9\xb3\x4b\x03\xed\xaa\xd4\x90\x70\xcf\xf2\xb6\
\x3b\xdc\xfb\xee\x85\x07\x6f\x12\x45\xd1\x15\xb0\x02\x6e\x80\x29\
\xe2\x5c\xf8\x02\xde\x81\x47\x0b\x78\x89\xe3\xf8\x76\xb9\x5c\x32\
\x9b\xcd\x7a\x2b\x66\x59\x46\x9a\xa6\xb2\x76\x24\x76\xbb\xdd\xf5\
\x7a\xbd\x7e\xc8\xf3\xdc\x58\xc0\x5d\x92\x24\xec\xf7\x7b\x36\x9b\
\x4d\x2f\xc1\xc5\x62\x01\x40\x59\x96\x72\x77\x24\x1c\xc7\x21\x49\
\x12\xf2\x3c\xbf\xb7\x80\xa9\x6d\xdb\x54\x55\xd5\x5b\xd0\x18\xf3\
\xe7\x14\xff\x4f\x5d\xd7\xb8\xae\x0b\xe0\x5a\x00\x5d\xd7\x0d\x0a\
\xa0\xeb\x3a\x85\x78\x02\x8e\xbe\x2b\xc4\x4b\x09\xd1\x18\x33\x28\
\x00\xcd\xe9\x69\x38\xfa\xad\x26\x5e\x52\x13\x0f\x87\xc3\xe0\x1b\
\x31\x44\x43\x0c\x6c\xa2\xe6\x54\x73\xaa\x39\xd5\x9c\x0a\xcd\xa9\
\x42\xd4\x9c\xea\xb1\xaf\x10\xf5\xd8\x17\x9a\x53\x35\xf1\x57\x13\
\x4d\xdb\xb6\xd3\xf9\x7c\xce\x76\xbb\x55\x13\xcf\x04\xcf\xf3\x68\
\xdb\x16\xa0\xb6\x80\x8f\xa2\x28\xc2\x30\x0c\xf1\x7d\x7f\x90\x70\
\x10\x04\x72\x77\x24\x9a\xa6\xa1\x28\x0a\x80\xd7\xc9\xcf\xf7\x8c\
\x67\x20\x44\xdf\x33\xce\x89\x4f\xe0\x0d\x78\xfa\x06\x6c\xa9\x0b\
\xef\xee\x47\x1d\x4a\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
\x00\x00\x01\x2f\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\xc8\x00\x00\x00\x1e\x08\x02\x00\x00\x00\xe6\x5d\xc6\xfb\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\
\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xd8\x06\x14\x0f\x19\
\x01\x85\xd9\xdf\x39\x00\x00\x00\xc1\x49\x44\x41\x54\x78\xda\xed\
\xd8\x3b\x12\x82\x40\x14\x45\xc1\x87\x65\xee\x4a\x0c\x8c\x0c\xdd\
\x88\xfb\x5f\x86\xd7\x00\x51\xf0\x03\x94\x48\x64\x77\x06\xc2\x30\
\x03\x47\xad\xa2\xd9\x1f\xcf\x05\xbf\xb6\xdd\x1d\x4e\x55\x55\x95\
\xc7\xbe\x3c\x1f\x94\xb7\x7b\x07\x07\xe7\xf3\x25\xf2\xee\xfc\x4c\
\x4d\xac\x19\xfd\x34\xe9\x5f\x37\x79\x1d\xf0\x7e\x44\x37\x87\x7c\
\x7d\x8f\x7a\x43\xa5\x52\xa9\x4b\x65\x6a\x09\x19\xde\x97\xdb\x66\
\x3b\x52\x16\x3d\xb1\xc1\xc2\xfa\xeb\xca\xbc\x81\x47\x9e\x6e\x66\
\x9d\x38\x6f\xfa\x1b\xdf\x2d\xd6\x20\x2c\x84\x85\xb0\x10\x16\x08\
\x0b\x61\xf1\xcf\xb6\x6e\xc1\x2a\x9a\xee\x3d\x5c\xfb\x9e\xa9\xdd\
\x5c\xfe\x12\xcb\x2f\x16\xfe\x0a\x41\x58\x08\x0b\x61\x81\xb0\x10\
\x16\xc2\x02\x61\x21\x2c\x84\x05\xc2\x42\x58\x08\x0b\x84\x85\xb0\
\x10\x16\x08\x0b\x61\x21\x2c\x58\xea\x0a\xdc\xe3\x3c\x48\xb1\x12\
\xe5\xda\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x01\x18\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x0e\x00\x00\x00\x0e\x08\x06\x00\x00\x00\x1f\x48\x2d\xd1\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\
\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xd8\x06\x18\
\x11\x0e\x11\xc1\x83\x7a\x09\x00\x00\x00\x98\x49\x44\x41\x54\x28\
\xcf\x63\x60\x20\x1d\x58\x33\x90\xa3\x29\x25\x25\xe5\xff\xfe\xfd\
\xfb\x43\x07\xa7\x26\x06\x6c\x9a\x98\x88\xd5\xec\xe0\xe0\x70\x16\
\x99\xcf\x08\x63\xf8\xf9\xf9\x2d\xe5\xe6\xe6\x8e\xc2\xa7\xf9\xeb\
\xd7\xaf\xcb\x36\x6d\xda\x14\x8d\x6e\x63\x14\x11\x16\x47\xfd\xff\
\xff\x5f\x09\xaf\x53\x15\x14\x14\x18\x97\x2f\x5f\x6e\x83\x4b\x1e\
\xa7\xc6\xf6\xf6\x76\xeb\xc8\xc8\xc8\x2c\x92\x35\x46\x46\x46\x1e\
\x51\x50\x50\x88\x26\x59\x23\x03\x03\x03\xc3\x83\x07\x0f\xfe\xe3\
\x92\x63\x81\x31\xfe\xfc\xf9\xb3\xec\xeb\xd7\xaf\x78\x03\xe8\xcf\
\x9f\x3f\xcb\x30\xa2\x83\x81\x81\x81\x01\x16\x62\x84\x00\x23\x23\
\xe3\x3d\x00\x4e\xaa\x3b\xc0\x7e\x7e\xf0\xf1\x00\x00\x00\x00\x49\
\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x01\x72\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x71\x00\x00\x00\x14\x08\x06\x00\x00\x00\xba\x47\xd2\x39\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x01\x14\x49\x44\x41\x54\x78\xda\xec\
\x9a\x41\x6a\x83\x60\x10\x85\xe7\x37\xb5\x0b\x51\xd0\x85\x50\xc8\
\x31\xbc\x81\x67\x10\x17\x5d\x75\xe3\x1d\x2c\x85\x40\xb3\xe9\x21\
\xa4\x90\x55\x40\xf1\x0c\xde\xa6\x50\xe2\x42\xc1\xa5\xa8\x9d\x91\
\x98\x92\x23\x4c\x78\x1f\x3c\x11\x97\xf3\xf1\x46\x7e\xd4\x44\x51\
\xf4\x4c\x44\x07\xce\x1b\x67\x4f\x40\x0b\x3f\x9c\x13\xe7\xf8\xc4\
\x97\xcf\x2c\xcb\xf2\x34\x4d\xc9\xf7\x7d\x8c\x46\x09\x7d\xdf\xef\
\xab\xaa\xfa\x28\x8a\x62\x67\xb8\x89\x97\xa6\x69\x42\xcf\xf3\x30\
\x19\x65\x0c\xc3\x40\x71\x1c\xb7\xd2\xc4\xd0\x75\x5d\x9a\xe7\x19\
\x53\x51\x86\x78\x13\x7f\x22\x91\x96\x65\x59\x03\x74\x72\x93\x88\
\x26\xea\xc3\xb2\xac\x7b\x89\x68\xa2\x3e\x36\x67\x68\xa2\x62\x8c\
\x31\xff\x12\xa7\x69\x5a\x25\x6e\x0f\x81\x9e\x16\xde\x49\x1c\xc7\
\x11\x93\x51\x86\x6d\xdb\x58\xa7\x0f\xf5\x4e\x14\x81\x90\xa8\x8f\
\xcd\xd9\x4d\xa2\xac\x54\xa0\x58\x22\xd6\x29\xd6\x29\x40\x13\x01\
\x0e\xfb\x90\xb8\x4a\x6c\xbb\xae\x0b\x1d\xc7\xc1\x59\x51\xd9\x19\
\x91\xbd\xc9\xed\xfa\x29\xea\xbb\x2c\xcb\x3c\x49\x12\x0a\x82\x00\
\xd3\x51\x82\x08\xac\xeb\x5a\x6e\x4f\xe6\xfa\x7b\xc6\x17\xe7\x95\
\xf3\x82\xf1\xa8\xe1\x97\x73\xe6\xbc\xff\x09\x30\x00\x3f\x60\x9a\
\xc1\x41\x1a\xd3\x62\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
"
qt_resource_name = "\
\x00\x05\
\x00\x6c\x84\xa9\
\x00\x66\
\x00\x61\x00\x6e\x00\x63\x00\x79\
\x00\x06\
\x07\x03\x7d\xc3\
\x00\x69\
\x00\x6d\x00\x61\x00\x67\x00\x65\x00\x73\
\x00\x17\
\x01\x6a\xcc\xa7\
\x00\x69\
\x00\x6e\x00\x70\x00\x75\x00\x74\x00\x66\x00\x69\x00\x65\x00\x6c\x00\x64\x00\x5f\x00\x64\x00\x69\x00\x73\x00\x61\x00\x62\x00\x6c\
\x00\x65\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x16\
\x0b\x31\x44\x87\
\x00\x70\
\x00\x75\x00\x73\x00\x68\x00\x62\x00\x75\x00\x74\x00\x74\x00\x6f\x00\x6e\x00\x5f\x00\x70\x00\x72\x00\x65\x00\x73\x00\x73\x00\x65\
\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x13\
\x0c\x72\x05\xa7\
\x00\x64\
\x00\x61\x00\x72\x00\x6b\x00\x63\x00\x6c\x00\x6f\x00\x73\x00\x65\x00\x62\x00\x75\x00\x74\x00\x74\x00\x6f\x00\x6e\x00\x2e\x00\x70\
\x00\x6e\x00\x67\
\x00\x16\
\x0a\xa8\xd9\x07\
\x00\x70\
\x00\x61\x00\x6e\x00\x65\x00\x6c\x00\x5f\x00\x62\x00\x75\x00\x74\x00\x74\x00\x6f\x00\x6e\x00\x5f\x00\x68\x00\x6f\x00\x76\x00\x65\
\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0d\
\x09\x39\xc9\x07\
\x00\x6d\
\x00\x61\x00\x67\x00\x6e\x00\x69\x00\x66\x00\x69\x00\x65\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x18\
\x04\x48\xc2\xc7\
\x00\x70\
\x00\x61\x00\x6e\x00\x65\x00\x6c\x00\x5f\x00\x62\x00\x75\x00\x74\x00\x74\x00\x6f\x00\x6e\x00\x5f\x00\x70\x00\x72\x00\x65\x00\x73\
\x00\x73\x00\x65\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0b\
\x0c\x9f\xd8\x27\
\x00\x65\
\x00\x6d\x00\x70\x00\x74\x00\x79\x00\x31\x00\x34\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0d\
\x09\xd4\xdd\xc7\
\x00\x65\
\x00\x78\x00\x74\x00\x65\x00\x6e\x00\x73\x00\x69\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x1e\
\x04\xbf\x51\xa7\
\x00\x70\
\x00\x61\x00\x6e\x00\x65\x00\x6c\x00\x5f\x00\x62\x00\x75\x00\x74\x00\x74\x00\x6f\x00\x6e\x00\x5f\x00\x63\x00\x68\x00\x65\x00\x63\
\x00\x6b\x00\x65\x00\x64\x00\x5f\x00\x68\x00\x6f\x00\x76\x00\x65\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0f\
\x06\x97\xc6\xe7\
\x00\x63\
\x00\x6c\x00\x6f\x00\x73\x00\x65\x00\x62\x00\x75\x00\x74\x00\x74\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0e\
\x02\x9c\x73\xc7\
\x00\x70\
\x00\x75\x00\x73\x00\x68\x00\x62\x00\x75\x00\x74\x00\x74\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x10\
\x03\x37\xcf\x07\
\x00\x70\
\x00\x61\x00\x6e\x00\x65\x00\x6c\x00\x5f\x00\x62\x00\x75\x00\x74\x00\x74\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0e\
\x02\xde\x12\xa7\
\x00\x69\
\x00\x6e\x00\x70\x00\x75\x00\x74\x00\x66\x00\x69\x00\x65\x00\x6c\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0a\
\x01\xb9\x3c\x07\
\x00\x6c\
\x00\x6f\x00\x63\x00\x6b\x00\x65\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0f\
\x0f\x51\xa1\x47\
\x00\x73\
\x00\x69\x00\x64\x00\x65\x00\x62\x00\x61\x00\x72\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0c\
\x08\x41\xf4\xa7\
\x00\x6c\
\x00\x69\x00\x6e\x00\x6b\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x1a\
\x00\x03\x58\x07\
\x00\x73\
\x00\x70\x00\x6c\x00\x69\x00\x74\x00\x62\x00\x75\x00\x74\x00\x74\x00\x6f\x00\x6e\x00\x5f\x00\x68\x00\x6f\x00\x72\x00\x69\x00\x7a\
\x00\x6f\x00\x6e\x00\x74\x00\x61\x00\x6c\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x18\
\x0c\x34\x0b\x47\
\x00\x70\
\x00\x61\x00\x6e\x00\x65\x00\x6c\x00\x5f\x00\x62\x00\x75\x00\x74\x00\x74\x00\x6f\x00\x6e\x00\x5f\x00\x63\x00\x68\x00\x65\x00\x63\
\x00\x6b\x00\x65\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0d\
\x0d\xd4\x2f\x87\
\x00\x73\
\x00\x74\x00\x61\x00\x74\x00\x75\x00\x73\x00\x62\x00\x61\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0c\
\x0e\x3d\x3c\x07\
\x00\x75\
\x00\x6e\x00\x6c\x00\x6f\x00\x63\x00\x6b\x00\x65\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x14\
\x09\x87\xa0\x87\
\x00\x70\
\x00\x75\x00\x73\x00\x68\x00\x62\x00\x75\x00\x74\x00\x74\x00\x6f\x00\x6e\x00\x5f\x00\x68\x00\x6f\x00\x76\x00\x65\x00\x72\x00\x2e\
\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x10\x00\x02\x00\x00\x00\x15\x00\x00\x00\x03\
\x00\x00\x02\xa4\x00\x00\x00\x00\x00\x01\x00\x00\x19\xb4\
\x00\x00\x00\x22\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x02\x48\x00\x00\x00\x00\x00\x01\x00\x00\x14\x45\
\x00\x00\x01\xde\x00\x00\x00\x00\x00\x01\x00\x00\x0e\xfe\
\x00\x00\x02\x26\x00\x00\x00\x00\x00\x01\x00\x00\x12\xc6\
\x00\x00\x02\x00\x00\x00\x00\x00\x00\x01\x00\x00\x10\xea\
\x00\x00\x01\x06\x00\x00\x00\x00\x00\x01\x00\x00\x08\x10\
\x00\x00\x01\x78\x00\x00\x00\x00\x00\x01\x00\x00\x0b\xf3\
\x00\x00\x01\xba\x00\x00\x00\x00\x00\x01\x00\x00\x0d\xda\
\x00\x00\x02\x86\x00\x00\x00\x00\x00\x01\x00\x00\x17\x60\
\x00\x00\x00\xe6\x00\x00\x00\x00\x00\x01\x00\x00\x06\x55\
\x00\x00\x03\x52\x00\x00\x00\x00\x00\x01\x00\x00\x1f\x55\
\x00\x00\x01\x58\x00\x00\x00\x00\x00\x01\x00\x00\x0a\x96\
\x00\x00\x00\xb4\x00\x00\x00\x00\x00\x01\x00\x00\x04\x4b\
\x00\x00\x00\x56\x00\x00\x00\x00\x00\x01\x00\x00\x01\x7f\
\x00\x00\x02\xde\x00\x00\x00\x00\x00\x01\x00\x00\x1b\x50\
\x00\x00\x00\x88\x00\x00\x00\x00\x00\x01\x00\x00\x03\x08\
\x00\x00\x01\x3c\x00\x00\x00\x00\x00\x01\x00\x00\x09\xe6\
\x00\x00\x03\x14\x00\x00\x00\x00\x00\x01\x00\x00\x1d\x06\
\x00\x00\x03\x34\x00\x00\x00\x00\x00\x01\x00\x00\x1e\x39\
\x00\x00\x02\x62\x00\x00\x00\x00\x00\x01\x00\x00\x15\xa8\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| [
"zhuowei@localhost"
] | zhuowei@localhost |
b67c13a01e127f6498d27a720852819fe114e73b | fb1e852da0a026fb59c8cb24aeb40e62005501f1 | /dit/text_detection/ditod/mytrainer.py | 2ef2808133b8f07e4fa5b40963add1cd78ee60c7 | [
"LGPL-2.1-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | microsoft/unilm | 134aa44867c5ed36222220d3f4fd9616d02db573 | b60c741f746877293bb85eed6806736fc8fa0ffd | refs/heads/master | 2023-08-31T04:09:05.779071 | 2023-08-29T14:07:57 | 2023-08-29T14:07:57 | 198,350,484 | 15,313 | 2,192 | MIT | 2023-08-19T11:33:20 | 2019-07-23T04:15:28 | Python | UTF-8 | Python | false | false | 28,476 | py | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
"""
This file contains components with some default boilerplate logic user may need
in training / testing. They will not work for everyone, but many users may find them useful.
The behavior of functions/classes in this file is subject to change,
since they are meant to represent the "common default behavior" people need in their projects.
"""
import argparse
import logging
import os
import sys
import weakref
from collections import OrderedDict
from typing import Optional
import torch
from fvcore.nn.precise_bn import get_bn_modules
from omegaconf import OmegaConf
from torch.nn.parallel import DistributedDataParallel
import detectron2.data.transforms as T
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import CfgNode, LazyConfig
from detectron2.data import (
MetadataCatalog,
build_detection_test_loader,
build_detection_train_loader,
)
from detectron2.evaluation import (
DatasetEvaluator,
inference_on_dataset,
print_csv_format,
verify_results,
)
from detectron2.modeling import build_model
from detectron2.solver import build_lr_scheduler, build_optimizer
from detectron2.utils import comm
from detectron2.utils.collect_env import collect_env_info
from detectron2.utils.env import seed_all_rng
from detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import setup_logger
from detectron2.engine import hooks
from detectron2.engine.train_loop import AMPTrainer, SimpleTrainer, TrainerBase
from .mycheckpointer import MyDetectionCheckpointer
from typing import Any, Dict, List, Set
import itertools
from detectron2.solver.build import maybe_add_gradient_clipping
from .dataset_mapper import DetrDatasetMapper
from .funsd_evaluation import FUNSDEvaluator
__all__ = [
"create_ddp_model",
"default_argument_parser",
"default_setup",
"default_writers",
"DefaultPredictor",
"MyTrainer",
]
def create_ddp_model(model, *, fp16_compression=False, **kwargs):
"""
Create a DistributedDataParallel model if there are >1 processes.
Args:
model: a torch.nn.Module
fp16_compression: add fp16 compression hooks to the ddp object.
See more at https://pytorch.org/docs/stable/ddp_comm_hooks.html#torch.distributed.algorithms.ddp_comm_hooks.default_hooks.fp16_compress_hook
kwargs: other arguments of :module:`torch.nn.parallel.DistributedDataParallel`.
""" # noqa
if comm.get_world_size() == 1:
return model
if "device_ids" not in kwargs:
kwargs["device_ids"] = [comm.get_local_rank()]
ddp = DistributedDataParallel(model, **kwargs)
if fp16_compression:
from torch.distributed.algorithms.ddp_comm_hooks import default as comm_hooks
ddp.register_comm_hook(state=None, hook=comm_hooks.fp16_compress_hook)
return ddp
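# Minimal usage sketch, assuming a model has already been built; the tiny
# torch.nn.Linear below stands in for a real detectron2 model.
def _example_create_ddp_model():
    toy = torch.nn.Linear(8, 2)
    # With world_size == 1 the model comes back unchanged; with more
    # processes it is wrapped in DistributedDataParallel.
    return create_ddp_model(toy, broadcast_buffers=False)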
def default_argument_parser(epilog=None):
"""
Create a parser with some common arguments used by detectron2 users.
Args:
epilog (str): epilog passed to ArgumentParser describing the usage.
Returns:
argparse.ArgumentParser:
"""
parser = argparse.ArgumentParser(
epilog=epilog
or f"""
Examples:
Run on single machine:
$ {sys.argv[0]} --num-gpus 8 --config-file cfg.yaml
Change some config options:
$ {sys.argv[0]} --config-file cfg.yaml MODEL.WEIGHTS /path/to/weight.pth SOLVER.BASE_LR 0.001
Run on multiple machines:
(machine0)$ {sys.argv[0]} --machine-rank 0 --num-machines 2 --dist-url <URL> [--other-flags]
(machine1)$ {sys.argv[0]} --machine-rank 1 --num-machines 2 --dist-url <URL> [--other-flags]
""",
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file")
parser.add_argument(
"--resume",
action="store_true",
help="Whether to attempt to resume from the checkpoint directory. "
"See documentation of `MyTrainer.resume_or_load()` for what it means.",
)
parser.add_argument("--eval-only", action="store_true", help="perform evaluation only")
parser.add_argument("--num-gpus", type=int, default=1, help="number of gpus *per machine*")
parser.add_argument("--num-machines", type=int, default=1, help="total number of machines")
parser.add_argument(
"--machine-rank", type=int, default=0, help="the rank of this machine (unique per machine)"
)
# PyTorch still may leave orphan processes in multi-gpu training.
# Therefore we use a deterministic way to obtain port,
# so that users are aware of orphan processes by seeing the port occupied.
port = 2 ** 15 + 2 ** 14 + hash(os.getuid() if sys.platform != "win32" else 1) % 2 ** 14
parser.add_argument(
"--dist-url",
default="tcp://127.0.0.1:{}".format(port),
help="initialization URL for pytorch distributed backend. See "
"https://pytorch.org/docs/stable/distributed.html for details.",
)
parser.add_argument(
"opts",
help="""
Modify config options at the end of the command. For Yacs configs, use
space-separated "PATH.KEY VALUE" pairs.
For python-based LazyConfig, use "path.key=value".
""".strip(),
default=None,
nargs=argparse.REMAINDER,
)
return parser
def _try_get_key(cfg, *keys, default=None):
"""
Try select keys from cfg until the first key that exists. Otherwise return default.
"""
if isinstance(cfg, CfgNode):
cfg = OmegaConf.create(cfg.dump())
for k in keys:
none = object()
p = OmegaConf.select(cfg, k, default=none)
if p is not none:
return p
return default
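# Minimal sketch of the fallback behaviour: the first key that exists wins,
# otherwise `default` is returned ("./out" here, not "/tmp").
def _example_try_get_key():
    cfg = OmegaConf.create({"train": {"output_dir": "./out"}})
    return _try_get_key(cfg, "OUTPUT_DIR", "train.output_dir", default="/tmp")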
def _highlight(code, filename):
try:
import pygments
except ImportError:
return code
from pygments.lexers import Python3Lexer, YamlLexer
from pygments.formatters import Terminal256Formatter
lexer = Python3Lexer() if filename.endswith(".py") else YamlLexer()
code = pygments.highlight(code, lexer, Terminal256Formatter(style="monokai"))
return code
def default_setup(cfg, args):
"""
Perform some basic common setups at the beginning of a job, including:
1. Set up the detectron2 logger
2. Log basic information about environment, cmdline arguments, and config
3. Backup the config to the output directory
Args:
cfg (CfgNode or omegaconf.DictConfig): the full config to be used
args (argparse.NameSpace): the command line arguments to be logged
"""
output_dir = _try_get_key(cfg, "OUTPUT_DIR", "output_dir", "train.output_dir")
if comm.is_main_process() and output_dir:
PathManager.mkdirs(output_dir)
rank = comm.get_rank()
setup_logger(output_dir, distributed_rank=rank, name="fvcore")
logger = setup_logger(output_dir, distributed_rank=rank)
logger.info("Rank of current process: {}. World size: {}".format(rank, comm.get_world_size()))
logger.info("Environment info:\n" + collect_env_info())
logger.info("Command line arguments: " + str(args))
if hasattr(args, "config_file") and args.config_file != "":
logger.info(
"Contents of args.config_file={}:\n{}".format(
args.config_file,
_highlight(PathManager.open(args.config_file, "r").read(), args.config_file),
)
)
if comm.is_main_process() and output_dir:
# Note: some of our scripts may expect the existence of
# config.yaml in output directory
path = os.path.join(output_dir, "config.yaml")
if isinstance(cfg, CfgNode):
logger.info("Running with full config:\n{}".format(_highlight(cfg.dump(), ".yaml")))
with PathManager.open(path, "w") as f:
f.write(cfg.dump())
else:
LazyConfig.save(cfg, path)
logger.info("Full config saved to {}".format(path))
# make sure each worker has a different, yet deterministic seed if specified
seed = _try_get_key(cfg, "SEED", "train.seed", default=-1)
seed_all_rng(None if seed < 0 else seed + rank)
# cudnn benchmark has large overhead. It shouldn't be used considering the small size of
# typical validation set.
if not (hasattr(args, "eval_only") and args.eval_only):
torch.backends.cudnn.benchmark = _try_get_key(
cfg, "CUDNN_BENCHMARK", "train.cudnn_benchmark", default=False
)
def default_writers(output_dir: str, max_iter: Optional[int] = None):
"""
Build a list of :class:`EventWriter` to be used.
It now consists of a :class:`CommonMetricPrinter`,
:class:`TensorboardXWriter` and :class:`JSONWriter`.
Args:
output_dir: directory to store JSON metrics and tensorboard events
max_iter: the total number of iterations
Returns:
list[EventWriter]: a list of :class:`EventWriter` objects.
"""
PathManager.mkdirs(output_dir)
return [
# It may not always print what you want to see, since it prints "common" metrics only.
CommonMetricPrinter(max_iter),
JSONWriter(os.path.join(output_dir, "metrics.json")),
TensorboardXWriter(output_dir),
]
class DefaultPredictor:
"""
Create a simple end-to-end predictor with the given config that runs on
single device for a single input image.
Compared to using the model directly, this class does the following additions:
1. Load checkpoint from `cfg.MODEL.WEIGHTS`.
2. Always take BGR image as the input and apply conversion defined by `cfg.INPUT.FORMAT`.
3. Apply resizing defined by `cfg.INPUT.{MIN,MAX}_SIZE_TEST`.
4. Take one input image and produce a single output, instead of a batch.
This is meant for simple demo purposes, so it does the above steps automatically.
This is not meant for benchmarks or running complicated inference logic.
If you'd like to do anything more complicated, please refer to its source code as
examples to build and use the model manually.
Attributes:
metadata (Metadata): the metadata of the underlying dataset, obtained from
cfg.DATASETS.TEST.
Examples:
::
pred = DefaultPredictor(cfg)
inputs = cv2.imread("input.jpg")
outputs = pred(inputs)
"""
def __init__(self, cfg):
self.cfg = cfg.clone() # cfg can be modified by model
self.model = build_model(self.cfg)
self.model.eval()
if len(cfg.DATASETS.TEST):
self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])
checkpointer = DetectionCheckpointer(self.model)
checkpointer.load(cfg.MODEL.WEIGHTS)
self.aug = T.ResizeShortestEdge(
[cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
)
self.input_format = cfg.INPUT.FORMAT
assert self.input_format in ["RGB", "BGR"], self.input_format
def __call__(self, original_image):
"""
Args:
original_image (np.ndarray): an image of shape (H, W, C) (in BGR order).
Returns:
predictions (dict):
the output of the model for one image only.
See :doc:`/tutorials/models` for details about the format.
"""
with torch.no_grad(): # https://github.com/sphinx-doc/sphinx/issues/4258
# Apply pre-processing to image.
if self.input_format == "RGB":
# whether the model expects BGR inputs or RGB
original_image = original_image[:, :, ::-1]
height, width = original_image.shape[:2]
image = self.aug.get_transform(original_image).apply_image(original_image)
image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
inputs = {"image": image, "height": height, "width": width}
predictions = self.model([inputs])[0]
return predictions
class MyTrainer(TrainerBase):
"""
A trainer with default training logic. It does the following:
1. Create a :class:`SimpleTrainer` using model, optimizer, dataloader
defined by the given config. Create a LR scheduler defined by the config.
2. Load the last checkpoint or `cfg.MODEL.WEIGHTS`, if exists, when
`resume_or_load` is called.
3. Register a few common hooks defined by the config.
It is created to simplify the **standard model training workflow** and reduce code boilerplate
for users who only need the standard training workflow, with standard features.
It means this class makes *many assumptions* about your training logic that
may easily become invalid in a new research. In fact, any assumptions beyond those made in the
:class:`SimpleTrainer` are too much for research.
The code of this class has been annotated about restrictive assumptions it makes.
When they do not work for you, you're encouraged to:
1. Overwrite methods of this class, OR:
2. Use :class:`SimpleTrainer`, which only does minimal SGD training and
nothing else. You can then add your own hooks if needed. OR:
3. Write your own training loop similar to `tools/plain_train_net.py`.
See the :doc:`/tutorials/training` tutorials for more details.
Note that the behavior of this class, like other functions/classes in
this file, is not stable, since it is meant to represent the "common default behavior".
It is only guaranteed to work well with the standard models and training workflow in detectron2.
To obtain more stable behavior, write your own training logic with other public APIs.
Examples:
::
trainer = MyTrainer(cfg)
trainer.resume_or_load() # load last checkpoint or MODEL.WEIGHTS
trainer.train()
Attributes:
scheduler:
checkpointer (DetectionCheckpointer):
cfg (CfgNode):
"""
def __init__(self, cfg):
"""
Args:
cfg (CfgNode):
"""
super().__init__()
logger = logging.getLogger("detectron2")
if not logger.isEnabledFor(logging.INFO): # setup_logger is not called for d2
setup_logger()
cfg = MyTrainer.auto_scale_workers(cfg, comm.get_world_size())
self.cfg = cfg
# Assume these objects must be constructed in this order.
model = self.build_model(cfg)
optimizer = self.build_optimizer(cfg, model)
data_loader = self.build_train_loader(cfg)
model = create_ddp_model(model, broadcast_buffers=False)
self._trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer)(
model, data_loader, optimizer
)
self.scheduler = self.build_lr_scheduler(cfg, optimizer)
self.checkpointer = MyDetectionCheckpointer(
# Assume you want to save checkpoints together with logs/statistics
model,
cfg.OUTPUT_DIR,
trainer=weakref.proxy(self),
)
self.start_iter = 0
self.max_iter = cfg.SOLVER.MAX_ITER
self.cfg = cfg
self.register_hooks(self.build_hooks())
def resume_or_load(self, resume=True):
"""
If `resume==True` and `cfg.OUTPUT_DIR` contains the last checkpoint (defined by
a `last_checkpoint` file), resume from the file. Resuming means loading all
available states (eg. optimizer and scheduler) and update iteration counter
from the checkpoint. ``cfg.MODEL.WEIGHTS`` will not be used.
Otherwise, this is considered as an independent training. The method will load model
weights from the file `cfg.MODEL.WEIGHTS` (but will not load other states) and start
from iteration 0.
Args:
resume (bool): whether to do resume or not
"""
self.checkpointer.resume_or_load(self.cfg.MODEL.WEIGHTS, resume=resume)
if resume and self.checkpointer.has_checkpoint():
# The checkpoint stores the training iteration that just finished, thus we start
# at the next iteration
self.start_iter = self.iter + 1
def build_hooks(self):
"""
Build a list of default hooks, including timing, evaluation,
checkpointing, lr scheduling, precise BN, writing events.
Returns:
list[HookBase]:
"""
cfg = self.cfg.clone()
cfg.defrost()
cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN
ret = [
hooks.IterationTimer(),
hooks.LRScheduler(),
hooks.PreciseBN(
# Run at the same freq as (but before) evaluation.
cfg.TEST.EVAL_PERIOD,
self.model,
# Build a new data loader to not affect training
self.build_train_loader(cfg),
cfg.TEST.PRECISE_BN.NUM_ITER,
)
if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model)
else None,
]
# Do PreciseBN before checkpointer, because it updates the model and need to
# be saved by checkpointer.
# This is not always the best: if checkpointing has a different frequency,
# some checkpoints may have more precise statistics than others.
if comm.is_main_process():
ret.append(hooks.PeriodicCheckpointer(self.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD))
def test_and_save_results():
self._last_eval_results = self.test(self.cfg, self.model)
return self._last_eval_results
# Do evaluation after checkpointer, because then if it fails,
# we can use the saved checkpoint to debug.
ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results))
if comm.is_main_process():
# Here the default print/log frequency of each writer is used.
# run writers in the end, so that evaluation metrics are written
ret.append(hooks.PeriodicWriter(self.build_writers(), period=20))
return ret
def build_writers(self):
"""
Build a list of writers to be used using :func:`default_writers()`.
If you'd like a different list of writers, you can overwrite it in
your trainer.
Returns:
list[EventWriter]: a list of :class:`EventWriter` objects.
"""
return default_writers(self.cfg.OUTPUT_DIR, self.max_iter)
def train(self):
"""
Run training.
Returns:
OrderedDict of results, if evaluation is enabled. Otherwise None.
"""
super().train(self.start_iter, self.max_iter)
if len(self.cfg.TEST.EXPECTED_RESULTS) and comm.is_main_process():
assert hasattr(
self, "_last_eval_results"
), "No evaluation results obtained during training!"
verify_results(self.cfg, self._last_eval_results)
return self._last_eval_results
def run_step(self):
self._trainer.iter = self.iter
self._trainer.run_step()
@classmethod
def build_model(cls, cfg):
"""
Returns:
torch.nn.Module:
It now calls :func:`detectron2.modeling.build_model`.
Overwrite it if you'd like a different model.
"""
model = build_model(cfg)
logger = logging.getLogger(__name__)
logger.info("Model:\n{}".format(model))
return model
@classmethod
def build_optimizer(cls, cfg, model):
params: List[Dict[str, Any]] = []
memo: Set[torch.nn.parameter.Parameter] = set()
for key, value in model.named_parameters(recurse=True):
if not value.requires_grad:
continue
# Avoid duplicating parameters
if value in memo:
continue
memo.add(value)
lr = cfg.SOLVER.BASE_LR
weight_decay = cfg.SOLVER.WEIGHT_DECAY
if "backbone" in key:
lr = lr * cfg.SOLVER.BACKBONE_MULTIPLIER
params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
def maybe_add_full_model_gradient_clipping(optim): # optim: the optimizer class
# detectron2 doesn't have full model gradient clipping now
clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE
enable = (
cfg.SOLVER.CLIP_GRADIENTS.ENABLED
and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model"
and clip_norm_val > 0.0
)
class FullModelGradientClippingOptimizer(optim):
def step(self, closure=None):
all_params = itertools.chain(*[x["params"] for x in self.param_groups])
torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
super().step(closure=closure)
return FullModelGradientClippingOptimizer if enable else optim
optimizer_type = cfg.SOLVER.OPTIMIZER
if optimizer_type == "SGD":
optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)(
params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM
)
elif optimizer_type == "ADAMW":
optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)(
params, cfg.SOLVER.BASE_LR
)
else:
raise NotImplementedError(f"no optimizer type {optimizer_type}")
if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model":
optimizer = maybe_add_gradient_clipping(cfg, optimizer)
return optimizer
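    # Sketch of the per-parameter-group logic above, in isolation: backbone
    # weights train at BASE_LR * BACKBONE_MULTIPLIER, everything else at
    # BASE_LR. The parameter names here are hypothetical examples.
    @staticmethod
    def _example_backbone_lr(base_lr=1e-4, multiplier=0.1):
        names = ["backbone.stem.conv1.weight", "roi_heads.box_head.fc1.weight"]
        return {n: base_lr * (multiplier if "backbone" in n else 1.0) for n in names}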
@classmethod
def build_lr_scheduler(cls, cfg, optimizer):
"""
It now calls :func:`detectron2.solver.build_lr_scheduler`.
Overwrite it if you'd like a different scheduler.
"""
return build_lr_scheduler(cfg, optimizer)
@classmethod
def build_train_loader(cls, cfg):
if cfg.AUG.DETR:
mapper = DetrDatasetMapper(cfg, is_train=True)
else:
mapper = None
return build_detection_train_loader(cfg, mapper=mapper)
@classmethod
def build_test_loader(cls, cfg, dataset_name):
"""
Returns:
iterable
It now calls :func:`detectron2.data.build_detection_test_loader`.
Overwrite it if you'd like a different data loader.
"""
return build_detection_test_loader(cfg, dataset_name)
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
return FUNSDEvaluator(dataset_name, output_dir=output_folder)
@classmethod
def test(cls, cfg, model, evaluators=None):
"""
Evaluate the given model. The given model is expected to already contain
weights to evaluate.
Args:
cfg (CfgNode):
model (nn.Module):
evaluators (list[DatasetEvaluator] or None): if None, will call
:meth:`build_evaluator`. Otherwise, must have the same length as
``cfg.DATASETS.TEST``.
Returns:
dict: a dict of result metrics
"""
logger = logging.getLogger(__name__)
if isinstance(evaluators, DatasetEvaluator):
evaluators = [evaluators]
if evaluators is not None:
assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format(
len(cfg.DATASETS.TEST), len(evaluators)
)
results = OrderedDict()
for idx, dataset_name in enumerate(cfg.DATASETS.TEST):
data_loader = cls.build_test_loader(cfg, dataset_name)
# When evaluators are passed in as arguments,
# implicitly assume that evaluators can be created before data_loader.
if evaluators is not None:
evaluator = evaluators[idx]
else:
try:
evaluator = cls.build_evaluator(cfg, dataset_name)
except NotImplementedError:
logger.warn(
"No evaluator found. Use `MyTrainer.test(evaluators=)`, "
"or implement its `build_evaluator` method."
)
results[dataset_name] = {}
continue
results_i = inference_on_dataset(model, data_loader, evaluator)
results[dataset_name] = results_i
if comm.is_main_process():
assert isinstance(
results_i, dict
), "Evaluator must return a dict on the main process. Got {} instead.".format(
results_i
)
logger.info("Evaluation results for {} in csv format:".format(dataset_name))
print_csv_format(results_i)
if len(results) == 1:
results = list(results.values())[0]
return results
@staticmethod
def auto_scale_workers(cfg, num_workers: int):
"""
When the config is defined for certain number of workers (according to
``cfg.SOLVER.REFERENCE_WORLD_SIZE``) that's different from the number of
workers currently in use, returns a new cfg where the total batch size
is scaled so that the per-GPU batch size stays the same as the
original ``IMS_PER_BATCH // REFERENCE_WORLD_SIZE``.
Other config options are also scaled accordingly:
* training steps and warmup steps are scaled inverse proportionally.
* learning rate are scaled proportionally, following :paper:`ImageNet in 1h`.
For example, with the original config like the following:
.. code-block:: yaml
IMS_PER_BATCH: 16
BASE_LR: 0.1
REFERENCE_WORLD_SIZE: 8
MAX_ITER: 5000
STEPS: (4000,)
CHECKPOINT_PERIOD: 1000
When this config is used on 16 GPUs instead of the reference number 8,
calling this method will return a new config with:
.. code-block:: yaml
IMS_PER_BATCH: 32
BASE_LR: 0.2
REFERENCE_WORLD_SIZE: 16
MAX_ITER: 2500
STEPS: (2000,)
CHECKPOINT_PERIOD: 500
Note that both the original config and this new config can be trained on 16 GPUs.
It's up to user whether to enable this feature (by setting ``REFERENCE_WORLD_SIZE``).
Returns:
CfgNode: a new config. Same as original if ``cfg.SOLVER.REFERENCE_WORLD_SIZE==0``.
"""
old_world_size = cfg.SOLVER.REFERENCE_WORLD_SIZE
if old_world_size == 0 or old_world_size == num_workers:
return cfg
cfg = cfg.clone()
frozen = cfg.is_frozen()
cfg.defrost()
assert (
cfg.SOLVER.IMS_PER_BATCH % old_world_size == 0
), "Invalid REFERENCE_WORLD_SIZE in config!"
scale = num_workers / old_world_size
bs = cfg.SOLVER.IMS_PER_BATCH = int(round(cfg.SOLVER.IMS_PER_BATCH * scale))
lr = cfg.SOLVER.BASE_LR = cfg.SOLVER.BASE_LR * scale
max_iter = cfg.SOLVER.MAX_ITER = int(round(cfg.SOLVER.MAX_ITER / scale))
warmup_iter = cfg.SOLVER.WARMUP_ITERS = int(round(cfg.SOLVER.WARMUP_ITERS / scale))
cfg.SOLVER.STEPS = tuple(int(round(s / scale)) for s in cfg.SOLVER.STEPS)
cfg.TEST.EVAL_PERIOD = int(round(cfg.TEST.EVAL_PERIOD / scale))
cfg.SOLVER.CHECKPOINT_PERIOD = int(round(cfg.SOLVER.CHECKPOINT_PERIOD / scale))
cfg.SOLVER.REFERENCE_WORLD_SIZE = num_workers # maintain invariant
logger = logging.getLogger(__name__)
logger.info(
f"Auto-scaling the config to batch_size={bs}, learning_rate={lr}, "
f"max_iter={max_iter}, warmup={warmup_iter}."
)
if frozen:
cfg.freeze()
return cfg
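    # Sketch of the scaling arithmetic above, in isolation: doubling the
    # worker count doubles the batch size and LR and halves the iteration
    # counts, matching the yaml example in the docstring.
    @staticmethod
    def _example_auto_scale(ims_per_batch=16, base_lr=0.1, ref=8, workers=16):
        scale = workers / ref
        return int(round(ims_per_batch * scale)), base_lr * scale  # -> (32, 0.2)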
# Access basic attributes from the underlying trainer
for _attr in ["model", "data_loader", "optimizer"]:
setattr(
MyTrainer,
_attr,
property(
# getter
lambda self, x=_attr: getattr(self._trainer, x),
# setter
lambda self, value, x=_attr: setattr(self._trainer, x, value),
),
)
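# The loop above makes e.g. `trainer.model` a read/write proxy for
# `trainer._trainer.model`. A minimal sketch of the same pattern with
# stand-in classes:
def _example_proxy_property():
    class _Inner:  # stands in for SimpleTrainer
        model = "net"
    class _Outer:  # stands in for MyTrainer
        def __init__(self):
            self._trainer = _Inner()
    for attr in ["model"]:
        setattr(_Outer, attr, property(lambda self, x=attr: getattr(self._trainer, x)))
    return _Outer().model  # -> "net"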
| [
"[email protected]"
] | |
656bb9c00f88f4d7c727692e5dc9a684b9ffb60a | 2ed86a79d0fcd299ad4a01310954c5eddcf01edf | /homeassistant/components/reolink/switch.py | 1a4deda17e3f1e0b885a9ea80479aef28cb1f882 | [
"Apache-2.0"
] | permissive | konnected-io/home-assistant | 037f12c87bb79e19220192eb918e49db1b1a8b3e | 2e65b77b2b5c17919939481f327963abdfdc53f0 | refs/heads/dev | 2023-05-11T08:57:41.891518 | 2023-05-07T20:03:37 | 2023-05-07T20:03:37 | 109,931,626 | 24 | 10 | Apache-2.0 | 2023-02-22T06:24:01 | 2017-11-08T05:27:21 | Python | UTF-8 | Python | false | false | 8,720 | py | """Component providing support for Reolink switch entities."""
from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass
from typing import Any
from reolink_aio.api import Host
from homeassistant.components.switch import SwitchEntity, SwitchEntityDescription
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import EntityCategory
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import ReolinkData
from .const import DOMAIN
from .entity import ReolinkChannelCoordinatorEntity, ReolinkHostCoordinatorEntity
@dataclass
class ReolinkSwitchEntityDescriptionMixin:
"""Mixin values for Reolink switch entities."""
value: Callable[[Host, int], bool]
method: Callable[[Host, int, bool], Any]
@dataclass
class ReolinkSwitchEntityDescription(
SwitchEntityDescription, ReolinkSwitchEntityDescriptionMixin
):
"""A class that describes switch entities."""
supported: Callable[[Host, int], bool] = lambda api, ch: True
@dataclass
class ReolinkNVRSwitchEntityDescriptionMixin:
"""Mixin values for Reolink NVR switch entities."""
value: Callable[[Host], bool]
method: Callable[[Host, bool], Any]
@dataclass
class ReolinkNVRSwitchEntityDescription(
SwitchEntityDescription, ReolinkNVRSwitchEntityDescriptionMixin
):
"""A class that describes NVR switch entities."""
supported: Callable[[Host], bool] = lambda api: True
SWITCH_ENTITIES = (
ReolinkSwitchEntityDescription(
key="record_audio",
name="Record audio",
icon="mdi:microphone",
entity_category=EntityCategory.CONFIG,
supported=lambda api, ch: api.supported(ch, "audio"),
value=lambda api, ch: api.audio_record(ch),
method=lambda api, ch, value: api.set_audio(ch, value),
),
ReolinkSwitchEntityDescription(
key="siren_on_event",
name="Siren on event",
icon="mdi:alarm-light",
entity_category=EntityCategory.CONFIG,
supported=lambda api, ch: api.supported(ch, "siren"),
value=lambda api, ch: api.audio_alarm_enabled(ch),
method=lambda api, ch, value: api.set_audio_alarm(ch, value),
),
ReolinkSwitchEntityDescription(
key="auto_tracking",
name="Auto tracking",
icon="mdi:target-account",
entity_category=EntityCategory.CONFIG,
supported=lambda api, ch: api.supported(ch, "auto_track"),
value=lambda api, ch: api.auto_track_enabled(ch),
method=lambda api, ch, value: api.set_auto_tracking(ch, value),
),
ReolinkSwitchEntityDescription(
key="auto_focus",
name="Auto focus",
icon="mdi:focus-field",
entity_category=EntityCategory.CONFIG,
supported=lambda api, ch: api.supported(ch, "auto_focus"),
value=lambda api, ch: api.autofocus_enabled(ch),
method=lambda api, ch, value: api.set_autofocus(ch, value),
),
ReolinkSwitchEntityDescription(
key="gaurd_return",
name="Guard return",
icon="mdi:crosshairs-gps",
entity_category=EntityCategory.CONFIG,
supported=lambda api, ch: api.supported(ch, "ptz_guard"),
value=lambda api, ch: api.ptz_guard_enabled(ch),
method=lambda api, ch, value: api.set_ptz_guard(ch, enable=value),
),
ReolinkSwitchEntityDescription(
key="doorbell_button_sound",
name="Doorbell button sound",
icon="mdi:volume-high",
entity_category=EntityCategory.CONFIG,
supported=lambda api, ch: api.supported(ch, "doorbell_button_sound"),
value=lambda api, ch: api.doorbell_button_sound(ch),
method=lambda api, ch, value: api.set_volume(ch, doorbell_button_sound=value),
),
)
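# Minimal sketch of how the callables on a description are consumed by the
# entity classes below; `api` and `channel` are hypothetical stand-ins for
# a connected Host, not part of this module's API.
def _example_description_dispatch(api: Host, channel: int) -> bool:
    desc = SWITCH_ENTITIES[0]  # the "record_audio" description
    if desc.supported(api, channel):
        return desc.value(api, channel)
    return False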
NVR_SWITCH_ENTITIES = (
ReolinkNVRSwitchEntityDescription(
key="email",
name="Email on event",
icon="mdi:email",
entity_category=EntityCategory.CONFIG,
supported=lambda api: api.supported(None, "email"),
value=lambda api: api.email_enabled(),
method=lambda api, value: api.set_email(None, value),
),
ReolinkNVRSwitchEntityDescription(
key="ftp_upload",
name="FTP upload",
icon="mdi:swap-horizontal",
entity_category=EntityCategory.CONFIG,
supported=lambda api: api.supported(None, "ftp"),
value=lambda api: api.ftp_enabled(),
method=lambda api, value: api.set_ftp(None, value),
),
ReolinkNVRSwitchEntityDescription(
key="push_notifications",
name="Push notifications",
icon="mdi:message-badge",
entity_category=EntityCategory.CONFIG,
supported=lambda api: api.supported(None, "push"),
value=lambda api: api.push_enabled(),
method=lambda api, value: api.set_push(None, value),
),
ReolinkNVRSwitchEntityDescription(
key="record",
name="Record",
icon="mdi:record-rec",
supported=lambda api: api.supported(None, "recording"),
value=lambda api: api.recording_enabled(),
method=lambda api, value: api.set_recording(None, value),
),
ReolinkNVRSwitchEntityDescription(
key="buzzer",
name="Buzzer on event",
icon="mdi:room-service",
entity_category=EntityCategory.CONFIG,
supported=lambda api: api.supported(None, "buzzer"),
value=lambda api: api.buzzer_enabled(),
method=lambda api, value: api.set_buzzer(None, value),
),
)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up a Reolink switch entities."""
reolink_data: ReolinkData = hass.data[DOMAIN][config_entry.entry_id]
entities: list[ReolinkSwitchEntity | ReolinkNVRSwitchEntity] = [
ReolinkSwitchEntity(reolink_data, channel, entity_description)
for entity_description in SWITCH_ENTITIES
for channel in reolink_data.host.api.channels
if entity_description.supported(reolink_data.host.api, channel)
]
entities.extend(
[
ReolinkNVRSwitchEntity(reolink_data, entity_description)
for entity_description in NVR_SWITCH_ENTITIES
if entity_description.supported(reolink_data.host.api)
]
)
async_add_entities(entities)
class ReolinkSwitchEntity(ReolinkChannelCoordinatorEntity, SwitchEntity):
"""Base switch entity class for Reolink IP cameras."""
entity_description: ReolinkSwitchEntityDescription
def __init__(
self,
reolink_data: ReolinkData,
channel: int,
entity_description: ReolinkSwitchEntityDescription,
) -> None:
"""Initialize Reolink switch entity."""
super().__init__(reolink_data, channel)
self.entity_description = entity_description
self._attr_unique_id = (
f"{self._host.unique_id}_{channel}_{entity_description.key}"
)
@property
def is_on(self) -> bool:
"""Return true if switch is on."""
return self.entity_description.value(self._host.api, self._channel)
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn the entity on."""
await self.entity_description.method(self._host.api, self._channel, True)
self.async_write_ha_state()
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the entity off."""
await self.entity_description.method(self._host.api, self._channel, False)
self.async_write_ha_state()
class ReolinkNVRSwitchEntity(ReolinkHostCoordinatorEntity, SwitchEntity):
"""Switch entity class for Reolink NVR features."""
entity_description: ReolinkNVRSwitchEntityDescription
def __init__(
self,
reolink_data: ReolinkData,
entity_description: ReolinkNVRSwitchEntityDescription,
) -> None:
"""Initialize Reolink switch entity."""
super().__init__(reolink_data)
self.entity_description = entity_description
self._attr_unique_id = f"{self._host.unique_id}_{entity_description.key}"
@property
def is_on(self) -> bool:
"""Return true if switch is on."""
return self.entity_description.value(self._host.api)
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn the entity on."""
await self.entity_description.method(self._host.api, True)
self.async_write_ha_state()
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the entity off."""
await self.entity_description.method(self._host.api, False)
self.async_write_ha_state()
| [
"[email protected]"
] | |
f1fad7503083f1f7f276f2d592da0566c09e3c07 | ea9a3d74db2621e8f0c51a9a75fdeb392a121687 | /project_tutorial_django/web_app_django/blog/views.py | 35cbf2ec1e2faaee4e9c9ffe6826538fee72727a | [] | no_license | lmorente/course-init-python | b7635a556f07776f637934ddf1d90b5e4cdec9fe | 2804128759edc1f6bd4da6bf0c94bc2ab8211a75 | refs/heads/master | 2023-05-15T07:36:50.795006 | 2021-06-04T15:42:59 | 2021-06-04T15:42:59 | 372,309,396 | 0 | 0 | null | 2021-06-04T15:43:00 | 2021-05-30T20:29:12 | Python | UTF-8 | Python | false | false | 877 | py | from django.shortcuts import render, get_object_or_404
from django.core.paginator import Paginator
from django.contrib.auth.decorators import login_required
from .models import Category, Article
# Create your views here.
@login_required(login_url="login")
def articles(request):
articles = Article.objects.all()
paginator = Paginator(articles, 1)
page = request.GET.get('page')
page_articles = paginator.get_page(page)
return render(request, 'articles/list.html', {
'title': 'Artículos',
'articles': page_articles
})
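# Minimal sketch of the Paginator behaviour used above, in isolation: one
# object per page, so page 2 of ["a", "b", "c"] holds just "b".
def _example_paginator():
    p = Paginator(["a", "b", "c"], 1)
    return p.num_pages, list(p.get_page(2))  # -> (3, ["b"])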
@login_required(login_url="login")
def category(request, category_id):
category = get_object_or_404(Category, id=category_id)
articles = Article.objects.filter(categories=category_id)
return render(request, 'categories/category.html', {
'category': category,
'articles': articles
})
| [
"[email protected]"
] | |
9098bfdbd7db6283725e567cd3dda2d034e79410 | 27e6825265de8a1f481f2859f359311086e9331b | /main.py | 76bb7e7b049d6eff4da3387166909ebe28d01b62 | [] | no_license | wida00/Intento-fallido | 3fd74e790968c679878d64648511fca82f5c2d63 | 29b2b10f9c267c2551f9df0e9ac9eefb1d03c7a3 | refs/heads/master | 2023-02-04T10:08:03.359525 | 2019-02-23T00:37:38 | 2019-02-23T00:37:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | a=int(input("Introduce el primer número: "))
c=int(input("Introduce el segundo número: "))
producto=a*c
print("el producto de", a ,"y", c ,"es", producto)
doble_a=2*a
print("el doble de", a , "es", doble_a)
b=int(input("Introduce el cuarto número: "))
cuadrado_b=b**2
print("El cuadrado de", b , "es" ,cuadrado_b)
import math
d=int(input("Ingrese el quinto número: "))
raiz_d=math.sqrt(d)
print("la raiz cuadrada de", d ,"es", raiz_d) | [
"[email protected]"
] | |
43239bfd1d0df61388c3849fb6f59aa866dc62c3 | 8ba8ce658a4665d45d470d5ddc0c0b3357e46e4b | /Buttons.py | eebfe6dd1d3e3a5704764f2122f2a96918364998 | [] | no_license | Chidsuey/NestioScraper | e96698b9c81981ea10c257cc2160596ba38bc397 | 5edd40c3e66ec8134cea717844a8460bcf890a33 | refs/heads/master | 2023-06-26T14:47:31.436570 | 2021-07-31T18:49:38 | 2021-07-31T18:49:38 | 363,458,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,524 | py | from Main import Main
class Buttons:
@staticmethod
def import_button_click():
print("something finally happened")
Main.html_file = Main.fileHandler.open_html_file(Main.gui)
just_the_file_name = Main.fileHandler.get_just_the_file_name()
Main.gui.import_text_box.config(state=NORMAL)
Main.gui.import_text_box.delete(1.0, END)
Main.gui.import_text_box.insert(INSERT, str(just_the_file_name))
Main.gui.import_text_box.config(state=DISABLED)
def scrape_button_click(self):
Main.gui.import_button.config(state=DISABLED)
Main.gui.scrape_button.config(state=DISABLED)
Main.excelFileHandler.excel_file_setup()
link_list = Main.htmlFileScraper.scrape_html_file_for_links(Main.html_file, Main.coding)
Main.owner_name = Main.htmlFileScraper.owner_name_checked_for_errors
final_formatted_file_name = Main.fileHandler.format_file_name_output(Main.owner_name)
Main.log_file = Main.logHandler.create_new_log(final_formatted_file_name)
finalized_info_list = Main.linkScraper.scrape_links_for_data(link_list, Main.gui.links_remaining_text)
Main.excelFileHandler.update_spreadsheet(finalized_info_list)
def options_button_click(self):
options_window = Main.gui.options_window()
if Main.coding.get() == "utf-16":
options_window.radio_0.select()
elif Main.coding.get() == "utf-8":
options_window.radio_1.select()
options_window.mainloop() | [
"[email protected]"
] | |
e116a20437af55a8fa3ddfb6da75e2f7e9bad73c | 8b1084c2a1f205c37cbba85a1ebada04814fd95d | /setup.py | 229252a7fda84cd772399a40156c482046fbc857 | [
"BSD-3-Clause"
] | permissive | benburrill/formiko | ef6188ab2857875253c51bf26320a532ebe12fee | 86630506c537f9517666d9b0d5b2a905e7385b01 | refs/heads/master | 2022-11-11T18:38:53.492449 | 2020-06-12T08:22:55 | 2020-06-12T08:22:55 | 275,467,118 | 0 | 0 | NOASSERTION | 2020-06-27T23:00:27 | 2020-06-27T23:00:26 | null | UTF-8 | Python | false | false | 6,470 | py | #!/usr/bin/env python
from setuptools import setup
from docutils.core import publish_string
from docutils.writers.manpage import Writer
from io import open
from gzip import open as zopen
from distutils.command.build import build
from distutils.command.clean import clean
from distutils.command.install_data import install_data
from distutils.core import Command
from distutils.version import StrictVersion
from distutils.errors import DistutilsError
from distutils import log
from os import path, makedirs, listdir
from shutil import rmtree
from formiko import __version__, __url__, __comment__
def doc():
with open("README.rst", "r", encoding="utf-8") as readme:
return readme.read().strip()
def icons_data():
path = 'share/icons/hicolor'
icons = [("%s/scalable/apps" % path, ["icons/formiko.svg"])]
for size in (16, 22, 24, 32, 48, 64, 128, 256, 512):
icons.append(("%s/%dx%d/apps" % (path, size, size),
["icons/%dx%d/formiko.png" % (size, size)]))
return icons
def man_page(writer, src, dst):
with open(src, encoding="utf-8") as source:
rst = source.read().format(version=__version__)
with zopen(dst, 'wb') as destination:
destination.write(publish_string(source=rst, writer=writer))
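# Minimal sketch of building a single gzipped man page by hand, mirroring
# what XBuild.run does per page; both paths are hypothetical placeholders.
def _example_man_page():
    man_page(Writer(), "formiko.rst", "build/man/formiko.1.gz")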
class XBuild(build):
def initialize_options(self):
build.initialize_options(self)
self.man_base = None
def finalize_options(self):
build.finalize_options(self)
if self.man_base is None:
self.man_base = path.join(self.build_base, 'man')
def run(self):
build.run(self)
log.info("building man pages")
if self.dry_run:
return
writer = Writer()
if not path.exists(self.man_base):
makedirs(self.man_base)
for page in ('formiko', 'formiko-vim'):
log.info('manpage %s.rst -> %s/%s.1.gz'
% (page, self.man_base, page))
man_page(writer, page+'.rst', '%s/%s.1.gz' % (self.man_base, page))
class XClean(clean):
def initialize_options(self):
clean.initialize_options(self)
self.man_base = None
def finalize_options(self):
clean.finalize_options(self)
if self.man_base is None:
self.man_base = path.join(self.build_base, 'man')
def run(self):
clean.run(self)
log.info("clean man pages")
if self.dry_run:
return
if path.exists(self.man_base):
rmtree(self.man_base)
class XInstallData(install_data):
def initialize_options(self):
install_data.initialize_options(self)
self.man_base = None
self.build_base = None
def finalize_options(self):
install_data.finalize_options(self)
self.set_undefined_options('build', ('build_base', 'build_base'))
if self.man_base is None:
self.man_base = path.join(self.build_base, 'man')
def run(self):
self.data_files.append(
('share/man/man1',
list("%s/%s" % (self.man_base, page)
for page in listdir(self.man_base))))
install_data.run(self)
return False
class XCheckVersion(Command):
description = "check if all all versions in all files are same"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
pkg_version = StrictVersion(__version__)
log.info("package version is %s", pkg_version)
ch_version = StrictVersion(self.read_changelog())
log.info("ChangeLog version is %s", ch_version)
meta_version = StrictVersion(self.read_metainfo())
log.info("metainfo version is %s", meta_version)
if not pkg_version == ch_version == meta_version:
raise DistutilsError("Versions are not same!")
def read_changelog(self):
"""Read last version From ChangeLog."""
with open("ChangeLog", encoding="utf-8") as chl:
for line in chl:
if line.startswith("Version"):
return line[8:].strip()
def read_metainfo(self):
"""Read last version from formiko.metainfo.xml."""
with open("formiko.metainfo.xml", encoding="utf-8") as meta:
for line in meta:
if "<release " in line:
vals = dict((x.split('=') for x in
filter(lambda x: '=' in x, line.split(' '))))
return vals.get("version", "").strip('"')
setup(
name="formiko",
version=__version__,
description=__comment__,
author="Ondrej Tuma",
author_email="[email protected]",
url=__url__,
packages=['formiko'],
data_files=[('share/doc/formiko', ['README.rst', 'COPYING', 'ChangeLog',
'AUTHORS']),
("share/applications", ["formiko.desktop",
"formiko-vim.desktop"]),
("share/metainfo", ['formiko.metainfo.xml']),
('share/formiko/icons', ['icons/formiko.svg'])] + icons_data(),
keywords=["doc", "html", "rst", "docutils", "md", "markdown", "editor"],
license="BSD",
long_description=doc(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: X11 Applications :: GTK",
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Topic :: Documentation",
"Topic :: Software Development :: Documentation",
"Topic :: Text Editors :: Documentation",
"Topic :: Text Editors :: Text Processing",
"Topic :: Text Processing",
"Topic :: Text Processing :: Markup",
"Topic :: Text Processing :: Markup :: HTML",
"Topic :: Utilities"],
requires=['docutils (>= 0.12)', 'python_gi', 'webkit2', 'gtksourceview'],
install_requires=['docutils >= 0.12'],
entry_points={
'gui_scripts': [
'formiko = formiko.main:main',
'formiko-vim = formiko.main:main_vim'
]
},
cmdclass={'build': XBuild, 'clean': XClean, 'install_data': XInstallData,
'check_version': XCheckVersion}
)
| [
"[email protected]"
] | |
7c29829b18a503aa9a488e818fc301d5045785ec | 2d9005b2398691b2ed3f01da47d726ecef10f87d | /blog/migrations/0001_initial.py | 57bb77a1a40de40d25d813d6d76b7ce9a3f96132 | [] | no_license | artittiya/my-firts-blog | 05bb76138f665861fc79c726adf9871f61359ac6 | f1b38d9659435e994df83c09cf703d631d6885e6 | refs/heads/master | 2022-12-14T01:09:17.379229 | 2020-09-21T04:14:27 | 2020-09-21T04:14:27 | 295,311,895 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | # Generated by Django 3.1.1 on 2020-09-14 04:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"artittiya"
] | artittiya |
455a6d9d0beb69031b3fdee23506936411a93cef | 452d2b1ae77e092d8f243e7ee7853d39d82e50d1 | /qiwipyapi/request.py | 1faa5f5fb334ec1a249168f3165b155b7cc9b3ad | [
"MIT"
] | permissive | semenovsd/qiwipyapi | cc8779f27f129d5f040068411856054ea59f5851 | 190e5ba141549c10885bee597b52c90b298e86d3 | refs/heads/master | 2022-12-24T07:21:55.955105 | 2020-09-06T17:59:59 | 2020-09-06T17:59:59 | 291,787,048 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | import requests
from requests import RequestException
from qiwipyapi.utils import retry
@retry(RequestException, tries=3, delay=5)
def request(method, request_url, **kwargs):
try:
response = requests.Session().request(method=method, url=request_url, headers=kwargs.get('headers'),
params=kwargs.get('params'), data=kwargs.get('data'),
json=kwargs.get('json'))
except RequestException as e:
raise RequestException(e, method, request_url, kwargs)
else:
return response
| [
"[email protected]"
] | |
67d51ae226db8eddd9229a22fc2b983a8f6ec541 | 76b4329e495e8325d14e7b198f24d14464e9683a | /dotAi/Ball.py | 2181ef8b7cc88f5ec2a5c3187053d05bfe184ec3 | [] | no_license | concastor/Dot-AI | 08c5578472ee3e8dc372ac31d2220cc35bac7618 | a466c4e88072090678b55ff03e5b9630d5002370 | refs/heads/master | 2020-05-05T09:44:43.721151 | 2019-04-07T04:49:33 | 2019-04-07T04:49:33 | 179,915,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,169 | py | from tkinter import *
from Brain import brain
import math
# the ball (dot) agents that the genetic algorithm moves and scores
class Ball:
def __init__(self, canvas, colour, x, y):
self.brain = brain(1000)
self.x = x
self.y = y
self.moveNum = 0
self.moves = 0
self.finishedIndex = self.brain.size
self.isDead = False
self.finished = False
self.canvas = canvas
self.id = canvas.create_oval(x-5, y-5, x+5, y+5, fill=colour)
self.fitness = 0.0
#updates all the stats related to the ball
def update(self, fx, fy):
        #check if it's dead before doing anything
        if (self.isDead == False):
            #move the ball
            self.isDead = self.move()
            #check if it's dead after it moved
if self.isDead == False:
#check if ball is within bounds
if (self.x < 5 or self.x > 995 or self.y < 5 or self.y > 595):
self.isDead = True
else:
self.isDead = self.finish(fx,fy)
#increase number of moves taken
self.moves += 1
#finds distance to the finish
def distance(self, fx, fy):
        #first time I've had a use for the Pythagorean theorem
a = self.x - fx
b = self.y - fy
c = (a*a) + (b*b)
c = math.sqrt(c)
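        # equivalent one-liner: math.hypot(self.x - fx, self.y - fy)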
return c
    #check if the dot has reached (collided with) the finish point
def finish(self, fx, fy):
d = self.distance(fx,fy)
if d <= 15:
self.finished = True
self.finishedIndex = self.moveNum
return True
else:
return False
#moves the dot depending on the brain
def move(self):
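        # the band 400 < x < 600 at y == 300 acts as an obstacle that kills the dot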
if self.y == 300:
if self.x < 600 and self.x > 400:
self.isDead = True
return True
        #check if dot is out of moves
        if (self.moveNum >= self.brain.size):
            self.isDead = True
            return True
#move left
if(self.brain.moves[self.moveNum] == 1):
self.canvas.move(self.id, -5, 0)
self.x -= 5
#move up
elif(self.brain.moves[self.moveNum] == 2):
self.canvas.move(self.id, 0, -5)
self.y -= 5
#move right
        elif(self.brain.moves[self.moveNum] == 3):
self.canvas.move(self.id, 5, 0)
self.x += 5
#move down
elif(self.brain.moves[self.moveNum] == 4):
self.canvas.move(self.id, 0, 5)
self.y += 5
self.moveNum += 1
return False
#finds the fitness of the balls to determine what the next generation will look like
def findFitness(self, fx,fy):
self.fitness = 0.0
#score if dot finished
if (self.finished):
self.fitness += 1
self.fitness += 1.0/self.moves
else:
            #score if dot doesn't finish
            d = self.distance(fx, fy)
            self.fitness = 1.0 / (d * 2)
return self.fitness
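    # worked example (sketch): a dot that finishes in 40 moves scores 1 + 1/40 = 1.025,
    # while a dead dot 50 px from the goal scores 1/(50*2) = 0.01, so any finisher
    # outranks any non-finisher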
| [
"[email protected]"
] | |
0ac91612d4f7e233be256f3b64ef59542e2f0181 | 85bd1011de7b70b3bc3f9a0d8f70a36b63023a6a | /learn/urls.py | 163defb127a04e170fdbe285f27d43349b82ede5 | [] | no_license | myusuf002/textta | 9e7efc4cbd8b7acf39d62a12543bde0b78eff0ef | a9e3b785690f79752875dbdb6becea871b868c1b | refs/heads/master | 2022-11-19T02:06:48.110119 | 2020-07-15T06:48:01 | 2020-07-15T06:48:01 | 234,483,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | from django.urls import path
from . import views
urlpatterns = [
    path('', views.viewIndex, name="learn"),
    path('vectorizer', views.viewVectorizer, name="vectorizer"),
] | [
"[email protected]"
] | |
cfab454dcc7ce9d45e0246195bf3d3baa357fa25 | 01991c3d1ace66505dc4f491e5c095497e74c55d | /movies/objects.py | 0cd88b9c417019f985d4b9f29c8007c5d9613bd7 | [] | no_license | Falconier/Python-Stuff | 57655494bb80b9a8abbc35c0827f1a44ce2f6d2d | 944aeab9f01a4e03c5943cdbec9f677c97cd7382 | refs/heads/master | 2020-04-26T19:35:37.779377 | 2019-06-10T02:38:02 | 2019-06-10T02:38:02 | 173,780,213 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | class Movie:
def __init__(self, id=0, name=None, year=0, minutes=0, category=None):
self.id = id
self.name = name
self.year = year
self.minutes = minutes
self.category = category
def getName(self):
return self.name
def getID(self):
return self.id
class Category:
def __init__(self, id=0, name=None):
self.id = id
self.name = name
| [
"[email protected]"
] | |
4ec90fac740148b2459acee52674a79f1e5aa322 | 9c437c077af3c9ef1902f68a433ea0c1a7732ec7 | /test.py | c95efe8c960c06494d2df1d6107ff1ca0c4f6796 | [] | no_license | mingdh/studydatacollect | e95ea70b9e48193fe6203734d3c8472d33cf5c4f | 04d942cb76847cb74da28d2cddd2ae721ef4725f | refs/heads/master | 2022-07-18T07:16:40.737392 | 2020-05-12T17:46:01 | 2020-05-12T17:46:01 | 262,999,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | from lxml import etree
from bs4 import BeautifulSoup
text = '''
<li class="li li-first"><a href="link.html">first item</a></li>
'''
soup = BeautifulSoup(text, "lxml")
print(soup.prettify())  # prettify is a method and must be called
print(soup.li.p)  # prints None: this <li> contains an <a>, not a <p>
"""
from urllib import request, parse
url = 'http://httpbin.org/post'
headers = {
'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)',
'Host': 'httpbin.org'
}
dict = {'name': 'Germey'}
data = bytes(parse.urlencode(dict),encoding='utf8')
req=request.Request(url=url,data=data,headers=headers,method='POST')
response =request.urlopen(req)
print(response.read().decode('utf-8'))
"""
| [
"[email protected]"
] | |
1ec00d0f8e5da0616c6a8683aff577a1f033ee99 | 5f63eab2a1d835b3078dcd57268c2d2a5341e15e | /dtfabric/__init__.py | f74641c23130e6ee2d091e6adb05c55cb29951a7 | [
"Apache-2.0"
] | permissive | Onager/dtfabric | 7d86819036f3abd60ec1979634849b933cbebef7 | 987e9d1d27d079ae5e22602e90bd6011108162b5 | refs/heads/master | 2020-03-28T21:10:04.746772 | 2018-09-02T11:36:17 | 2018-09-02T11:45:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | # -*- coding: utf-8 -*-
"""Data type fabric."""
__version__ = '20180808'
| [
"[email protected]"
] | |
2866ab8e3ed6eb2c152a999fa59d81cc5fea1756 | 5cb62d2dfbd905386960a1e4eac162479614f3ec | /Anova.py | 9083bd15083ce11569c0635650389ec6ea1e61f5 | [] | no_license | rkrai2805/ML | 4118100644b71026d5ced37b9cf0cfb5967e2f31 | f8347075390c6ceac5fbede3ad02654a47004875 | refs/heads/main | 2023-06-18T19:41:37.162352 | 2021-07-01T11:30:50 | 2021-07-01T11:30:50 | 382,008,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 1 14:35:05 2021
@author: ravikant.rai
"""
| [
"[email protected]"
] | |
17fca6b7edfa22a311533438616e101d58482ef6 | 672fef1cd92f24cc13dbb651f60d7b1081468bed | /catkin_ws/build/catkin_generated/stamps/Project/_setup_util.py.stamp | 3b50f58fd514c23297640f421c5264f9c9db7256 | [] | no_license | Forrest-Z/DevelopAgv | 49eca36e0a4a714fb232100b6216f4801409aa56 | e7d0ac39f3964557d7f67f074ddba73e5c6f0d3a | refs/heads/master | 2022-12-14T12:41:30.309513 | 2020-09-07T14:21:16 | 2020-09-07T14:21:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,362 | stamp | #!/usr/bin/python2
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""This file generates shell code for the setup.SHELL scripts to set environment variables."""
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
PATH_TO_ADD_SUFFIX = ['bin']
if IS_WINDOWS:
# while catkin recommends putting dll's into bin, 3rd party packages often put dll's into lib
# since Windows finds dll's via the PATH variable, prepend it with path to lib
PATH_TO_ADD_SUFFIX.extend([['lib', os.path.join('lib', 'x86_64-linux-gnu')]])
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
'CMAKE_PREFIX_PATH': '',
'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
'PATH': PATH_TO_ADD_SUFFIX,
'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
"""
Generate shell code to reset environment variables.
by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
This does not cover modifications performed by environment hooks.
"""
lines = []
unmodified_environ = copy.copy(environ)
for key in sorted(env_var_subfolders.keys()):
subfolders = env_var_subfolders[key]
if not isinstance(subfolders, list):
subfolders = [subfolders]
value = _rollback_env_variable(unmodified_environ, key, subfolders)
if value is not None:
environ[key] = value
lines.append(assignment(key, value))
if lines:
lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
return lines
def _rollback_env_variable(environ, name, subfolders):
"""
For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
:param subfolders: list of str '' or subfoldername that may start with '/'
:returns: the updated value of the environment variable.
"""
value = environ[name] if name in environ else ''
env_paths = [path for path in value.split(os.pathsep) if path]
value_modified = False
for subfolder in subfolders:
if subfolder:
if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
subfolder = subfolder[1:]
if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
subfolder = subfolder[:-1]
for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
path_to_remove = None
for env_path in env_paths:
env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
if env_path_clean == path_to_find:
path_to_remove = env_path
break
if path_to_remove:
env_paths.remove(path_to_remove)
value_modified = True
new_value = os.pathsep.join(env_paths)
return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
"""
Based on CMAKE_PREFIX_PATH return all catkin workspaces.
:param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
"""
# get all cmake prefix paths
env_name = 'CMAKE_PREFIX_PATH'
value = environ[env_name] if env_name in environ else ''
paths = [path for path in value.split(os.pathsep) if path]
# remove non-workspace paths
workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte')) or (include_non_existing and not os.path.exists(path))]
return workspaces
def prepend_env_variables(environ, env_var_subfolders, workspaces):
"""Generate shell code to prepend environment variables for the all workspaces."""
lines = []
lines.append(comment('prepend folders of workspaces to environment variables'))
paths = [path for path in workspaces.split(os.pathsep) if path]
prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
for key in sorted(key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH'):
subfolder = env_var_subfolders[key]
prefix = _prefix_env_variable(environ, key, paths, subfolder)
lines.append(prepend(environ, key, prefix))
return lines
def _prefix_env_variable(environ, name, paths, subfolders):
"""
Return the prefix to prepend to the environment variable NAME.
Adding any path in NEW_PATHS_STR without creating duplicate or empty items.
"""
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
path_tmp = path
if subfolder:
path_tmp = os.path.join(path_tmp, subfolder)
# skip nonexistent paths
if not os.path.exists(path_tmp):
continue
# exclude any path already in env and any path we already added
if path_tmp not in environ_paths and path_tmp not in checked_paths:
checked_paths.append(path_tmp)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
def assignment(key, value):
if not IS_WINDOWS:
return 'export %s="%s"' % (key, value)
else:
return 'set %s=%s' % (key, value)
def comment(msg):
if not IS_WINDOWS:
return '# %s' % msg
else:
return 'REM %s' % msg
def prepend(environ, key, prefix):
if key not in environ or not environ[key]:
return assignment(key, prefix)
if not IS_WINDOWS:
return 'export %s="%s$%s"' % (key, prefix, key)
else:
return 'set %s=%s%%%s%%' % (key, prefix, key)
def find_env_hooks(environ, cmake_prefix_path):
"""Generate shell code with found environment hooks for the all workspaces."""
lines = []
lines.append(comment('found environment hooks in workspaces'))
generic_env_hooks = []
generic_env_hooks_workspace = []
specific_env_hooks = []
specific_env_hooks_workspace = []
generic_env_hooks_by_filename = {}
specific_env_hooks_by_filename = {}
generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
# remove non-workspace paths
workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
for workspace in reversed(workspaces):
env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
if os.path.isdir(env_hook_dir):
for filename in sorted(os.listdir(env_hook_dir)):
if filename.endswith('.%s' % generic_env_hook_ext):
# remove previous env hook with same name if present
if filename in generic_env_hooks_by_filename:
i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
generic_env_hooks.pop(i)
generic_env_hooks_workspace.pop(i)
# append env hook
generic_env_hooks.append(os.path.join(env_hook_dir, filename))
generic_env_hooks_workspace.append(workspace)
generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
# remove previous env hook with same name if present
if filename in specific_env_hooks_by_filename:
i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
specific_env_hooks.pop(i)
specific_env_hooks_workspace.pop(i)
# append env hook
specific_env_hooks.append(os.path.join(env_hook_dir, filename))
specific_env_hooks_workspace.append(workspace)
specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
env_hooks = generic_env_hooks + specific_env_hooks
env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
count = len(env_hooks)
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
for i in range(count):
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
parser.add_argument('--local', action='store_true', help='Only consider this prefix path and ignore other prefix path in the environment')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
try:
try:
args = _parse_arguments()
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
if not args.local:
# environment at generation time
CMAKE_PREFIX_PATH = '/home/nhamtung/TungNV/MyNavigation/catkin_ws/devel;/opt/ros/melodic'.split(';')
else:
# don't consider any other prefix path than this one
CMAKE_PREFIX_PATH = []
# prepend current workspace if not already part of CPP
base_path = os.path.dirname(__file__)
# CMAKE_PREFIX_PATH uses forward slash on all platforms, but __file__ is platform dependent
# base_path on Windows contains backward slashes, need to be converted to forward slashes before comparison
if os.path.sep != '/':
base_path = base_path.replace(os.path.sep, '/')
if base_path not in CMAKE_PREFIX_PATH:
CMAKE_PREFIX_PATH.insert(0, base_path)
CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
environ = dict(os.environ)
lines = []
if not args.extend:
lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
print('\n'.join(lines))
# need to explicitly flush the output
sys.stdout.flush()
except IOError as e:
# and catch potential "broken pipe" if stdout is not writable
# which can happen when piping the output to a file but the disk is full
if e.errno == errno.EPIPE:
print(e, file=sys.stderr)
sys.exit(2)
raise
sys.exit(0)
| [
"[email protected]"
] | |
37a1d6436ecff80a8a3ce71f5fe107498f93ddd9 | cee3f76b7f3b442167dedb58b519a673f1932796 | /massRoom/models.py | d2e4eb2b9e205530c2723be18885f362acb98920 | [] | no_license | funnyBigPanda/MassRoom | acad4c78b7d6f5399aa37cfe8fcd00aa1d4740c0 | 1ac75db7acea4e264cb024720a1fadada603ac85 | refs/heads/master | 2021-01-17T20:01:52.948164 | 2016-08-10T15:44:22 | 2016-08-10T15:44:22 | 65,393,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,032 | py | from django.contrib.auth.models import User
from django.db import models
# Create your models here.
class Staff(models.Model):
name = models.CharField(max_length=200)
position = models.CharField(max_length=200)
description = models.TextField()
email = models.EmailField(null=True)
phone_number = models.BigIntegerField(null=True)
image = models.ImageField(upload_to='staff_image')
def __str__(self):
return self.name
class Service(models.Model):
name = models.CharField(max_length=200)
description = models.TextField()
image = models.ImageField(upload_to='service_image')
def __str__(self):
return self.name
class Record(models.Model):
    # processed = 'Processed'
    # not_processed = 'Not processed'
name = models.CharField(max_length=200)
phone = models.CharField(max_length=200)
comment = models.TextField()
massage = models.CharField(max_length=200)
def __str__(self):
return self.massage + ' (' + self.name + ')' | [
"Sergiy"
] | Sergiy |
17625533abf79e37e7f1c5323a554da9a3bc208f | 2198a02c80111b606189348ea70a93d1ae0aba9b | /ddaLines.py | c490916831f802b23c19de1c51c114423a4a709b | [] | no_license | serajshaikh/CG_Lab | c7be6535453b9f9ac87835e02a9b673b1ad1ba68 | 94a8b57144cfcef41abbe4ed0c960abe08d27af3 | refs/heads/main | 2023-02-28T23:07:15.039221 | 2021-02-05T18:19:11 | 2021-02-05T18:19:11 | 336,256,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,054 | py | """Write a program to draw a line using DDA algorithm"""
from OpenGL.GL import*
from OpenGL.GLU import*
from OpenGL.GLUT import*
import sys
x1, y1, x2, y2 = map(int, input("Enter (x1,y1,x2,y2) space separated :").split())
def init():
glClearColor(0.0,0.0,0.0,1.0)
gluOrtho2D(-100.0,100.0,-100.0,100.0)
def ROUND(n):
return int(n+0.5)
def dda(x1,y1,x2,y2):
x,y = x1,y1
    length = abs(x2 - x1) if abs(x2 - x1) > abs(y2 - y1) else abs(y2 - y1)
    dx = (x2 - x1) / float(length)
    dy = (y2 - y1) / float(length)
glVertex2f(ROUND(x),ROUND(y))
for i in range(length):
x+= dx
y+= dy
glVertex2f(ROUND(x),ROUND(y))
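# worked example (sketch): dda(0, 0, 4, 2) steps along the longer axis (length 4)
# and plots the rounded points (0,0), (1,1), (2,1), (3,2), (4,2)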
def plotlines():
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(1.0,0.0,0.0)
glLineWidth(5.0)
glBegin(GL_LINES)
dda(x1,y1,x2,y2)
glEnd()
glFlush()
def main():
glutInit(sys.argv)
glutInitDisplayMode(GLUT_SINGLE|GLUT_RGB)
glutInitWindowSize(600,600)
glutInitWindowPosition(50,50)
glutCreateWindow("Plot Line")
glutDisplayFunc(plotlines)
init()
glutMainLoop()
main() | [
"[email protected]"
] | |
980ead55b4363494a54dada9abdf1ead8f136e48 | 6acb8ae5a4d73b095f04d1491885fe1916fb3a09 | /tca/migrations/0001_initial.py | 2b25776c4933b12071d0c983d46e5844142a28bc | [] | no_license | whroid/TuoP | dc0617cde16b65af4cfa1cf8798cfd67a42b1344 | 0d3d49da92b64d64e504633f2edb887507a03c00 | refs/heads/master | 2016-09-10T13:10:57.748899 | 2015-05-04T14:40:16 | 2015-05-04T14:40:16 | 34,980,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sso', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Topic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('content', models.CharField(max_length=200)),
('create_time', models.DateField(verbose_name=b'create_time')),
],
),
migrations.CreateModel(
name='Vote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('topic', models.ForeignKey(to='tca.Topic')),
('user', models.ForeignKey(to='sso.User')),
],
),
]
| [
"[email protected]"
] | |
fc352360360b455a48bab3a48fa44fb27bfe97f4 | 0508a120ecf9737220c213cd4329837c0fa14ddb | /collector/views.py | bccf58682054a5968fd4a0b62b055159b53cb623 | [] | no_license | nguyendinhtrieu1996/foodbike_recommender_system | 759bc1ca6ba80eb88cda4e823fdedd91ef49769b | a74a982b798894846b07fb6b2189135fcd412192 | refs/heads/master | 2020-03-23T00:21:17.283741 | 2018-08-05T16:02:03 | 2018-08-05T16:02:03 | 140,858,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 584 | py | from django.http import HttpResponse
from collector.models import Log
import datetime
def log(request):
if request.method == 'POST':
date = request.GET.get('date', datetime.datetime.now())
user_id = request.POST['user_id']
content_id = request.POST['content_id']
event = request.POST['event_type']
l = Log(
created=date,
user_id=user_id,
content_id=str(content_id),
event=event)
l.save()
    else:
        return HttpResponse('log only works with POST')
return HttpResponse('ok')
| [
"[email protected]"
] | |
6ce9f040225394435da389a3d031af5834a8fdd0 | 9f67ef32a7fb8fac67654c8ae32d32f1614d77b2 | /test.py | 671ea82591b6c820e6fd1f9b14abd02ca2168a50 | [
"MIT"
] | permissive | mengfanhua/graduate | 832c1df115546cbeca065a3e65e15c888f1c8fdb | c65f6be6fe2fb57232ddc2b855a78d480d64c978 | refs/heads/master | 2023-04-09T13:14:51.754457 | 2021-04-14T05:07:57 | 2021-04-14T05:07:57 | 332,121,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | from PIL import Image
a = Image.open("C:/Users/meng/Desktop/1989.png")
a = a.convert("RGBA")
a.putalpha(255)
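# putalpha(255) sets the alpha channel of every pixel to fully opaque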
a.save("C:/Users/meng/Desktop/1989.png")
| [
"[email protected]"
] | |
a4caec79aeafca6c69483d1ccb3cd0a028661f5a | 52fd8726a977bb98291d08158bd1e3c3396a59a8 | /tetris.py | df067be057b44513cfed760819bcc225d35a99a0 | [] | no_license | saiffmirza/Tetris | 28153852fd5e27b14ab623c4495aa7ed30e18031 | 0a74ef7113dfc185ba54c033725af1b16d2b1f0d | refs/heads/master | 2022-11-12T19:03:02.446012 | 2020-06-25T03:56:53 | 2020-06-25T03:56:53 | 274,824,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,883 | py | import pygame
import random
# still need to add music
from pygame import mixer
pygame.mixer.pre_init(frequency=44100)
pygame.init()
pygame.mixer.init(frequency=44100)
pygame.font.init()
# GLOBAL VARS
s_width = 800
s_height = 800
play_width = 300 # meaning 300 // 10 = 30 width per block
play_height = 600 # meaning 600 // 20 = 30 height per block
block_size = 30
points1 = 0
top_left_x = (s_width - play_width) // 2
top_left_y = s_height - play_height - 50
# SHAPE FORMATS
S = [['.....',
'.....',
'..00.',
'.00..',
'.....'],
['.....',
'..0..',
'..00.',
'...0.',
'.....']]
Z = [['.....',
'.....',
'.00..',
'..00.',
'.....'],
['.....',
'..0..',
'.00..',
'.0...',
'.....']]
I = [['..0..',
'..0..',
'..0..',
'..0..',
'.....'],
['.....',
'0000.',
'.....',
'.....',
'.....']]
O = [['.....',
'.....',
'.00..',
'.00..',
'.....']]
J = [['.....',
'.0...',
'.000.',
'.....',
'.....'],
['.....',
'..00.',
'..0..',
'..0..',
'.....'],
['.....',
'.....',
'.000.',
'...0.',
'.....'],
['.....',
'..0..',
'..0..',
'.00..',
'.....']]
L = [['.....',
'...0.',
'.000.',
'.....',
'.....'],
['.....',
'..0..',
'..0..',
'..00.',
'.....'],
['.....',
'.....',
'.000.',
'.0...',
'.....'],
['.....',
'.00..',
'..0..',
'..0..',
'.....']]
T = [['.....',
'..0..',
'.000.',
'.....',
'.....'],
['.....',
'..0..',
'..00.',
'..0..',
'.....'],
['.....',
'.....',
'.000.',
'..0..',
'.....'],
['.....',
'..0..',
'.00..',
'..0..',
'.....']]
shapes = [S, Z, I, O, J, L, T]
shape_colors = [(255, 176, 71), (238, 130, 238), (111, 176, 71), (111, 176, 213), (255, 165, 0), (111, 247, 213), (210, 70, 0)]
# index 0 - 6 represent shape
mixer.music.load("background.wav")
mixer.music.set_volume(0.5)
mixer.music.play(-1)
class Piece(object):
rows = 20 # y
columns = 10 # x
def __init__(self, column, row, shape):
self.x = column
self.y = row
self.shape = shape
self.color = shape_colors[shapes.index(shape)]
self.rotation = 0 # number from 0-3
def create_grid(locked_positions={}):
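    # note: the mutable default {} is shared between calls; it is safe here only
    # because the dict is read, never mutated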
grid = [[(0,0,0) for x in range(10)] for x in range(20)]
for i in range(len(grid)):
for j in range(len(grid[i])):
if (j,i) in locked_positions:
c = locked_positions[(j,i)]
grid[i][j] = c
return grid
def convert_shape_format(shape):
positions = []
format = shape.shape[shape.rotation % len(shape.shape)]
for i, line in enumerate(format):
row = list(line)
for j, column in enumerate(row):
if column == '0':
positions.append((shape.x + j, shape.y + i))
for i, pos in enumerate(positions):
positions[i] = (pos[0] - 2, pos[1] - 4)
return positions
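# worked example (sketch): an S-piece at (x, y) in rotation 0 occupies the cells
# (x, y-2), (x+1, y-2), (x-1, y-1), (x, y-1) once the -2/-4 offset is applied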
def valid_space(shape, grid):
accepted_positions = [[(j, i) for j in range(10) if grid[i][j] == (0,0,0)] for i in range(20)]
accepted_positions = [j for sub in accepted_positions for j in sub]
formatted = convert_shape_format(shape)
for pos in formatted:
if pos not in accepted_positions:
if pos[1] > -1:
return False
return True
def check_lost(positions):
for pos in positions:
x, y = pos
if y < 1:
return True
return False
def get_shape():
global shapes, shape_colors
return Piece(5, 0, random.choice(shapes))
def draw_text_middle(text, size, color, surface, textwidth):  # textwidth is the (x, y) blit position
    font = pygame.font.SysFont('papyrusttc', size, bold=True)
    label = font.render(text, 1, color)
    surface.blit(label, textwidth)
def draw_grid(surface, row, col):
sx = top_left_x
sy = top_left_y
for i in range(row):
pygame.draw.line(surface, (255,255,255), (sx, sy+ i*30), (sx + play_width, sy + i * 30)) # horizontal lines
for j in range(col):
pygame.draw.line(surface, (255,255,255), (sx + j * 30, sy), (sx + j * 30, sy + play_height)) # vertical lines
def clear_rows(grid, locked):
    # need to see if a row is clear, then shift every row above it down by one
global points1
winner = mixer.Sound("win.wav")
winner.play()
inc = 0
for i in range(len(grid)-1,-1,-1):
row = grid[i]
if (0, 0, 0) not in row:
points1 += 1
cleared = mixer.Sound("cleared.wav")
cleared.play()
inc += 1
# add positions to remove from locked
ind = i
for j in range(len(row)):
try:
del locked[(j, i)]
except:
continue
if inc > 0:
for key in sorted(list(locked), key=lambda x: x[1])[::-1]:
x, y = key
if y < ind:
newKey = (x, y + inc)
locked[newKey] = locked.pop(key)
#ADD POINTS SYSTEM HERE
def show_points(surface):
font = pygame.font.SysFont('papyrusttc', 30)
label = font.render('Points', 1, (255,255,255))
sx = top_left_x - play_width + 100
sy = top_left_y + play_height/2 - 100
surface.blit(label, (sx + 10, sy- 30))
def draw_next_shape(shape, surface):
font = pygame.font.SysFont('papyrusttc', 30)
label = font.render('Next Shape', 1, (255,255,255))
sx = top_left_x + play_width + 50
sy = top_left_y + play_height/2 - 100
format = shape.shape[shape.rotation % len(shape.shape)]
for i, line in enumerate(format):
row = list(line)
for j, column in enumerate(row):
if column == '0':
pygame.draw.rect(surface, shape.color, (sx + j*30, sy + i*30, 30, 30), 0)
surface.blit(label, (sx + 10, sy- 30))
def draw_window(surface):
surface.fill((0,0,0))
# Tetris Title
font = pygame.font.SysFont('papyrusttc', 60)
label = font.render('TETRIS', 1, (255,255,255))
surface.blit(label, (top_left_x + play_width / 2 - (label.get_width() / 2), 30))
for i in range(len(grid)):
for j in range(len(grid[i])):
pygame.draw.rect(surface, grid[i][j], (top_left_x + j* 30, top_left_y + i * 30, 30, 30), 0)
# draw grid and border
draw_grid(surface, 20, 10)
pygame.draw.rect(surface, (200,200,200), (top_left_x, top_left_y, play_width, play_height), 5)
#pygame.display.update()
def main():
global grid
locked_positions = {} # (x,y):(255,0,0)
grid = create_grid(locked_positions)
change_piece = False
run = True
current_piece = get_shape()
next_piece = get_shape()
clock = pygame.time.Clock()
fall_time = 0
    movement = mixer.Sound("movement.wav")  # load the sound effect once, not every frame
    mixer.Sound.set_volume(movement, 0.2)
    while run:
        fall_speed = 1.00
        grid = create_grid(locked_positions)
        fall_time += clock.get_rawtime()
        clock.tick()
# PIECE FALLING CODE
if fall_time/1000 >= fall_speed:
fall_time = 0
current_piece.y += 1
if not (valid_space(current_piece, grid)) and current_piece.y > 0:
current_piece.y -= 1
change_piece = True
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
pygame.display.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
current_piece.x -= 1
movement.play()
if not valid_space(current_piece, grid):
current_piece.x += 1
elif event.key == pygame.K_RIGHT:
current_piece.x += 1
movement.play()
if not valid_space(current_piece, grid):
current_piece.x -= 1
elif event.key == pygame.K_SPACE:
# rotate shape
movement.play()
                    current_piece.rotation = (current_piece.rotation + 1) % len(current_piece.shape)  # parenthesized: % binds tighter than +
if not valid_space(current_piece, grid):
                        current_piece.rotation = (current_piece.rotation - 1) % len(current_piece.shape)
if event.key == pygame.K_DOWN:
# move shape down
movement.play()
current_piece.y += 1
if not valid_space(current_piece, grid):
current_piece.y -= 1
shape_pos = convert_shape_format(current_piece)
# add piece to the grid for drawing
for i in range(len(shape_pos)):
x, y = shape_pos[i]
if y > -1:
grid[y][x] = current_piece.color
# IF PIECE HIT GROUND
if change_piece:
for pos in shape_pos:
p = (pos[0], pos[1])
locked_positions[p] = current_piece.color
current_piece = next_piece
next_piece = get_shape()
change_piece = False
# call four times to check for multiple clear rows
clear_rows(grid, locked_positions)
draw_window(win)
draw_text_middle(str(points1), 80, (255,255,255), win, (100,350))
show_points(win)
draw_next_shape(next_piece, win)
pygame.display.update()
# Check if user lost
if check_lost(locked_positions):
run = False
draw_text_middle("You Lost", 40, (255,255,255), win, (100,20))
pygame.display.update()
pygame.time.delay(5000)
def main_menu():
run = True
while run:
win.fill((0,0,0))
draw_text_middle('TETRIS - THE GAME', 80, (0, 255, 255), win, (50,25))
draw_text_middle('Instructions:', 60, (255, 255, 255), win, (50,300))
draw_text_middle('Press left and right arrow keys to navigate', 30, (255, 255, 255), win, (75,400))
draw_text_middle('Press down arrow keys to move faster', 30, (255, 255, 255), win, (75,450))
draw_text_middle('Press spacebar to rotate', 30, (255, 255, 255), win, (75,500))
draw_text_middle('Press any key to begin:', 60, (255, 0, 0), win, (100,150))
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
if event.type == pygame.KEYDOWN:
main()
pygame.quit()
win = pygame.display.set_mode((s_width, s_height))
pygame.display.set_caption('Tetris')
main_menu() # start game
| [
"[email protected]"
] | |
b64ab24ee58e240b572da93c84dc51e52939a174 | a49b4983ca08b175f74c8faeb70bcf0d66b9d36a | /perceptron_tester.py | f37f003ac5ef1dcb84beb1ccab028b0854b9a3d0 | [] | no_license | romanticegg/FaceDetection | b423e50d2834055914edfe7fb1c3071ff979fec8 | 04e97a149b893aef95179a04c57a50f1c168dd6a | refs/heads/master | 2021-06-13T17:32:29.308673 | 2017-05-07T19:00:02 | 2017-05-07T19:00:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,399 | py | import load_face_data
from perceptron import Perceptron
import numpy as np
from sklearn.decomposition import PCA
import feature_extraction
from sklearn.preprocessing import StandardScaler
training_data, validation_data, test_data = load_face_data.load_data()
train_images = np.array(training_data[0])
train_labels = np.array(training_data[1])
test_images = np.array(test_data[0])
test_labels = np.array(test_data[1])
validation_images = np.array(validation_data[0])
validation_labels = np.array(validation_data[1])
# combine training and validation data to get better results
xyz = validation_data[0] + training_data[0]
abc = []
abc.extend(validation_data[1])
abc.extend(training_data[1])
train_and_valid_images = np.array(xyz)
train_and_valid_labels = np.array(abc)
def run_regular_perceptron(rate, epochs):
perceptron = Perceptron(rate, epochs, "Perceptron 1")
perceptron.fit(train_images, train_labels)
results = perceptron.predict(test_images)
perceptron.calculate_results(results, test_labels, 150)
perceptron.graph_perceptron()
"""
A single perceptron performed very well on this binary classification, achieving 100%
accuracy by the end of the 7th epoch on the training data.
For fun I reduced the problem down to a smaller number of dimensions (450) using PCA
from sklearn.
"""
def run_pca_perceptron(rate, epochs, dimensions):
# Dimensionality Reduction down to 450 dimensions
pca = PCA(n_components=dimensions)
pca.fit(train_images)
pca_train_images = pca.transform(train_images)
pca_test_images = pca.transform(test_images)
# try perceptron on reduced dimensionality data
perceptron2 = Perceptron(rate, epochs, "Perceptron 2 PCA - ", rate)
perceptron2.fit(pca_train_images, train_labels)
# perceptron2.graph_perceptron()
"""
Through testing I was able to determine that a reduction to 450 dimensions from the original 4200
still allows the perceptron to converge. Thus it maintains 100% accuracy on the training data.
"""
# Check Results on Test Data
results_with_pca = perceptron2.predict(pca_test_images)
perceptron2.calculate_results(results_with_pca, test_labels, 150)
"""
Interestingly enough, the perceptron with the reduced dimensions actually outperformed the
perceptron without dimensionality reduction: 77 correct versus 76.
The problem here is that the perceptron is suffering from overfitting. That is, the model perfectly
separates the training data, but the learned boundary does not linearly separate the test data. For
this reason I think more significant features need to be found to get a better model.
"""
def run_pca50_perceptron():
# Try PCA with 50 features
pca2 = PCA(n_components=50)
pca2.fit(train_images)
pca_train_images2 = pca2.transform(train_images)
pca_test_images2 = pca2.transform(test_images)
# try perceptron with 50 dimensions
perceptron3 = Perceptron(0.01, 15, "Perceptron 3 PCA - 50")
perceptron3.fit(pca_train_images2, train_labels)
perceptron3.graph_perceptron()
results_with_pca2 = perceptron3.predict(pca_test_images2)
perceptron3.calculate_results(results_with_pca2, test_labels, 150)
"""
Again this didn't do too well because the model is still overfitting the training data: 51.33%, the same as before.
"""
def run_train_validation_perceptron(rate, epochs):
perceptron4 = Perceptron(rate, epochs, "Perceptron 4 Train & Validation")
perceptron4.fit(train_and_valid_images, train_and_valid_labels)
final_results = perceptron4.predict(test_images)
perceptron4.calculate_results(final_results, test_labels, 150)
perceptron4.graph_perceptron()
"""
Even with all of the validation images added into the training set the
perceptron is still only 57.77% accurate, which isn't very good. I will have
to try and determine a way to extract features from the face images such that
they are more easily distinguished from random images.
"""
def run_sym_face_perceptron():
training_data_2d, validation_data_2d, test_data_2d = load_face_data.load_data(two_d=True)
train_images_2d = np.array(training_data_2d[0])
train_labels_2d = np.array(training_data_2d[1])
standardized_images = StandardScaler().fit_transform(train_images_2d)
test_images_2d = np.array(test_data_2d[0])
test_labels_2d = np.array(test_data_2d[1])
standardized_test_images = StandardScaler().fit_transform(test_images_2d)
validation_images_2d = np.array(validation_data_2d[0])
validation_labels_2d = np.array(validation_data_2d[1])
sym_perceptron = Perceptron(3, 100, "Sym Face")
sym_perceptron.fit(standardized_images, train_labels)
sym_results = sym_perceptron.predict(standardized_test_images)
sym_perceptron.calculate_results(sym_results, test_labels, 150)
sym_perceptron.graph_perceptron()
"""
Symmetric face feature extraction didn't work any better than the standard perceptron. Additionally,
it takes significantly longer to run. The main issue here is that the perceptron is unable
to converge. Thus the feature extraction is not distinguishing the face and not-face very well.
Standardizing the images made the perceptron run faster, but it still doesn't converge
"""
def run_average_face_perceptron(rate, epochs, standardize=False):
# extract just the face images from both the training and validation datasets
faces = feature_extraction.get_face_images(train_and_valid_images, train_and_valid_labels)
# find the average of all the face images and subtract is from every image
# this also centers all of the images
new_training_images = feature_extraction.find_average_face(faces, train_images)
# Find the average image in the testing set and subtract it from every image
# this allows us to compare the testing images more accurately
new_test_images = feature_extraction.find_average_face(test_images, test_labels)
if standardize:
standardized_train_images = StandardScaler().fit_transform(new_training_images)
standardized_test_images = StandardScaler().fit_transform(new_test_images)
average_face_perceptron = Perceptron(rate, epochs, "Perceptron Average Face (Standardized)")
average_face_perceptron.fit(standardized_train_images, train_labels)
average_results = average_face_perceptron.predict(standardized_test_images)
average_face_perceptron.calculate_results(average_results, test_labels, 150)
average_face_perceptron.graph_perceptron()
else:
average_face_perceptron = Perceptron(rate, epochs, "Perceptron Average Face")
average_face_perceptron.fit(new_training_images, train_labels)
average_results = average_face_perceptron.predict(new_test_images)
average_face_perceptron.calculate_results(average_results, test_labels, 150)
average_face_perceptron.graph_perceptron()
# for b in range(25):
# print(average_results[b], ": ", test_labels[b])
"""
The average-face approach gets 100% accuracy in both cases. I am using both the training and
validation images to train the perceptron. Standardizing the values just makes the perceptron
converge faster: nine epochs versus eighteen.
"""
# run_regular_perceptron(0.01, 20)
# run_train_validation_perceptron(0.01, 25)
# run_sym_face_perceptron()
# run_average_face_perceptron(0.01, 20)
run_average_face_perceptron(0.01, 12, standardize=True)
| [
"[email protected]"
] | |
6d680dc75799c1836690c293bc7b5f349f8ff08f | 3cd66a0e29808c577657a5323b9f6e6197e3c6cc | /RL_QG_agents/FC_DQN_rotate.py | 9d184a849f509856997b0f0897409bb2e4987820 | [] | no_license | ReneeYe/NNDL_final | a75a8369e0427e79d3593d9153e30359f6e80c81 | 0a5ca81be1eaf1c08f06feb8e593ac55d9229713 | refs/heads/master | 2021-05-12T00:52:09.456911 | 2018-01-20T06:53:24 | 2018-01-20T06:53:24 | 117,545,107 | 0 | 1 | null | 2018-01-17T04:56:40 | 2018-01-15T12:53:23 | Python | UTF-8 | Python | false | false | 7,821 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 17 17:16:34 2018
@author: Think
"""
import os
import sys
import copy
from collections import deque
import numpy as np
import tensorflow as tf
class DQNAgent:
"""
Multi Layer Perceptron with Experience Replay
"""
def __init__(self, enable_actions, environment_name, layers, rows, cols , model_dir):
# parameters
self.name = os.path.splitext(os.path.basename(__file__))[0]
self.environment_name = environment_name
self.enable_actions = enable_actions.tolist()
self.n_actions = len(self.enable_actions)
self.rows = rows
self.cols = cols
self.layers = layers
self.minibatch_size = 128
self.replay_memory_size = 20000
self.learning_rate = 0.001
#self.learning_rate = 0.005
self.discount_factor = 0.9
self.exploration = 0.1
self.model_dir = model_dir
# self.model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models")
self.model_name = "{}.ckpt".format(self.environment_name)
# replay memory
self.D = deque(maxlen=self.replay_memory_size)
# model
self.init_model()
# variables
self.current_loss = 0.0
def init_model(self):
# input layer (rows x cols)
self.x = tf.placeholder(tf.float32, [None, self.layers, self.rows, self.cols])
# flatten (rows x cols)
size = self.layers * self.rows * self.cols
x_flat = tf.reshape(self.x, [-1, size])
# fully connected layer (32)
W_fc1 = tf.Variable(tf.truncated_normal([size, 2*64], stddev=0.01))
# W_fc1 = tf.Variable(tf.zeros([size, 2*64]))
b_fc1 = tf.Variable(tf.zeros([2*64]))
h_fc1 = tf.nn.relu(tf.matmul(x_flat, W_fc1) + b_fc1)
#W_fc2 = tf.Variable(tf.truncated_normal([100, 100], stddev=0.01))
"""
W_fc2 = tf.Variable(tf.zeros([200, 200]))
b_fc2 = tf.Variable(tf.zeros([200]))
h_fc2 = tf.nn.relu(tf.matmul(h_fc1, W_fc2) + b_fc2)
W_fc3 = tf.Variable(tf.zeros([200, 200]))
b_fc3 = tf.Variable(tf.zeros([200]))
h_fc3 = tf.nn.relu(tf.matmul(h_fc2, W_fc3) + b_fc3)
"""
# output layer (n_actions)
W_out = tf.Variable(tf.truncated_normal([2*64, self.n_actions], stddev=0.01))
b_out_init = tf.zeros([self.n_actions])
b_out_init = b_out_init + np.array([99,-8,8,6,6,8,-8,99,
-8,-24,-4,-3,-3,-4,-24,-8,
8,-4,7,4,4,7,-4,8,
6,-3,4,1,1,4,-3,6,
6,-3,4,1,1,4,-3,6,
8,-4,7,4,4,7,-4,8,
-8,-24,-4,-3,-3,-4,-24,-8,
99,-8,8,6,6,8,-8,99,0,0])
b_out = tf.Variable(b_out_init)
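        # the output bias starts from a hand-tuned 8x8 positional weight table
        # (corners highest at 99, squares adjacent to corners negative) plus two
        # extra action slots; presumably a Reversi-style positional prior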
#b_out = tf.Variable(tf.zeros([self.n_actions]))
self.y = tf.matmul(h_fc1, W_out) + b_out
#self.y = tf.nn.softmax(tf.matmul(h_fc3, W_out) + b_out)
# loss function
self.y_ = tf.placeholder(tf.float32, [None, self.n_actions])
self.loss = tf.reduce_mean(tf.square(self.y_ - self.y))
# train operation
optimizer = tf.train.AdamOptimizer(self.learning_rate)
self.training = optimizer.minimize(self.loss)
# saver
self.saver = tf.train.Saver()
# session
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
def Q_values(self, state):
# Q(state, action) of all actions
return self.sess.run(self.y, feed_dict={self.x: [state]})[0]
def select_action(self, state, targets, epsilon):
if np.random.rand() > epsilon:
            # explore: with probability (1 - epsilon), pick a random legal move
return np.random.choice(targets)
else:
# max_action Q(state, action)
qvalue, action = self.select_enable_action(state, targets)
return action
def select_enable_action(self, state, targets):
Qs = self.Q_values(state)
#print(Qs)
#descend = np.sort(Qs)
index = np.argsort(Qs)
for action in reversed(index):
if action in targets:
break
# max_action Q(state, action)
qvalue = Qs[action]
return qvalue, action
def store_experience(self, state, targets, action, reward, state_1, targets_1, terminal):
# print(self.D)
        def rotation(state, rot_num):
            # rotate every layer of the (layers, rows, cols) board by rot_num quarter-turns
            tmp = copy.deepcopy(state)
            for _ in range(rot_num):
                for d in range(3):
                    tmp[d] = np.rot90(tmp[d])
            return tmp
        def flip_lr(state):
            # mirror every layer left-to-right
            tmp = copy.deepcopy(state)
            for d in range(3):
                tmp[d] = state[d, :, ::-1]
            return tmp
        def flip_ud(state):
            # mirror every layer top-to-bottom
            tmp = copy.deepcopy(state)
            for d in range(3):
                tmp[d] = state[d, ::-1, :]
            return tmp
        def transpose(state):
            # transpose every layer across the main diagonal
            tmp = copy.deepcopy(state)
            for d in range(3):
                tmp[d] = state[d].T
            return tmp
for i in range(4):
s = rotation(state,i)
s_1 = rotation(state_1,i)
self.D.append((s, targets, action, reward, s_1, targets_1, terminal))
self.D.append((flip_lr(state), targets, action, reward, flip_lr(state_1), targets_1, terminal))
self.D.append((flip_ud(state), targets, action, reward, flip_ud(state_1), targets_1, terminal))
self.D.append((transpose(state), targets, action, reward, transpose(state_1), targets_1, terminal))
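        # note: only the board tensors are transformed above; the stored action and
        # legal-move lists still index the untransformed board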
def experience_replay(self):
state_minibatch = []
y_minibatch = []
# sample random minibatch
minibatch_size = min(len(self.D), self.minibatch_size)
#print(self.D)
minibatch_indexes = np.random.randint(0, len(self.D), minibatch_size)
for j in minibatch_indexes:
state_j, targets_j, action_j, reward_j, state_j_1, targets_j_1, terminal = self.D[j]
action_j_index = self.enable_actions.index(action_j)
y_j = self.Q_values(state_j)
if terminal:
y_j[action_j_index] = reward_j
else:
# reward_j + gamma * max_action' Q(state', action')
qvalue, action = self.select_enable_action(state_j_1, targets_j_1)
y_j[action_j_index] = reward_j + self.discount_factor * qvalue
state_minibatch.append(state_j)
y_minibatch.append(y_j)
# training
self.sess.run(self.training, feed_dict={self.x: state_minibatch, self.y_: y_minibatch})
# for log
self.current_loss = self.sess.run(self.loss, feed_dict={self.x: state_minibatch, self.y_: y_minibatch})
def load_model(self, model_path=None):
if model_path:
# load from model_path
self.saver.restore(self.sess, model_path)
else:
# load from checkpoint
checkpoint = tf.train.get_checkpoint_state(self.model_dir)
if checkpoint and checkpoint.model_checkpoint_path:
self.saver.restore(self.sess, checkpoint.model_checkpoint_path)
def save_model(self, epoch):
"""
        epoch = current stage number
"""
model_name_iter = self.model_dir + self.environment_name + str(epoch)+".ckpt"
self.saver.save(self.sess, model_name_iter)
# model_name_iter = self.environment_name + str(epoch) + ".ckpt"
# self.saver.save(self.sess, os.path.join(self.model_dir, model_name_iter)) | [
"[email protected]"
] | |
8cca7863aa65bd4cf6d5a46f0dfad8bc41aa5bd5 | bd6523dd51e568af6c72878d7f3684c45fbf3b76 | /myapp/urls.py | b16b1183e295193a101b0266ef0ffea31aaa360a | [] | no_license | DevBasito/First-Django-App | 99419885b29e3f43dde8f79f764e4f4dc296c12d | f419b5da8350610a1ae198b3c3c7dd55f83f24cb | refs/heads/main | 2023-04-01T05:23:35.683471 | 2021-04-14T01:01:10 | 2021-04-14T01:01:10 | 357,723,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 784 | py | """NewProject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [
    path('index/', views.index, name="index")
]
| [
"[email protected]"
] | |
fce0269827a14f3f85fcd0b22b01e547f10b13c1 | bfad81de459052ea439a06d4346a4fdc60b3cad2 | /Project/accounts/views.py | 9e83bf61fc7df347459ad04b46aac45b34dca64a | [] | no_license | MarCastellnou/Deriverable_Proj-Web | ecc9841b48c479cc05071a85f94bc92d5b8323a5 | 0a3b39e3afe1cc233c0e0e0186635037da26e0cd | refs/heads/master | 2021-07-19T02:49:02.962373 | 2019-05-26T09:54:39 | 2019-05-26T09:54:39 | 175,597,908 | 0 | 0 | null | 2020-06-05T20:59:43 | 2019-03-14T10:17:58 | Python | UTF-8 | Python | false | false | 311 | py |
# Create your views here.
from django.contrib.auth.forms import UserCreationForm
from django.urls import reverse_lazy
from django.views import generic
class SignUp(generic.CreateView):
form_class = UserCreationForm
template_name = 'registration/signup.html'
success_url = reverse_lazy('login')
| [
"[email protected]"
] | |
108944303a0843ceaf9057331af7cf9f798aaebd | 93ae12b7b48eede793c87df34a711b97511db535 | /python/linked_list.py | a95a396a4174f98467e9c9ba35ae0b44270900f9 | [] | no_license | oneilk/data-structures | af9e737e5fe54bc244b1ae2a4757c2a8bdfe331b | 4ef38fe51e6273fba1365d998cf898975aab96de | refs/heads/master | 2021-11-27T16:35:43.996258 | 2021-08-13T15:06:06 | 2021-08-13T15:06:06 | 254,439,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,194 | py | class Node:
def __init__(self, data=None, next=None) -> None:
self.data = data
self.next = next
class LinkedList:
def __init__(self) -> None:
self.head = None
def insert_at_beginning(self, data) -> None:
node = Node(data, self.head)
self.head = node
def insert_at_end(self, data) -> None:
node = Node(data)
if self.head is None:
self.head = node
else:
itr = self.head
while itr.next:
itr = itr.next
itr.next = node
def insert_values(self, data_list) -> None:
self.head = None
for data in data_list:
self.insert_at_end(data)
def get_length(self) -> int:
count = 0
itr = self.head
while itr:
count += 1
itr = itr.next
return count
def remove_at(self, index) -> None:
if index < 0 or index >= self.get_length():
raise Exception("Invalid index")
if index == 0:
self.head = self.head.next
return
count = 0
itr = self.head
while count + 1 != index:
count += 1
itr = itr.next
itr.next = itr.next.next
def insert_at(self, index, data) -> None:
if index == 0:
self.insert_at_beginning(data)
return
if index == self.get_length():
self.insert_at_end(data)
return
count = 0
itr = self.head
while itr:
if count == index - 1:
node = Node(data, itr.next)
itr.next = node
break
itr = itr.next
count += 1
def print(self) -> None:
if self.head is None:
print("Linked List is empty")
return
itr = self.head
llstr = ""
while itr:
llstr += str(itr.data) + " --> "
itr = itr.next
print(llstr)
if __name__ == "__main__":
ll = LinkedList()
ll.insert_values(["pig", "elephant", "horse", "chicken"])
ll.insert_at(6, "eagle")
ll.insert_at(2, "eagle")
ll.print()
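    # expected output (sketch): insert_at(6, ...) is out of range for a 4-item list
    # and is silently ignored, so this prints:
    # pig --> elephant --> eagle --> horse --> chicken -->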
| [
"[email protected]"
] | |
cf8a43035c2a8399bb7968d9c6ceed3450f33784 | 404dc317b6c3a313fe58f5fe632fec39a56dbea5 | /project/database_connection_tests/database_connection_raspberry.py | 5225c8e2272585fb69a0ffad7a6bc333670f0fd6 | [
"MIT"
] | permissive | chr0x6eos/AttendID | 4ffe222df9f3f083098903b8374e62a1d791d6cb | b9aca98e3f58fa2823af6d910e625dcad014b11a | refs/heads/master | 2022-07-16T20:05:15.839906 | 2020-05-17T09:22:47 | 2020-05-17T09:22:47 | 221,528,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,386 | py | #!/usr/bin/python3
import mysql.connector
from time import strftime
import random
'''
def getDBs(mycursor): #Return all dbs
dbArray = []
mycursor.execute("SHOW DATABASES")
for x in mycursor:
dbArray.append(x)
return dbArray
def createDB(mycursor, dbName): #Creates DB if it does not already exists
for x in getDBs(mycursor):
if x == dbName:
return
mycursor.execute(f'CREATE DATABASE {dbName}') #F is the python string builder
'''
myDB = None #Same as null
try:
myDB = mysql.connector.connect(
host="localhost",
#user="possegger",
#passwd="P@ssw0rd$!"
user="simon",
passwd="P@ssw0rd$!-278",
database="attendID"
)
mycursor = myDB.cursor()
#CreateDB if not already exists
#createDB(mycursor, 'attendID')
#Create Table
#mycursor.execute("CREATE TABLE attendingStudents (id INT AUTO_INCREMENT PRIMARY KEY, name VARCHAR(255))")
#Insert into table
sql_query = ("INSERT INTO AttendingStudents (TimeStamp,Class,AttendingStudents) VALUE (%s,%s,%s)")
#sql_value = ("2018-11-05 11:31:00","4AHITN", 0)
sql_value = (strftime("%Y-%m-%d %H:%M:%S"),"4AHITN",random.randint(1,16))
mycursor.execute(sql_query,sql_value)
#Commit changes
myDB.commit()
except Exception as x:
print(x)
finally:
if myDB:
myDB.close()
| [
"[email protected]"
] | |
356fffb2a72db9229eaca3a2370021d520335237 | 6c46bc29b771d27cd1b205111675cf1529270f84 | /Polls_App/views.py | 617b4a1862da073865ba00cc09529565702bec6f | [] | no_license | ChristianHallerX/DJPoll_Webapp | 47936b943f47e648183e1ecdc2888fa967ae5f33 | 79e57bfd4b4e1bb62ccbca0b79e887e71761aef3 | refs/heads/main | 2023-01-08T19:58:24.761846 | 2020-11-07T04:55:07 | 2020-11-07T04:55:07 | 310,751,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,968 | py | from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.views import generic
from django.utils import timezone
from .models import Choice, Question
class IndexView(generic.ListView):
template_name = 'Polls_App/index.html'
context_object_name = 'latest_question_list'
def get_queryset(self):
"""
Return the last five published questions (not including those set to be
published in the future).
"""
return Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
model = Question
template_name = 'Polls_App/detail.html'
def get_queryset(self):
"""
Excludes any questions that aren't published yet.
"""
return Question.objects.filter(pub_date__lte=timezone.now())
class ResultsView(generic.DetailView):
model = Question
template_name = 'Polls_App/results.html'
def vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
# Redisplay the question voting form.
return render(request, 'Polls_App/detail.html', {
'question': question,
'error_message': "You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
return HttpResponseRedirect(reverse('Polls_App:results', args=(question.id,)))
def results(request, question_id):
question = get_object_or_404(Question, pk=question_id)
return render(request, 'Polls_App/results.html', {'question': question}) | [
"[email protected]"
] | |
51e82f206d6cb158d43c6493732e099a1e934368 | 89f03b452f7d0279435156fe13c2f6fbe9b949ce | /lib/modules/paperparse.py | 7f653657fdac51487db14ee6657518d9a7147e98 | [] | no_license | CSB5/atminter | 99408e10a6457d7649fe893c27e6f066ca2e95db | feae3672f41ef151dbe2e70d7dad6a00500ab874 | refs/heads/master | 2021-01-13T01:02:51.407291 | 2016-03-03T07:25:02 | 2016-03-03T07:25:02 | 52,598,290 | 7 | 6 | null | null | null | null | UTF-8 | Python | false | false | 7,954 | py | #!/usr/bin/env python3
"""
paperparse.py
A set of functions to deal with pubcrawl data
"""
import nltk
import os
import re
import json
"""
getNames(filePath):
input:
pubcrawl json
output:
        the full name, shortened name, and genus of each species in the file name
Sample pubcrawl output file:
Escherichia_coli#Pseudomonas_aeruginosa.compiled
Escherichia_coli#Pseudomonas_aeruginosa.sp
Escherichia_coli#Pseudomonas_aeruginosa.json
Resultant getNames output:
[['escherichia coli', 'e. coli', 'escherichia'], ['pseudomonas aeruginosa', 'p. aeruginosa', 'pseudomonas']]
"""
def getNames(filePath):
def shorten(tup):
return tup[0][0] + '. ' + tup[1]
filePath = os.path.basename(filePath)
name = os.path.splitext(filePath)[0]
# print(name)
name = [i.split('_') for i in name.split('#')]
name = [[i.lower() for i in j] for j in name]
#check if genus only
# print(name)
if len(name[0]) ==1:
return [[i[0]] for i in name]
return [[" ".join(i), shorten(i), i[0]] for i in name]
"""
loadFile(filepath):
generic file input. Takes in the file as raw data and returns a list of stripped and lowered lines.
"""
def loadFile(filePath):
holder = []
with open(filePath) as f:
for i in f:
holder.append(i.strip().lower())
return holder
"""
tagStrip(line):
removes the medline tag from the line
"""
def tagStrip(line):
return line[6:]
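# e.g. tagStrip("TI  - Some title") -> "Some title"; the 4-character medline
# tag and its "- " separator occupy the first six characters of each line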
"""""""""""""""""""""
#####################
# .sp Files #
#####################
"""""""""""""""""""""
"""
WARNING: OUTDATED. Currently kept to maintain compatibility
spFile():
Class representation of a single .sp file. Contains the title, abstract, and their respective stemmed and tokenized forms
loadSection(section):
    Loads a .sp section into split {TERM: DATA} dictionaries.
readSpFile(spFilePath):
    reads an SP file
NOTE: Use as base class for all the other paper derivatives
NOTE: For all future pubcrawl outputs, pmid is NECESSARY
"""
class spFile():
#@profile
def __init__(self, spFilePath, purge = False, reduced = False):
self.file_name = os.path.basename(spFilePath)
self.species_names = os.path.splitext(self.file_name)[0].replace("_", " ").split("#")
loaded = self.readSpFile(spFilePath, reduced = reduced)
#print(loaded)
self.summary = self.loadSection(loaded["SUMMARY"])
if reduced:
return
if purge:
for i in self.summary:
self.summary[i] = '0'
papers = loaded['PAPERS'].split('\n\n')
self.papers = [self.loadSection(i) for i in papers]
#print(self.papers)
if purge:
for i in self.papers:
if i == {}:
continue
i["TIHT"] = ''
i["ABHT"] = ""
self.papers = [i for i in self.papers if i != {}]
#@profile
def loadSection(self, section):
#holder = [i.split("==") for i in section.split('\n') if i != '' and i != '\n']
#HARDCODING
holder = []
for i in section.split('\n'):
if i == '' or i == '\n':
continue
holder.append((i[:4], i[6:].strip()))
try:
result = {i:j.strip() for i,j in holder}
except ValueError:
print("ERROR")
print(holder)
print(section)
raise
return result
#@profile
def readSpFile(self, spFilePath, reduced = False):
if reduced ==True:
total = ''
with open(spFilePath) as f:
while(f.readline()[0] != "@"):
pass
total+= f.readline()
total+= f.readline()
total+= f.readline()
# print(total)
return {"SUMMARY": total}
holder = {}
try:
with open(spFilePath) as f:
for i in f:
#find the first section
if i[0] == '#':
continue
if i[0] == '@':
current = i[1:].strip()
holder[current] = ''
else:
#account for empty lines
if i == '':
continue
#this line is slow, fix it.
holder[current] += i
except:
print("readSpFileError: ", spFilePath)
raise
return holder
#reads the list of papers, converts them into paper tuples
#@profile
    def loadPapers(self, rawAbstractList):
        holder = []
        res = []
        for i in rawAbstractList:
            if i[0] == ">":
                if holder == []:
                    holder = [i[2:]]
                else:
                    res.append(holder)
                    holder = [i[2:]]
            else:
                holder.append(i)
        # flush the final group: the loop only appends a paper when it sees
        # the next ">" header, so the last paper was previously dropped
        if holder:
            res.append(holder)
        return res
def writeSpFile(self, filePath):
with open(filePath, 'w') as f:
#handle the summary
f.write("@SUMMARY\n")
for i in self.summary:
f.write('== '.join([i, self.summary[i]]) + '\n')
f.write("@PAPERS\n")
for paperDict in self.papers:
f.write("== ".join(["PMID", paperDict["PMID"]]) + "\n")
f.write("== ".join(["TI ", paperDict["TI "]]) + "\n")
f.write("== ".join(["AB ", paperDict["AB "]]) + "\n")
f.write("== ".join(["TIHT", paperDict["TIHT"]]) + "\n")
f.write("== ".join(["ABHT", paperDict["ABHT"]]) + "\n\n")
def writeSpFileHits(self, filePath):
with open(filePath, 'w') as f:
#handle the summary
f.write("@SUMMARY\n")
for i in self.summary:
f.write('== '.join([i, self.summary[i]]) + '\n')
f.write("@PAPERS\n")
for paperDict in self.papers:
if not (paperDict["TIHT"] or paperDict["ABHT"]):
continue
f.write("== ".join(["PMID", paperDict["PMID"]]) + "\n")
f.write("== ".join(["TI ", paperDict["TI "]]) + "\n")
f.write("== ".join(["AB ", paperDict["AB "]]) + "\n")
f.write("== ".join(["TIHT", paperDict["TIHT"]]) + "\n")
f.write("== ".join(["ABHT", paperDict["ABHT"]]) + "\n\n")
def export(self):
print("file_name: ", self.file_name)
print("SUMMARY: ", self.summary)
#Updated spFile. Use in preference
SpFile_file_pattern = re.compile(r"@(\w+)\n")  # raw string: "\@" was an invalid escape
SpFile_term_pattern = re.compile(r"([\w ]*)==")
"""
SpFile:
Fundamental storage file for all later processing. Replaces spFile
input:
path to a target json file output by pubcrawl/any minter module
flags:
purge
removes all annotation data
reduced
removes all papers
"""
class SpFile():
#@profile
def __init__(self, file_path, purge = False, reduced = False):
self.file_name = os.path.basename(file_path)
self.species_names = os.path.splitext(self.file_name)[0].replace("_", " ").split("#")
with open(file_path) as f:
data = json.load(f)
        self.summary = {i: data["SUMMARY"][i] for i in data["SUMMARY"]}
self.papers = [i for i in data["PAPERS"]]
# handle the case of empty TIHT/ABHT
#remove annotation data from papers if needed
if purge:
for i in self.papers:
i["ABHT"] = []
i["TIHT"] = []
else:
for i in self.papers:
if not i["ABHT"]:
i["ABHT"] = []
if not i["TIHT"]:
i["TIHT"] = []
def writeSpFile(self, file_path):
with open(file_path, 'w') as f:
output = dict()
output["SUMMARY"] = self.summary
output["PAPERS"] = self.papers
json.dump(output, f)
def writeSpFileHits(self, file_path):
output = dict()
output["SUMMARY"] = self.summary
output["PAPERS"] = []
for paper in self.papers:
if paper["TIHT"] or paper["ABHT"]:
output["PAPERS"].append(paper)
with open(file_path, "w") as f:
json.dump(output, f)
def export(self):
print("file_name: ", self.file_name)
print("SUMMARY: ", self.summary)
"""
loadSpFileDir(dirPath)
input
A path to a directory containing only .sp Files
returns
A list of spFile objects for all spFiles
"""
def loadSpFileDir(dirPath, purge = False):
files = os.listdir(dirPath)
if dirPath[-1] != "/":
dirPath += '/'
files = [dirPath + i for i in files]
return [spFile(i, purge = purge) for i in files]
if __name__ == "__main__":
#target = '../input/pattern/smalltestann/Actinomyces_sp.#Bacteroides_sp..sp'
#target = '/home/esyir/Documents/A-star/Quetzalcoatl/work_permissive/train_test/set_2/ann_files/Lactobacillus_reuteri#Streptococcus_salivarius.sp'
target = '/home/esyir/Documents/A-star/Quetzalcoatl/work_permissive/train_test/set_2/ann_files/Staphylococcus_aureus#Enterococcus_faecalis.sp'
# outPath = '../formats_and_standards/tester/tester.sp'
#cProfile.run("spFile('{}')".format(target))
temp = SpFile(target)
temp.writeSpFile("test.txt")
temp2 = spFile(target)
# temp.writeSpFile(outPath) | [
"[email protected]"
] | |
0832a3791935232166da66c9a990e7cea2789352 | df8eacf8d9866b39eb4b8f9b073b9edba54bbf00 | /scripts/Oman-crawler.py | 211574d99d8054ecae36b22242275339b10f9fbc | [] | no_license | Leontian36/COVID19-Case-Data-Crawler | 9d423fd427503061450e0f2250bc5f008ee217a2 | ded89329ed1054573acdfa138bb6bcd01a694303 | refs/heads/master | 2023-03-10T19:45:40.127542 | 2021-02-25T08:23:43 | 2021-02-25T08:23:43 | 308,104,927 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,160 | py | '''
@Author: Matthew Shabet
@Date: 2020-08-02 21:51:00
@LastEditTime: 2020-08-02 22:48:00
@LastEditors: Please set LastEditors
@Description: In User Settings Edit
'''
import csv
import requests
import json
import os
from datetime import datetime
url = 'https://covid19.moh.gov.om/#/home'
# The above URL actually gets its data from here:
url = 'https://covid19.moh.gov.om/ens/outbreak/getRegionWalayatSummary'
# Get the data
response = requests.get(url, headers={'Connection': 'close'}, verify=False)
data = json.loads(response.text)["result"]
# Create and open the CSV
mkfile_time = datetime.strftime(datetime.now(), '%Y%m%d%H%M')
folder_path = './photo/Oman/'+ mkfile_time + '/'
if not os.path.exists(folder_path):
os.makedirs(folder_path)
file = open(folder_path+'table.csv', 'w', newline='', encoding='utf-8-sig')
writer = csv.writer(file)
# Write each line to the CSV
headers = ["Region", "Confirmed", "Sick", "Recovered", "Deaths", "Suspected", "Quarantined"]
writer.writerow(headers)
for d in data:
row = [d["regionName"], d["infected"], d["currentlySick"], d["recovered"], d["death"], d["suspected"], d["quarantined"]]
    writer.writerow(row)
# close the handle so buffered rows are flushed to disk (it was never closed)
file.close() | [
"[email protected]"
] | |
5fb75e7b80dbd906d47244a9a9291014ec7327c6 | 62ba507931d414560593d5eab4673efcda1f5b03 | /flyt/settings.py | d4470b3751f13727e38e37f0444258c8d1db7ee5 | [] | no_license | jackdcal/my-first-blog | 01a17281abe25f3389245f51b6f0a012138094fa | 77108ca1802b07cc03df258f6fbeca083f5c2692 | refs/heads/master | 2022-12-06T02:23:24.878813 | 2018-03-18T16:14:31 | 2018-03-18T16:14:31 | 125,280,714 | 0 | 1 | null | 2022-11-28T13:36:00 | 2018-03-14T22:19:17 | Python | UTF-8 | Python | false | false | 3,199 | py | """
Django settings for flyt project.
Generated by 'django-admin startproject' using Django 2.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*8_=5x!(i&7t!u1d7k3n72#4l5!v5+@xow-t+h)m0xmzslped%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', 'localhost', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'flimp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'flyt.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'flyt.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/London'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"[email protected]"
] | |
ebf7bb12afc082342bc8d6d0fd6fca1f21654281 | c8ea4fe0dccca928b92234b72a7a8d9cd6cf4d14 | /eth2/beacon/types/candidate_pow_receipt_root_records.py | 627a370464b39a59f5665fb194d34d3d8124199d | [
"MIT"
] | permissive | kclowes/trinity | b6bc4f7c57ade1651cf9b2ca9ca88493f3485007 | f0400c78a6d828dd266b1f31dd3fa7aacf97486d | refs/heads/master | 2020-04-16T16:11:28.531260 | 2019-01-14T17:03:56 | 2019-01-14T17:44:58 | 165,728,497 | 0 | 0 | MIT | 2019-01-14T20:17:01 | 2019-01-14T20:17:00 | null | UTF-8 | Python | false | false | 648 | py | from eth_typing import (
Hash32,
)
import rlp
from eth2.beacon.sedes import (
uint64,
hash32,
)
class CandidatePoWReceiptRootRecord(rlp.Serializable):
"""
Note: using RLP until we have standardized serialization format.
"""
fields = [
# Candidate PoW receipt root
('candidate_pow_receipt_root', hash32),
# Vote count
('votes', uint64),
]
def __init__(self,
candidate_pow_receipt_root: Hash32,
votes: int) -> None:
super().__init__(
candidate_pow_receipt_root=candidate_pow_receipt_root,
votes=votes,
)
| [
"[email protected]"
] | |
a5f41dd264235f371341785ae18342dfd49ce5f8 | de9eeea51e189cb96435700dfa04045cdd555bf5 | /filme.py | 0e6f84092c707b39791620fac5402219e47be303 | [] | no_license | jrbytes/python-oo | 2acdf25803e4e120b59e7dda5e11b9a57e6c2ba3 | 28192c34285709178063e46b1412ac2ba2896305 | refs/heads/main | 2023-07-24T13:03:46.489754 | 2021-09-07T22:32:23 | 2021-09-07T22:32:23 | 402,584,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,829 | py | class Programa:
def __init__(self, nome, ano):
self._nome = nome.title()
self.ano = ano
self._likes = 0
@property
def likes(self):
return self._likes
def dar_like(self):
self._likes += 1
@property
def nome(self):
return self._nome
@nome.setter
def nome(self, novo_nome):
self._nome = novo_nome.title()
def __str__(self):
return f'{self._nome} - {self.ano} - Likes: {self._likes}'
class Filme(Programa):
def __init__(self, nome, ano, duracao):
super().__init__(nome, ano)
self.duracao = duracao
def __str__(self):
return f'{self._nome} - {self.ano} - {self.duracao} min - {self._likes} likes'
class Serie(Programa):
def __init__(self, nome, ano, temporadas):
super().__init__(nome, ano)
self.temporadas = temporadas
def __str__(self):
return f'{self._nome} - {self.ano} - {self.temporadas} temp - {self._likes} likes'
class Playlist:
def __init__(self, nome, programas):
self.nome = nome
self._programas = programas
def __getitem__(self, item):
return self._programas[item]
@property
def listagem(self):
return self._programas
def __len__(self):
return len(self._programas)
vingadores = Filme('vingadores - guerra infinita', 2018, 160)
atlanta = Serie('atlanta', 2018, 2)
tmep = Filme('Todo mundo em panico', 1999, 100)
demolidor = Serie('Demolidor', 2016, 2)
vingadores.dar_like()
tmep.dar_like()
demolidor.dar_like()
demolidor.dar_like()
atlanta.dar_like()
atlanta.dar_like()
filmes_e_series = [vingadores, atlanta, demolidor, tmep]
playlist_fim_de_semana = Playlist('fim de semana', filmes_e_series)
print(f'Playlist length: {len(playlist_fim_de_semana)}')
for programa in playlist_fim_de_semana:
print(programa) | [
"[email protected]"
] | |
90cda8b621b8d7167515603ba494c865c5e93847 | a3d68ad77a64222ab4294e2081be9eeabc634fb9 | /Unique-Subsequence.py | ac44d3e05ff571d20fd9f58f663f67ab2e9f6558 | [] | no_license | LamThanhNguyen/HackerEarth-Solutions | 2c8fe9fad1b816c50cc239defc67750d0817b505 | bf7a10af66c6d95aee2a60ad2cd1ed958cb112f8 | refs/heads/master | 2023-03-19T02:54:19.811429 | 2021-02-02T15:53:50 | 2021-02-02T15:53:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | t = int(input())
for _ in range(t):
n = int(input())
s = str(input())
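    # `ans` counts maximal runs of equal adjacent characters: every index i
    # with s[i+1] != s[i] starts a new run, e.g. "aabba" -> 3 runs (aa, bb, a).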
ans = 1
for i in range(n-1):
if(s[i+1]!=s[i]):
ans = ans + 1
print(ans) | [
"[email protected]"
] | |
9be647f84cdc463b3893ff68138932be83142b12 | 11d2f842f86d1b4297d8b0dc0a75308b9a54f5b9 | /basic.py | 3e15d5d81da03a1b3c2d5438dfda2cfb31f1d00a | [] | no_license | brianfarris/backprop | a6fd9f273f2ff851ea9884cabcd3620fd2c41482 | a376f304b3fc3ab1071b221cc23f03f1a9802e4d | refs/heads/master | 2020-04-27T00:51:55.625115 | 2019-03-06T16:47:36 | 2019-03-06T16:47:36 | 173,947,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 951 | py | import numpy as np
from vertices.vertices_general import Input
from vertices.vertices_basic import Multiplication, Addition, Inverse, Squared, Sigmoid
from backprop.traverse import Traverse
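# The graph wired up below evaluates the classic backprop worked example
# f(x, y) = (x + sigmoid(y)) / (sigmoid(x) + (x + y)**2); the backward pass
# then propagates grad_value = 1.0 from the output vertex.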
if __name__ == "__main__":
x = Input(name="x")
x.value = np.array(3)
y = Input(name="y")
y.value = np.array(-4)
sigy = Sigmoid([y], name="sigy")
numerator = Addition([x, sigy], name="numerator")
x_plus_y = Addition([x, y], name="x_plus_y")
x_plus_y_sq = Squared([x_plus_y], name="x_plus_y_sq")
sigx = Sigmoid([x], name="sigx")
denominator = Addition([sigx, x_plus_y_sq], name="denominator")
denominator_inv = Inverse([denominator], name="denominator_inv")
mult = Multiplication([numerator, denominator_inv])
learning_rate = 1.0
traverse = Traverse(learning_rate)
traverse.forward(mult)
print("L: ", traverse.stack[-1].value)
mult.grad_value = 1.
traverse.backward(print_grads=True)
| [
"[email protected]"
] | |
311a04dfb1911906cea033ab338c4079cd95482d | 4133bd4de741900a599d2acede88c14589652454 | /lista_5/questaoD.py | 86e9be9f5eed6fc2478e698158ecb174283bc46e | [] | no_license | victorvhs/zumbi_curso | 5659a8fbc352ec4174230cda0542957951fa4ba8 | 3afb363874cca2286615d3595c5f50efc865cde1 | refs/heads/master | 2021-06-03T02:36:47.847231 | 2021-05-21T23:28:19 | 2021-05-21T23:28:19 | 64,033,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | # Daniela é uma pessoa muito supersticiosa. Para ela, um número é sortudo
# [Translated from Portuguese] Daniela is very superstitious. To her, a number
# is "lucky" if it contains the digit 2 but not the digit 7. So, in her view,
# how many lucky numbers are there between 18644 and 33087, inclusive?
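# e.g. 20000 is lucky (has a '2', no '7'); 27000 is not (contains '7').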
resposta = 0
for i in range(18644, 33088):  # 18644..33087 inclusive, per the problem statement
num = str(i)
if '2' in num and not '7' in num:
resposta += 1
print("Existem ", resposta, " numero sortudos")
| [
"[email protected]"
] | |
683177c8d14ce350ca90ae3aabae0fac52efcd38 | 8a6eb38e4afe8bd937c4748c1afc1d4df6dcb875 | /pages/basePage.py | 04a4b4a05b63bab91cf0236ee29dfe47d18a2452 | [] | no_license | lv-1213/EasyPay | 6d46d5b59c7266e077ac1e38701cd08ccdb80593 | 402b026742d4d0cdb915728efa091d4dbf828f5d | refs/heads/master | 2022-11-15T12:53:51.526560 | 2020-07-01T07:36:28 | 2020-07-01T07:36:28 | 276,309,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,968 | py | import time
from selenium.webdriver.support.select import Select
from selenium import webdriver
class Page:
username_css = ["css selector", '[id="username"]']
password_css = ["css selector", '[id="password"]']
button_css = ["css selector", '[ng-click="login()"]']
url = "http://172.16.100.115/Eviews/static/login.html"
    # number of faulty devices ("故障台数")
xpathNum = ["xpath",'//*[text()="故障台数"]']
    # fault records ("故障记录")
xpathRecord = ["xpath", '//*[text()="故障记录"]']
list_css = ["css selector",'li[class="ng-scope status_normal"]']
starttime_css = ["css selector",'.search>input[placeholder="开始日期"]']
datatime_css=["css selector",'.search>input[placeholder="结束日期"]']
nowtime_xpath = ["xpath",'//*[text()="今天"]']
    # device type dropdown
select_css = ["xpath",'//*[@id="main"]/div[1]/div[3]/div/div[1]/select']
    # query button
query_xpath = ["xpath",'//*[@id="main"]/div[1]/div[3]/div/div[1]/a']
    # device-number input field ("设备编号")
inputNum_css = ["css selector",'[placeholder="设备编号"]:nth-of-type(3)']
    # number of data rows shown on the page
statsdata_xpath=["xpath",'//*[@id="main"]/div[1]/div[3]/div/div[2]/a[6]']
    # total number of pages
numberpage_xpath=["xpath",'//*[@id="main"]/div[1]/div[3]/div/div[2]/a[3]']
    # fault code ("故障码")
faultcode_css = ["css selector",'[placeholder="故障码"]']
#tag_NAME
tagName = ["tag name","span"]
    # current page number
howpage =["css selector",'#main [ng-show*="breakdown"] div[class="page"]>a:nth-child(3)']
    # next-page button
nextpage = ["css selector",'#main [ng-show*="breakdown"] div[class="page"]>a:nth-child(4)']
ssss = ["xpath",'//*[@id="body"]/div[2]/ul/li[2]/a']
def __init__(self,driver):
self.driver=driver
self.driver.implicitly_wait(10)
def element_element(self,li,a):
return li.find_elements_by_tag_name(a)
def get_web_picture(self,userpath):
self.driver.get_screenshot_as_file(userpath)
    # click an element
def click_element(self, locator):
self.driver.find_element(locator[0], locator[1]).click()
    # clear an input
def clear_element(self, locator):
self.driver.find_element(locator[0], locator[1]).clear()
    # type text into an input
def input_text(self, locator, text):
self.driver.find_element(locator[0], locator[1]).clear()
self.driver.find_element(locator[0], locator[1]).send_keys(text)
    # get multiple elements
def get_webelements(self, locator):
return self.driver.find_elements(locator[0], locator[1])
    # get a single element
def get_webelement(self, locator):
return self.driver.find_element(locator[0], locator[1])
    # select a dropdown option by visible text
def select_option(self, locator, option):
select = Select(self.get_webelement(locator))
select.select_by_visible_text(option)
def get_url(self):
time.sleep(1)
return self.driver.current_url
    # close the browser
def closr_browser(self):
self.driver.quit()
| [
"[email protected]"
] | |
a7ac62b50c4891a9aa0b3fbc99827e94a02774dc | ac76f42e9019a100fa961b4808353fe870de96b1 | /streamproject/stream/migrations/0002_auto_20170909_1121.py | 54d9b7cc79859a2dcac195dc33eba36461c3b001 | [] | no_license | lukalux/streamproject | eb2033a8ebe15c0bd59c12a7279199ab2566850d | 8f0722ac55c85e4475665e0d248dfdacaa706975 | refs/heads/master | 2021-07-01T08:10:01.923639 | 2017-09-19T08:07:10 | 2017-09-19T08:07:10 | 103,264,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 934 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-09 11:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('stream', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='link',
options={'ordering': ['modified_at'], 'verbose_name': 'Link', 'verbose_name_plural': 'Links'},
),
migrations.AddField(
model_name='link',
name='modified_at',
field=models.DateField(auto_now=True),
),
migrations.AddField(
model_name='link',
name='sport',
field=models.CharField(blank=True, default='', max_length=255),
),
migrations.AlterField(
model_name='link',
name='time',
field=models.DateTimeField(),
),
]
| [
"[email protected]"
] | |
6c1561ad94133e8d8301c032b4957e2746465fd0 | 69d1c9de1b2c871dcc63d39cbd280cd03d79a807 | /config/wsgi.py | 8a859836874c0b70b97fbd4e992b3177fbf5744c | [
"MIT"
] | permissive | tkovalsky/gas | e31b8e88641f799e227abfbbf7485d67189795b5 | 2d31b2cbe3b630667e2b53c2595cf27c5f5a54f6 | refs/heads/master | 2020-12-02T08:11:26.501904 | 2017-07-10T14:36:42 | 2017-07-10T14:36:42 | 96,785,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,914 | py | """
WSGI config for gas project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# gas directory.
app_path = os.path.dirname(os.path.abspath(__file__)).replace('/config', '')
sys.path.append(os.path.join(app_path, 'gas'))
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
application = Sentry(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| [
"[email protected]"
] | |
6f9d1ca2158cb61dd3de68ba6dd7d2cfc346bba9 | ae4beeba76214703baf8e216925768b9ac61a532 | /cancel_all_orders_app/models/sale_order_inherit.py | 07aa5ec22ef8721f4a1617cae5347c1abeed9c1a | [] | no_license | hassanfadl/anavale-produce | fcf8fa92d8efdd6ee6cefcae5a39f0b01a5691b0 | 5c7f338ddec74278b1f267d2ea97548f0702c8a6 | refs/heads/master | 2023-07-07T17:53:59.135708 | 2021-09-08T04:32:16 | 2021-09-08T04:32:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | # -*- coding: utf-8 -*-
from odoo import api, fields, models, _
class sale_order(models.Model):
_inherit = 'sale.order'
def action_cancel(self):
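        """Cascade the cancellation: cancel open pickings, reset posted
        invoices to draft and cancel them, clearing their names (presumably
        so the invoice sequence number is freed -- an assumption, the module
        does not state the intent), then run the standard sale.order cancel."""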
for picking in self.picking_ids:
if picking.state != 'cancel':
picking.action_cancel()
        for invoice in self.invoice_ids:
if invoice.state != 'cancel':
invoice.button_draft()
invoice.button_cancel()
invoice.update({'name': '/'})
res = super(sale_order, self).action_cancel()
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | [
"[email protected]"
] | |
12def74e21dd870bff36062191e93cdab5720fce | b697b98db859c061c1174837deee1d6fc47d115e | /tests/spot/margin/test_margin_all_assets.py | 2ee1ab08ffb59ffeea81fbbbc20d749b7bba3c46 | [
"MIT"
] | permissive | leozaragoza/binance-connector-python | 7e684d6e68ff7d580b7e3fa83f952540a79b1120 | 3311d102c9e788e3d71047f0af103c00d1ae2162 | refs/heads/master | 2023-07-15T12:27:50.041388 | 2021-08-22T17:08:38 | 2021-08-22T17:08:38 | 396,354,910 | 3 | 0 | MIT | 2021-08-22T17:08:38 | 2021-08-15T13:12:41 | Python | UTF-8 | Python | false | false | 509 | py | import responses
from tests.util import random_str
from tests.util import mock_http_response
from binance.spot import Spot as Client
mock_item = {"key_1": "value_1", "key_2": "value_2"}
key = random_str()
secret = random_str()
@mock_http_response(responses.GET, "/sapi/v1/margin/allAssets", mock_item, 200)
def test_margin_all_assets():
"""Tests the API endpoint to margin all assets"""
client = Client(key, secret)
response = client.margin_all_assets()
response.should.equal(mock_item)
| [
"[email protected]"
] | |
66c0dd4b17b9f0804800ca2d7b2473bd423f4fd4 | 3034cb06289f747066571c4ab54ca81996c22319 | /module_utils/RubrikLib_Int/rubrik_lib_int/models/managed_volume_snapshot_config.py | 8920056e191dcbe78417b4517c725d86d507ed9f | [] | no_license | tarunactivity/ansible-rubrik | b2f644805f13a553bd0635e6ddc230257d125ef7 | 5d978c23902fd32d92cc90c75e48e5fe2209f8e0 | refs/heads/master | 2023-04-29T04:25:26.834701 | 2023-04-20T21:58:47 | 2023-04-20T21:58:47 | 116,251,368 | 0 | 0 | null | 2018-01-04T11:18:38 | 2018-01-04T11:18:37 | null | UTF-8 | Python | false | false | 812 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ManagedVolumeSnapshotConfig(Model):
"""ManagedVolumeSnapshotConfig.
:param retention_config:
:type retention_config: :class:`BaseOnDemandSnapshotConfig
<rubriklib_int.models.BaseOnDemandSnapshotConfig>`
"""
_attribute_map = {
'retention_config': {'key': 'retentionConfig', 'type': 'BaseOnDemandSnapshotConfig'},
}
def __init__(self, retention_config=None):
self.retention_config = retention_config
| [
"[email protected]"
] | |
1154529d53964c2a56b317c0897fb25cef84cd99 | 842496e8a5fc5b2b58300a5882784083a3c65e52 | /quantumflow/xqiskit_test.py | c72453a02ee4394b7f73733d4343b12f55fd185b | [
"Apache-2.0"
] | permissive | alexgalda/quantumflow-dev | b687f8dd5d5f51e3ace66a339e2da11e428efcf4 | be6a4d0527c8a9b3d936dcb11f04bb68962b1bf8 | refs/heads/master | 2023-08-11T18:49:20.880858 | 2021-08-17T20:47:42 | 2021-08-17T20:47:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,394 | py | # Copyright 2019-, Gavin E. Crooks and contributors
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""
Unit tests for quantumflow.xqiskit
"""
# fmt: off
import pytest; pytest.importorskip("qiskit") # noqa: E702
# fmt: on
from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister
import quantumflow as qf
from quantumflow.xqiskit import (
QiskitSimulator,
circuit_to_qasm,
circuit_to_qiskit,
qasm_to_circuit,
qiskit_to_circuit,
translate_to_qiskit,
)
def test_qiskit_to_circuit() -> None:
q = QuantumRegister(5)
c = ClassicalRegister(5)
qc = QuantumCircuit(q, c)
qc.ccx(q[0], q[1], q[2])
qc.ch(q[0], q[1])
qc.crz(0.1, q[0], q[1])
qc.cswap(q[0], q[1], q[2])
# The QuantumCircuit.cu1 method is deprecated as of 0.16.0.
# You should use the QuantumCircuit.cp method instead, which acts identically.
# The QuantumCircuit.cu3 method is deprecated as of 0.16.0.
# You should use the QuantumCircuit.cu method instead,
# where cu3(ϴ,φ,λ) = cu(ϴ,φ,λ,0).
# The QuantumCircuit.u1 method is deprecated as of 0.16.0.
# You should use the QuantumCircuit.p method instead, which acts identically.
# The QuantumCircuit.u2 method is deprecated as of 0.16.0.
# You can use the general 1-qubit gate QuantumCircuit.u instead:
# u2(φ,λ) = u(π/2, φ, λ).
# Alternatively, you can decompose it in terms of QuantumCircuit.p
# and QuantumCircuit.sx
# u2(φ,λ) = p(π/2+φ) sx p(π/2+λ) (1 pulse on hardware).
# The QuantumCircuit.u3 method is deprecated as of 0.16.0.
# You should use QuantumCircuit.u instead, which acts identically.
# Alternatively, you can decompose u3 in terms of QuantumCircuit.p
# and QuantumCircuit.sx
# u3(ϴ,φ,λ) = p(φ+π) sx p(ϴ+π) sx p(λ) (2 pulses on hardware).
qc.cp(0.1, q[0], q[1])
# qc.cu3(0.1, 0.2, 0.3, q[0], q[1])
qc.cx(q[0], q[1])
qc.cy(q[0], q[1])
qc.cz(q[0], q[1])
qc.h(q[0])
qc.i(q[1])
qc.i(q[2])
qc.rx(0.0, q[0])
qc.ry(0.1, q[1])
qc.rz(0.2, q[2])
qc.rzz(0.1, q[0], q[1])
qc.s(q[2])
qc.sdg(q[2])
qc.swap(q[0], q[1])
qc.t(q[1])
qc.tdg(q[1])
qc.p(0.2, q[2])
qc.u(0.1, 0.2, 0.3, q[2])
qc.x(q[0])
qc.y(q[0])
qc.z(q[0])
circ = qiskit_to_circuit(qc)
# print(circ)
assert (
str(circ)
== """Circuit
CCNot 0 1 2
CH 0 1
CRz(1/10) 0 1
CSwap 0 1 2
CPhase(1/10) 0 1
CNot 0 1
CY 0 1
CZ 0 1
H 0
I 1
I 2
Rx(0) 0
Ry(1/10) 1
Rz(1/5) 2
Rzz(1/10) 0 1
S 2
S_H 2
Swap 0 1
T 1
T_H 1
PhaseShift(1/5) 2
U3(1/10, 1/5, 3/10) 2
X 0
Y 0
Z 0"""
)
circuit_to_qiskit(circ)
def test_qiskit_if() -> None:
q = QuantumRegister(5)
c = ClassicalRegister(5)
qc = QuantumCircuit(q, c)
qc.x(q[2]).c_if(c, 1)
circ = qiskit_to_circuit(qc)
op = circ[0]
assert isinstance(op, qf.If)
assert op.element.name == "X"
assert op.key == c
assert op.value == 1
def test_circuit_to_qiskit() -> None:
circ = qf.Circuit()
circ += qf.X(0)
circ += qf.Y(1)
circ += qf.Z(2)
circ += qf.Can(0.1, 0.2, 0.2, 0, 1)
circ1 = translate_to_qiskit(circ)
print()
print(qf.circuit_to_diagram(circ1))
qc = circuit_to_qiskit(circ, translate=True)
print(qc)
assert len(circ1) == len(qc)
def test_qiskitsimulator() -> None:
circ = qf.Circuit()
circ += qf.H(1)
circ += qf.X(0)
circ += qf.H(2)
circ += qf.Y(3)
circ += qf.Z(2)
circ += qf.Can(0.1, 0.2, 0.2, 0, 1)
circ += qf.V(0)
# circ += qf.V(2).H # Gets converted, but not supported by Aer simulator!?
circ += qf.CV(2, 3)
sim = QiskitSimulator(circ)
assert qf.states_close(circ.run(), sim.run())
ket0 = qf.random_state([0, 1, 2, 3])
assert qf.states_close(circ.run(ket0), sim.run(ket0))
def test_circuit_to_qasm() -> None:
circ = qf.Circuit()
circ += qf.X(0)
circ += qf.Y(1)
circ += qf.Z(2)
circ += qf.Can(0.1, 0.2, 0.2, 0, 1)
qc = circuit_to_qasm(circ, translate=True)
# print(qc)
circ2 = qasm_to_circuit(qc)
assert qf.circuits_close(circ2, circ)
circuit_to_qasm(circ2, translate=False)
# print(qc2)
# fin
| [
"[email protected]"
] | |
6dd20bbc1436d8a71d799929b1889d0f94ef56ed | cc76d01ed60e02c01fac76f147fcd09c51bc7977 | /city_scrapers/spiders/alle_improvements.py | b30e9485f81d88c7139ceebadd5dd6cf879266ac | [
"MIT"
] | permissive | mgermaine93/city-scrapers-pitt | bab2b16f058d061fc34896829715279d711fbab1 | 065221765e0153df867cf10d3558df09c627aed1 | refs/heads/master | 2020-08-06T09:17:51.576690 | 2020-02-22T17:19:36 | 2020-02-22T17:19:36 | 212,921,375 | 1 | 0 | MIT | 2019-10-05T00:10:11 | 2019-10-05T00:10:10 | null | UTF-8 | Python | false | false | 5,205 | py | import datetime
import re
from urllib.parse import urljoin
from city_scrapers_core.constants import NOT_CLASSIFIED
from city_scrapers_core.items import Meeting
from city_scrapers_core.spiders import CityScrapersSpider
from scrapy.utils.response import get_base_url
RE_URL = re.compile(r'(?P<date>(\d{1,2}-\d{1,2}-\d{1,2}))-(?P<dtype>(\w+))\.aspx')
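# e.g. "10-16-19-agenda.aspx" matches with date "10-16-19", dtype "agenda"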
def construct_dt(date_str, time_str):
return datetime.datetime.strptime('{} {}'.format(date_str, time_str), '%B %d, %Y %I:%M %p')
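# Quick sanity check of the format string above (illustrative values):
# construct_dt("July 1, 2020", "5:00 PM") -> datetime.datetime(2020, 7, 1, 17, 0)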
class AlleImprovementsSpider(CityScrapersSpider):
name = "alle_improvements"
agency = "Allegheny County Authority for Improvements in Municipalities (AIM)"
timezone = "America/New_York"
allowed_domains = ["county.allegheny.pa.us"]
start_urls = [
(
"https://www.county.allegheny.pa.us/economic-development/"
"authorities/meetings-reports/aim/meetings.aspx"
),
]
def parse(self, response):
data = response.xpath("//table[@dropzone='copy']")
time_str = self._parse_start_time(data)
date_strs = self._parse_dates(data)
location = self._parse_location(data)
assert time_str is not None
agenda_links, minute_links = self._parse_pdf_links(response)
no_item = None
for ds in date_strs:
start = construct_dt(ds, time_str)
meeting = Meeting(
title=self._parse_title(no_item),
description=self._parse_description(no_item),
classification=self._parse_classification(no_item),
start=start,
end=self._parse_end(no_item),
all_day=self._parse_all_day(no_item),
time_notes=self._parse_time_notes(no_item),
location=location,
links=self._parse_links(ds, agenda_links, minute_links),
source=self._parse_source(response),
)
meeting["status"] = self._get_status(meeting)
meeting["id"] = self._get_id(meeting)
yield meeting
def _parse_title(self, item):
"""Parse or generate meeting title."""
return (
"Authority For Improvements In Municipalities Board Of Directors "
"Regular And Public Hearing"
)
def _parse_description(self, item):
"""Parse or generate meeting description."""
return ""
def _parse_classification(self, item):
"""Parse or generate classification from allowed options."""
return NOT_CLASSIFIED
def _parse_dates(self, data):
"""Helper to extract list of meeting dates"""
raw = data.xpath(".//td[contains(., 'Schedule')]/following-sibling::td//p/text()").extract()
return [' '.join(r.strip().split()) for r in raw if r.strip()]
def _parse_start_time(self, data):
"""Helper to extract time str of meeting"""
tmp = data.xpath(".//td[contains(., 'Time')]/following-sibling::td/text()").extract_first()
return ' '.join(tmp.split())
def _parse_end(self, item):
"""Parse end datetime as a naive datetime object. Added by pipeline if None"""
return None
def _parse_time_notes(self, item):
"""Parse any additional notes on the timing of the meeting"""
return ""
def _parse_all_day(self, item):
"""Parse or generate all-day status. Defaults to False."""
return False
def _parse_location(self, item):
"""Parse or generate location."""
raw = [
r.strip() for r in
item.xpath(".//td[contains(., 'Location')]/following-sibling::td/text()").extract()
]
return {
"address": '\n'.join(raw[1:]),
"name": raw[0],
}
def _parse_source(self, response):
"""Parse or generate source."""
return response.url
def _parse_pdf_links(self, response):
"""Generate dict of (date, link) key values for agenda and minutes"""
urls = response.xpath(
'//a[contains(@href, "-minutes.aspx") or contains(@href, "-agenda.aspx")]/@href'
).extract()
agendas = {}
minutes = {}
for url in urls:
tmp = url.split('/')[-1]
try:
parsed = RE_URL.search(tmp).groupdict()
except Exception:
continue
dtype = parsed.get('dtype')
date = parsed.get('date')
if dtype is None or date is None:
continue
full_url = urljoin(get_base_url(response), url)
if dtype == 'minutes':
minutes[date] = full_url
elif dtype == 'agenda':
agendas[date] = full_url
return agendas, minutes
def _parse_links(self, date_str, agenda_links, minute_links):
links = []
dsx = datetime.datetime.strptime(date_str, "%B %d, %Y").strftime('%m-%d-%y')
if dsx in agenda_links:
links.append({"href": agenda_links[dsx], "title": "Agenda {}".format(dsx)})
if dsx in minute_links:
links.append({"href": minute_links[dsx], "title": "Minutes {}".format(dsx)})
return links
| [
"[email protected]"
] | |
8467f2528c243c59a8ebadf9fe3d46467de150c2 | 7edf4672013e38997b85baa4d25a6a96d69973e2 | /mon_application/views.py | 35990c65a53e2d5c3bc5c2f235fd3b0ebdd32a01 | [] | no_license | Sahiralelo242/eces | b7062388523a57c9156d295bdb898b9769b8a8fe | 224b2c14810aca8d4011eb8af4eda58c0e801d9b | refs/heads/master | 2020-06-05T23:27:24.120107 | 2019-06-18T16:48:10 | 2019-06-18T16:48:10 | 192,574,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
from .models import etudiant as et
def index(request):
# return HttpResponse('<h1>Hello : Accueil</h1>')
return render(request, 'mon_application/index.html')
def all_etudiant(request):
etudiants = et.objects.all().order_by("Option")
return render(request, 'mon_application/etudiants.html', {'etudiants':etudiants})
def etudiant(request, id):
number = int(id)
student = get_object_or_404(et, id=number)
# return HttpResponse('<h1>Hello : Etudiant id('+str(id)+') </h1>')
return render(request, 'mon_application/etudiant.html', {'student' : student})
| [
"[email protected]"
] | |
20881dbf39aee2b47bb7e25a010cd73e49dcb880 | 83d986f0a6be9dae3d2833a7a7fb04a9af75b649 | /manage.py | db9eb395a8c365413bc6c9000da33a2ef3f4e977 | [] | no_license | anushabobba1/my-first-blog | 1d613dc4321ef325988779e550fcfd628c7965a4 | 8d7669a9f75f032ddf067e286ac5dfa744109e2f | refs/heads/master | 2020-03-26T03:53:42.578572 | 2018-08-13T08:29:00 | 2018-08-13T08:29:00 | 144,475,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "secondpj.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
4ccfbf9e663df0a56be2a4fbeb8c8b3741c34d58 | c404dce8809d1a1d9828a5c92d3eede96bad5487 | /igdiscover/group.py | 25b5e13c6704d03045effe0bd3b63a6f42c8d3c3 | [
"MIT"
] | permissive | mateuszatki/IgDiscover | 8b560297d64e5b9e2c65408c43214cc78f235558 | b6f1bdcdf75ddae2af154d41bb21fedff60115a1 | refs/heads/master | 2020-03-29T08:59:11.699494 | 2018-11-19T21:51:50 | 2018-11-19T21:51:51 | 149,736,075 | 0 | 0 | null | 2018-09-21T08:46:21 | 2018-09-21T08:46:21 | null | UTF-8 | Python | false | false | 12,025 | py | """
Group sequences that share a barcode (molecular identifier, MID)
Since the same barcode can sometimes be used by different sequences, the CDR3
sequence can further be used to distinguish sequences. You can choose between
using either a 'pseudo CDR3' sequence, which encompasses by default bases 80
to 61 counted from the 3' end. Or you can use the real CDR3 detected with a
regular expression.
If grouping by CDR3s is enabled, sequences with identical barcode and CDR3
must additionally have a similar length. If the length differs by more than
2 bp, they are put into different groups.
The barcode can be in the 5' end or the 3' end of the sequence.
Use --trim-g to remove initial runs of G at the 5' end (artifact from RACE protocol).
These are removed after the barcode is removed.
For all the found groups, one sequence is output to standard output (in FASTA
format). Which sequence that is depends on the group size:
- If the group consists of a single sequence, that sequence is output
- If the group consists of two sequences, one of them is picked randomly
- If the group has at least three sequences, a consensus is computed. The
consensus is output if it contains no ambiguous bases. Otherwise, also here a
random sequence is chosen.
"""
# NOTES
#
# - Different lengths of the initial G run cannot be used to distinguish sequences
# since they can come from polymerase problems in homopolymers.
# - There are some indels in homopolymers (probably PCR problem)
# - There are also regular sequencing errors in the initial run of G nucleotides.
# - Some paired reads aren’t correctly merged into single reads. They end up being
# too long.
# - When grouping by barcode and pseudo CDR3, sequence lengths vary within groups.
# However, this affects only ~1% of sequences, so it is not necessary to compute
# a multiple alignment. Just taking the consensus will drown the incorrect
# sequences, at least if the group size is large.
# - It does not hurt to reduce the minimum number of sequences per group for
# taking a consensus to 2, but it also does not help much (results in 0.5% more
# sequences). (The consensus is only successful if both sequences are identical.)
# However, since this is also a simple way to deal with exact duplicates, we do
# it anyway and can then skip the separate duplicate removal step (VSEARCH).
# TODO
# - Use pandas.DataFrame
import csv
import sys
import logging
from collections import Counter, defaultdict
from contextlib import ExitStack
from itertools import islice
import json
from sqt.align import consensus
from sqt import SequenceReader
from xopen import xopen
from .species import find_cdr3
from .cluster import Graph
from .utils import slice_arg
# minimum number of sequences needed for attempting to compute a consensus
MIN_CONSENSUS_SEQUENCES = 3
logger = logging.getLogger(__name__)
def add_arguments(parser):
arg = parser.add_argument
group = parser.add_mutually_exclusive_group()
group.add_argument('--real-cdr3', action='store_true', default=False,
help='In addition to barcode, group sequences by real CDR3 (detected with regex).')
group.add_argument('--pseudo-cdr3', nargs='?', default=None,
type=slice_arg, const=slice(-80, -60), metavar='START:END',
help='In addition to barcode, group sequences by pseudo CDR3. '
'If START:END is omitted, use -80:-60.')
arg('--groups-output', metavar='FILE', default=None,
help='Write tab-separated table with groups to FILE')
arg('--plot-sizes', metavar='FILE', default=None,
help='Plot group sizes to FILE (.png or .pdf)')
arg('--limit', default=None, type=int, metavar='N',
help='Limit processing to the first N reads')
arg('--trim-g', action='store_true', default=False,
help="Trim 'G' nucleotides at 5' end")
arg('--minimum-length', '-l', type=int, default=0,
help='Minimum sequence length')
arg('--barcode-length', '-b', type=int, default=12,
help="Length of barcode. Positive for 5' barcode, negative for 3' barcode. Default: %(default)s")
arg('--json', metavar="FILE", help="Write statistics to FILE")
arg('fastx', metavar='FASTA/FASTQ',
help='FASTA or FASTQ file (can be gzip-compressed) with sequences')
def hamming_neighbors(s):
"""Return sequences that are at hamming distance 1 and return also s itself"""
for i in range(len(s)):
for c in 'ACGT':
if s[i] != c:
yield s[:i] + c + s[i+1:]
yield s
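# For example, hamming_neighbors("AC") yields, in this order, "CC", "GC",
# "TC" (substitutions at position 0), "AA", "AG", "AT" (position 1), then "AC".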
def cluster_sequences(records):
"""
Single-linkage clustering. Two sequences are linked if
- their (pseudo-) CDR3 sequences have a hamming distance of at most 1
- and their lengths differs by at most 2.
"""
if len(records) == 1: # TODO check if this helps
return [records]
# Cluster unique CDR3s first
cdr3s = set(r.cdr3 for r in records)
sorted_cdr3s = sorted(cdr3s) # For reproducibility
graph = Graph(sorted_cdr3s)
for cdr3 in sorted_cdr3s:
for neighbor in hamming_neighbors(cdr3):
if neighbor in cdr3s:
graph.add_edge(cdr3, neighbor)
cdr3_components = graph.connected_components()
# Maps CDR3 sequence to list of records of sequence that have that CDR3
cdr3_records = defaultdict(list)
for r in records:
cdr3_records[r.cdr3].append(r)
components = []
for cdr3_component in cdr3_components:
component_records = []
for cdr3 in cdr3_component:
component_records.extend(cdr3_records[cdr3])
component_records.sort(key=lambda r: len(r.sequence))
component = []
prev_length = None
for r in component_records:
l = len(r.sequence)
if prev_length is not None and l > prev_length + 2:
# Start a new component
components.append(component)
component = []
component.append(r)
prev_length = l
if component:
components.append(component)
assert sum(len(component) for component in components) == len(records)
assert all(components) # Components must be non-empty
return components
GROUPS_HEADER = ['barcode', 'cdr3', 'name', 'sequence']
def write_group(csvfile, barcode, sequences, with_cdr3):
for sequence in sequences:
row = [barcode, sequence.name.split(maxsplit=1)[0], sequence.sequence]
if with_cdr3:
row[1:1] = [sequence.cdr3]
csvfile.writerow(row)
csvfile.writerow([])
def collect_barcode_groups(
fastx, barcode_length, trim_g, limit, minimum_length, pseudo_cdr3, real_cdr3):
"""
fastx -- path to FASTA or FASTQ input
"""
group_by_cdr3 = pseudo_cdr3 or real_cdr3
if group_by_cdr3:
cdr3s = set()
# Map barcodes to lists of sequences
barcodes = defaultdict(list)
n = 0
too_short = 0
regex_fail = 0
with SequenceReader(fastx) as f:
for record in islice(f, 0, limit):
if len(record) < minimum_length:
too_short += 1
continue
if barcode_length > 0:
barcode = record.sequence[:barcode_length]
unbarcoded = record[barcode_length:]
else:
barcode = record.sequence[barcode_length:]
unbarcoded = record[:barcode_length]
if trim_g:
# The RACE protocol leads to a run of non-template Gs in the beginning
# of the sequence, after the barcode.
unbarcoded.sequence = unbarcoded.sequence.lstrip('G')
if unbarcoded.qualities:
unbarcoded.qualities = unbarcoded.qualities[-len(unbarcoded.sequence):]
if real_cdr3:
match = find_cdr3(unbarcoded.sequence, chain='VH')
if match:
cdr3 = unbarcoded.sequence[match[0]:match[1]]
else:
regex_fail += 1
continue
elif pseudo_cdr3:
cdr3 = unbarcoded.sequence[pseudo_cdr3]
if group_by_cdr3:
unbarcoded.cdr3 = cdr3 # TODO slight abuse of Sequence objects
cdr3s.add(cdr3)
barcodes[barcode].append(unbarcoded)
n += 1
logger.info('%s sequences in input', n + too_short + regex_fail)
logger.info('%s sequences long enough', n + regex_fail)
if real_cdr3:
logger.info('Using the real CDR3')
logger.info('%s times (%.2f%%), the CDR3 regex matched', n, n / (n + regex_fail) * 100)
elif pseudo_cdr3:
logger.info('Using the pseudo CDR3')
if group_by_cdr3:
logger.info('%s unique CDR3s', len(cdr3s))
return barcodes
def plot_sizes(sizes, path):
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib
import seaborn as sns
sns.set()
fig = Figure()
matplotlib.rcParams.update({'font.size': 14})
FigureCanvas(fig)
ax = fig.add_subplot(111)
v, _, _ = ax.hist(sizes, bins=100)
ax.set_ylim(0, v[1:].max() * 1.1)
ax.set_xlabel('Group size')
ax.set_ylabel('Read frequency')
ax.set_title('Histogram of group sizes (>1)')
ax.grid(axis='x')
ax.tick_params(direction='out', top=False, right=False)
fig.set_tight_layout(True)
fig.savefig(path)
logger.info('Plotted group sizes to %r', path)
def main(args):
if args.barcode_length == 0:
sys.exit("Barcode length must be non-zero")
group_by_cdr3 = args.pseudo_cdr3 or args.real_cdr3
barcodes = collect_barcode_groups(args.fastx, args.barcode_length, args.trim_g,
args.limit, args.minimum_length, args.pseudo_cdr3, args.real_cdr3)
logger.info('%s unique barcodes', len(barcodes))
barcode_singletons = sum(1 for seqs in barcodes.values() if len(seqs) == 1)
logger.info('%s barcodes used by only a single sequence (singletons)', barcode_singletons)
with ExitStack() as stack:
if args.groups_output:
group_out = csv.writer(stack.enter_context(
xopen(args.groups_output, 'w')), delimiter='\t', lineterminator='\n')
group_out.writerow(GROUPS_HEADER)
else:
group_out = None
too_few = 0
n_clusters = 0
n_singletons = 0
n_consensus = 0
n_ambiguous = 0
sizes = []
for barcode in sorted(barcodes):
sequences = barcodes[barcode]
if len(sequences) != len(set(s.name for s in sequences)):
logger.error('Duplicate sequence records detected')
sys.exit(1)
if group_by_cdr3:
clusters = cluster_sequences(sequences) # it’s a list of lists
else:
# TODO it would be useful to do the clustering by length that cluster_sequences() does
clusters = [sequences]
n_clusters += len(clusters)
for cluster in clusters:
sizes.append(len(cluster))
if group_out:
write_group(group_out, barcode, cluster, with_cdr3=group_by_cdr3)
if len(cluster) == 1:
n_singletons += 1
if len(cluster) < MIN_CONSENSUS_SEQUENCES:
too_few += 1
sequence = cluster[0].sequence
name = cluster[0].name
if group_by_cdr3:
cdr3 = cluster[0].cdr3
else:
cons = consensus({s.name: s.sequence for s in cluster}, threshold=0.501)
if 'N' in cons:
# Pick the first sequence as the output sequence
sequence = cluster[0].sequence
name = cluster[0].name
if group_by_cdr3:
cdr3 = cluster[0].cdr3
n_ambiguous += 1
else:
sequence = cons
n_consensus += 1
if group_by_cdr3:
cdr3 = Counter(cl.cdr3 for cl in cluster).most_common(1)[0][0]
name = 'consensus{}'.format(n_consensus)
name = name.split(maxsplit=1)[0]
if name.endswith(';'):
name = name[:-1]
if group_by_cdr3:
print('>{};barcode={};cdr3={};size={};\n{}'.format(name, barcode,
cdr3, len(cluster), sequence))
else:
print('>{};barcode={};size={};\n{}'.format(name, barcode,
len(cluster), sequence))
logger.info('%d clusters (%d singletons)', n_clusters, n_singletons)
logger.info('%d consensus sequences computed (from groups that had at least %d sequences)',
n_consensus + n_ambiguous, MIN_CONSENSUS_SEQUENCES)
logger.info('%d of those had no ambiguous bases', n_consensus)
if args.groups_output:
logger.info('Groups written to %r', args.groups_output)
assert sum(sizes) == sum(len(v) for v in barcodes.values())
if args.json:
sizes_counter = Counter(sizes)
stats = {
'unique_barcodes': len(barcodes),
'barcode_singletons': barcode_singletons,
'groups_written': n_clusters,
'group_size_1': sizes_counter[1],
'group_size_2': sizes_counter[2],
'group_size_3plus': sum(v for k, v in sizes_counter.items() if k >= 3),
}
with open(args.json, 'w') as f:
json.dump(stats, f, indent=2)
print(file=f)
if args.plot_sizes:
plot_sizes(sizes, args.plot_sizes)
| [
"[email protected]"
] | |
0c3bcf5e3e846d0f1956e7384598d303ebdbd8ca | f0e31656a1bf16b1c0e6fd3b43812bf69a201b69 | /song_match/exceptions/exceptions.py | 2a700d6b1625e47b5a16957eb804b6be89f4b111 | [
"MIT"
] | permissive | gbroques/cozmo-song-match | 0afb6cbc52447ceea3a7eb14032b46b78719481a | 7cea763cc3fe63adc3a7e2dc226bd8b48ce72f71 | refs/heads/master | 2022-12-10T06:02:44.462450 | 2018-05-15T12:19:06 | 2018-05-15T12:19:06 | 125,294,282 | 8 | 3 | MIT | 2022-12-08T00:56:00 | 2018-03-15T01:14:23 | Python | UTF-8 | Python | false | false | 1,492 | py | class InvalidNote(ValueError):
"""Raise if an invalid note occurs."""
def __init__(self, note):
message = self._message(note)
super(InvalidNote, self).__init__(message)
@staticmethod
def _message(note: str) -> str:
return 'Invalid note "' + note + '".'
class MixerNotInitialized(ValueError):
"""Raise if constructing a :class:`~song_match.song.note.Note` instance before initializing the mixer."""
def __init__(self):
message = self._message()
super(MixerNotInitialized, self).__init__(message)
@staticmethod
def _message() -> str:
return ('Mixer not initialized. Please call Note.init_mixer() ' +
'before constructing a new Note instance.')
class InvalidEffectType(ValueError):
"""Raise if an invalid effect type occurs."""
def __init__(self, effect_type):
message = self._message(effect_type)
super(InvalidEffectType, self).__init__(message)
@staticmethod
def _message(effect_type: str) -> str:
return 'Invalid effect type "' + effect_type + '".'
class InvalidGameEffectSound(ValueError):
"""Raise if an invalid game effect sound occurs."""
def __init__(self, game_effect_sound):
message = self._message(game_effect_sound)
super(InvalidGameEffectSound, self).__init__(message)
@staticmethod
def _message(game_effect_sound: str) -> str:
return 'Invalid game effect sound "' + game_effect_sound + '".'
| [
"[email protected]"
] | |
e6401c6678c0d5873fcba13164503fa09b3e3e4f | 392c10f6edecc292ecf2104f70de729bfa16f980 | /Ship.py | 63288c985c8706f4bf300d33f2aab6bb430fbeff | [] | no_license | zhipeter/Pygame | cfc109e7b573677ae438e8e707a9bf01d060268b | 527d10292fc59536978aacb83114e07a3992816e | refs/heads/master | 2021-05-04T13:01:33.922278 | 2018-02-04T07:51:59 | 2018-02-04T07:51:59 | 120,306,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,293 | py | '''
This is a Class of Ship
'''
import pygame
from pygame.sprite import Sprite
class Ship(Sprite):
''' a Class of Ship'''
def __init__(self, set, screen):
'''init ship'''
super(Ship, self).__init__()
self.screen = screen
self.set = set
# load ship
self.image = pygame.image.load('Ship.png')
self.rect = self.image.get_rect()
self.screen_rect = self.screen.get_rect()
        # put the ship at the bottom center of the screen
self.rect.centerx = self.screen_rect.centerx
self.rect.bottom = self.screen_rect.bottom
        # store the horizontal center as a float for smooth fractional movement
self.center = float(self.rect.centerx)
# move flag
self.moving_right = False
self.moving_left = False
def update(self):
'''update ship location'''
if self.moving_right and self.rect.right < self.screen_rect.right:
self.center += self.set.ship_speed_factor
if self.moving_left and self.rect.left > 0:
self.center -= self.set.ship_speed_factor
self.rect.centerx = self.center
def blitme(self):
        '''draw the ship at its current location'''
self.screen.blit(self.image, self.rect)
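    # Usage sketch (hypothetical surrounding code): given a pygame display
    # surface and a settings object exposing `ship_speed_factor`:
    #   ship = Ship(ai_settings, screen)
    #   ship.update()
    #   ship.blitme()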
def center_ship(self):
'''center ship'''
self.center = self.screen_rect.centerx
| [
"[email protected]"
] | |
e53d3f8760e6f3fc1d03e31ca0d6e95e770ca119 | 14675f0c66fb4f4eeaa6ad1e8e691b9edf8f0bdb | /All other combo programs/shallow_copy_vs_deep_copy.py | 90fde947dfc5fb9fd91708b12dd8fa46e8ad2df8 | [] | no_license | abhishekjoshi1991/Python_Learning | 9a94529643eac7394615289e2ecd96106e70ddb8 | a74293d0776304638b5cf976b3534481e57b17f2 | refs/heads/master | 2023-04-16T02:21:30.588052 | 2021-04-21T13:58:29 | 2021-04-21T13:58:29 | 360,176,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,718 | py | #Shallow copy vs deep copy
'''
In Python, we use the = operator to create a copy of an object.
We may think that this creates a new object; it doesn't.
It only creates a new variable that shares the reference
of the original object (a very important point).
If we say
l1 = [1, 2]
l2 = l1
l2.append(10)
print(l1)
print(l2)
then the assignment operator = changes both l1 and l2, since they refer
to the same object. We don't want that when copying, so for mutable
objects like lists, sets etc. we have to use shallow and deep copy.
shallow copy: https://www.youtube.com/watch?v=zHMViQcLFjE
deep copy: https://www.youtube.com/watch?v=Riv6UJ6GFDM
'''
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# 1. Shallow Copy
'''
a shallow copy creates a new object (e.g. a new list) whose elements are
references to the original elements.
shallow copy can be created using 4 methods:
1. using built in functions of list, sets etc
2. using slicing operator
3. using list comprehensions
4. using copy function from copy module
'''
#1. using buit in function
l1=[1,2,3,4]
l2=list(l1)
print('new list:',l2)
l2.append(10)
print('original list:',l1)
print('new list after append:',l2)
print('\n')
#2. using slicing method
l1=[1,2,3,4]
l2=l1[:]
print('new list:',l2)
l2.append(10)
print('original list:',l1)
print('new list after append:',l2 )
print('\n')
#3. using list comprehensions
l1=[1,2,3,4]
l2=[x for x in l1]
print('new list:',l2)
l2.append(10)
l2[2]='a'
print('original list:',l1)
print('new list after append:',l2 )
print('\n')
#4. using copy module
import copy
l1=[1,2,3,4]
l2=copy.copy(l1)
print('new list:',l2)
l2.append(10)
l2[2]='abhi'
print('original list:',l1)
print('new list after append:',l2 )
print('\n')
'''
here in all the above examples, l1 does not change even if
we change l2 with append or the other methods;
thus shallow copy creates a new object (a whole new list at another
memory location)
but
but
but
for nested objects (like nested lists) it won't work: if we try to
change nested elements, then l1 will change along with l2.
see the example below
'''
#shallow copy on nested list-type 1
print('shallow copy on nested lists- type 1')
import copy
l1=[[1,2,3],4,5,6]
l2=copy.copy(l1)
print(l2)
l2.append(100)#here 100 is appended to non nested items
print(l1)
print(l2)
print('\n')
'''
here again only l2 is changed and not l1, as we have appended
items outside the nesting
'''
#shallow copy on nested list-type 2
print('shallow copy on nested lists- type 2')
import copy
l1=[[1,2,3],4,5,6]
l2=copy.copy(l1)
print(l2)
l2[0][1]='abhi'#here index value is changed within nested elements
l2[0].append(150)
print(l1)
print(l2)
print(id(l1)==id(l2))
print('\n')
'''
so here lists l1 and l2 both get changed; this is the behaviour of shallow
copy on nested elements.
though it alters both lists, the memory locations of the two outer lists
will always be different.
so to eliminate this drawback, deep copy is used
'''
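# The sharing can be confirmed with id(): after a shallow copy the outer
# lists differ, but the nested inner list is the very same object.
import copy
l1 = [[1, 2, 3], 4]
l2 = copy.copy(l1)
print(id(l1) == id(l2))        # False: two distinct outer lists
print(id(l1[0]) == id(l2[0]))  # True: the inner list is shared
print('\n')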
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#2. Deep copy
'''
in deep copy, a new object is created just as in shallow copy, which means
the memory locations of l1 and l2 will be different.
but for nested elements, deep copy keeps the original object
as it is; changes only happen in the new object.
so basically deep copy is used for nested items.
its behaviour for non-nested items is the same as shallow copy's.
'''
import copy
l1=[[1,2,3],4,5,6]
l2=copy.deepcopy(l1)
print(l2)
l2.append(150)#appending 150 to non nested items
print(l1)
print(l2)
print('\n')
'''here there is no effect on l1, as we are adding elements to non-nested items'''
import copy
l1=[[1,2,3],4,5,6]
l2=copy.deepcopy(l1)
print(l2)
l2[0].append(150)#appending 150 to nested items
print(l1)
print(l2)
print('\n')
'''here l1 will not change, only l2 will change'''
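# A compact final check (my own addition) contrasting the two copies:
# copy.copy shares the nested list, copy.deepcopy does not.
import copy
inner = [1, 2, 3]
outer = [inner, 4, 5]
shallow = copy.copy(outer)
deep = copy.deepcopy(outer)
print('shallow shares nested list:', shallow[0] is inner)  # True
print('deep shares nested list:', deep[0] is inner)        # False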
| [
"[email protected]"
] | |
be96165ec459e113f9297b64696002b7e98bdbaf | 3c7364af443f88d86d432ba8948349babac4c693 | /nflpool/services/update_nflschedule_service.py | 7a00e7fa8a34f9b1ed8b749d57f9c4b36da87d77 | [
"MIT",
"LicenseRef-scancode-proprietary-license"
] | permissive | prcutler/nflpool | a98f0b462315fa8ce4eb8cf45851d6959e7b493b | a71498befb09601ab184f0db75a548860bc201c6 | refs/heads/master | 2021-07-20T02:38:36.874692 | 2021-06-02T12:00:01 | 2021-06-02T12:00:01 | 48,953,770 | 9 | 6 | MIT | 2021-06-02T12:00:02 | 2016-01-03T17:05:59 | Python | UTF-8 | Python | false | false | 2,083 | py | import requests
from nflpool.data.dbsession import DbSessionFactory
from nflpool.data.nflschedule import NFLSchedule
import nflpool.data.secret as secret
from requests.auth import HTTPBasicAuth
from nflpool.data.seasoninfo import SeasonInfo
import pendulum
"""After updating to a new season, get the NFL game schedule for all 17 weeks including the date of each game
to the database"""
class UpdateScheduleService:
    @classmethod
    def update_nflschedule(
        cls,
        season: int,
        game_id: int,
        game_date: str,
        away_team: int,
        home_team: int,
        week: int,
    ):
        session = DbSessionFactory.create_session()

        season_row = session.query(SeasonInfo).filter(SeasonInfo.id == "1").first()
        season = season_row.current_season

        response = requests.get(
            "https://api.mysportsfeeds.com/v2.0/pull/nfl/"
            + str(season)
            + "-regular/games.json",
            auth=HTTPBasicAuth(secret.msf_api, secret.msf_v2pw),
        )

        schedule_query = response.json()
        team_schedule = schedule_query["games"]
        print(type(team_schedule), team_schedule)

        x = 0
        for schedule in team_schedule:
            game_id = team_schedule[x]["schedule"]["id"]
            week = team_schedule[x]["schedule"]["week"]
            game_time = team_schedule[x]["schedule"]["startTime"]
            away_team = team_schedule[x]["schedule"]["awayTeam"]["id"]
            home_team = team_schedule[x]["schedule"]["homeTeam"]["id"]
            game_date = pendulum.parse(game_time)
            x = x + 1

            season_row = session.query(SeasonInfo).filter(SeasonInfo.id == "1").first()
            season = season_row.current_season

            add_schedule = NFLSchedule(
                game_id=game_id,
                game_date=game_date,
                away_team=away_team,
                home_team=home_team,
                week=week,
                season=season,
            )

            session.add(add_schedule)
            session.commit()
| [
"[email protected]"
] | |
578e926d0c44c40ace20b84c84f3ca157bf41e57 | d85822d6d53f6c2bd258f91ec6d38cfb9eb6b50a | /BrixAIUtils/FSM.py | 86abc9077e7b3a1a50c1263d14f10b4f4df8e178 | [] | no_license | hanhha/BAI | 82766c8cef65f5840a73e23e19c93bd5bb0db3de | 6e5ac718bd29426d6dec1a84ce23ad9450543b5e | refs/heads/master | 2021-05-07T16:04:00.852814 | 2019-02-13T14:58:34 | 2019-02-13T14:58:34 | 108,492,641 | 1 | 0 | null | 2018-09-20T14:52:52 | 2017-10-27T03:03:28 | Python | UTF-8 | Python | false | false | 1,356 | py | class State:
    def __init__ (self, name):
        self.name = name

    def __str__ (self): return self.name

    def __eq__ (self, other):
        # cmp()/__cmp__ are Python 2 only; plain equality works on Python 3
        return self.name == other.name

    def __hash__ (self):
        return hash(self.name)

    def run(self, input, args):
        assert 0, "run not implemented"

    def next(self, input, args):
        assert 0, "next not implemented"


class StateMachine:
    def __init__(self, initialState):
        self._currentState = initialState
        self.state_transition_en = True

    # Template method
    def fastswitch (self, tempState):
        self._prvState = self._currentState
        self._currentState = tempState
        self.state_transition_en = False
        print (self._currentState)

    def withdrawwork (self):
        self._currentState = self._prvState
        self.state_transition_en = True
        print (self._currentState)

    def on_event (self, input, args):
        if self.state_transition_en:
            self._currentState = self.currentState.next(input, args)
            self._currentState.run (input, args)
        else:
            self.transition_disable_info()

    def transition_disable_info (self):
        pass

    @property
    def currentState(self):
        return self._currentState


class Action:
    def __init__ (self, action, prefix):
        self.action = prefix + "_" + action

    def __str__ (self): return self.action

    def __eq__ (self, other):
        return self.action == other.action

    def __hash__ (self):
        return hash(self.action)
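# Minimal usage sketch (my own illustration - the concrete states are
# hypothetical, not part of this module): subclass State, return the next
# state from next(), and feed events to a StateMachine.
#
# class Idle(State):
#     def run(self, input, args): print("idle got", input)
#     def next(self, input, args): return RUNNING if input == "start" else IDLE
#
# class Running(State):
#     def run(self, input, args): print("running got", input)
#     def next(self, input, args): return IDLE if input == "stop" else RUNNING
#
# IDLE, RUNNING = Idle("idle"), Running("running")
# machine = StateMachine(IDLE)
# machine.on_event("start", None)   # transitions to RUNNING, then runs it
# machine.on_event("stop", None)    # transitions back to IDLE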
| [
"[email protected]"
] | |
a0725e5f3d1d8a9712664bada715e224b653f8a3 | 00321376218d69611c393ded60d4489b4f6f2cde | /demoserver/comment.py | 62bfb52b2fb858bb7bb6ade6f13ff4a1b34d9f30 | [
"MIT"
] | permissive | TED-996/krait | 3d6417b129ef291de2d9d10c3dd30cf0852ca3ee | 82d8025d0daea10b01e2d3ea832c87d2b9c268e1 | refs/heads/master | 2021-01-19T07:06:28.294843 | 2017-06-27T19:34:05 | 2017-06-27T19:34:05 | 76,179,849 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | import krait
import sqlite3
import datetime
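# NOTE (clarification): sqlite_db is not defined in this file; the krait demo
# server presumably supplies it from an init/config script as the path to the
# SQLite database backing the /db page.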
post_form = krait.request.get_post_form()
name = post_form["name"]
message = post_form["text"]
krait.response = krait.ResponseRedirect("/db")
conn = sqlite3.connect(sqlite_db)
c = conn.cursor()
c.execute("insert into messages values(?, ?)", (name, message))
conn.commit()
conn.close()
new_cookie = krait.cookie.Cookie("comment_count", str(int(krait.cookie.get_cookie("comment_count", "0")) + 1))
new_cookie.set_expires(datetime.datetime.utcnow() + datetime.timedelta(minutes=1))
new_cookie.set_http_only(True)
krait.cookie.set_cookie(new_cookie)
| [
"[email protected]"
] | |
0f320c2f2deccd1de76ec49f2d0ad97cc4712791 | 17e0468172ce82075174bd81b8b71a1f4b55711f | /binding.gyp | 7c6c8976bca513a966d765c13588ed7624962477 | [] | no_license | thynson/yescrypt-napi | 5bc7f4ef99bc0ebf455ac0b90f22f5c2a5f65ec1 | 29667baf2890b60b1f913fd9fb327b92d0994192 | refs/heads/master | 2023-07-26T11:24:23.882912 | 2021-09-03T13:28:17 | 2021-09-03T13:28:17 | 401,596,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | gyp | {
"targets": [
{
"target_name": "binding",
"sources": [
"deps/yescrypt/yescrypt.h",
"deps/yescrypt/yescrypt-opt.c",
"deps/yescrypt/insecure_memzero.h",
"deps/yescrypt/insecure_memzero.c",
"deps/yescrypt/sha256.h",
"deps/yescrypt/sha256.c",
"src/yescrypt-napi.cpp"
],
"include_dirs": [
"deps/yescrypt",
"<!@(node -p \"require('node-addon-api').include\")"
],
"defines": ["NAPI_DISABLE_CPP_EXCEPTIONS"]
}
]
} | [
"[email protected]"
] | |
7fd70293457a94430158a400b39c08d977d645bc | bb24a4bda69ec65c82d8ad5dc240a67369d25505 | /meetings/admin.py | d3fbbb522f9df678016e7faef6e6d402241f2e09 | [] | no_license | KBHarris/meeting_planner | 01cc7e0d99d424b03a2af2d33b41fd9864a00e60 | 564f485901a541bc188cd2cc802980d859804089 | refs/heads/main | 2023-06-29T12:18:36.058652 | 2021-07-29T18:53:23 | 2021-07-29T18:53:23 | 390,448,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | from django.contrib import admin
from .models import Meeting, Room
# Register your models here.
admin.site.register(Meeting)
admin.site.register(Room)
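# Illustrative alternative (my own sketch, not from the original app): the
# decorator form with a ModelAdmin lets you customize the changelist, e.g.
#
# @admin.register(Meeting)
# class MeetingAdmin(admin.ModelAdmin):
#     list_display = ("id",)
#
# (Only one registration per model is allowed, so this would replace the
# plain register() call above.)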
| [
"[email protected]"
] | |
83e3e34ab7cfc7442d87aa3441792457bae890c5 | 7e7ad12d02702cd72251bf26ad502d8ede23fb92 | /day25.py | 9fda0f12f421234e846ded3466aa0226a8a47748 | [] | no_license | snowbagoly/adventofcode2018 | 6cc9a4a1f903e62bf9de78f4c797ed69c47d5ad3 | 2ed30ac2342d1a5d7325b962cd2244215649c4a1 | refs/heads/master | 2020-04-09T03:28:08.414529 | 2018-12-27T22:10:27 | 2018-12-27T22:10:27 | 159,983,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | from aocd import get_data
import re
coord_regex = re.compile("(-?\d+),(-?\d+),(-?\d+),(-?\d+)")
s = get_data(day=25,year=2018)
def calculate_distance(p1, p2):
    # Manhattan distance in 4D
    return sum(map(lambda a, b: abs(a - b), p1, p2))

# Python 2 style: map() returns a list here, so len()/indexing below work
points = map(lambda line: map(int, coord_regex.search(line).group(1, 2, 3, 4)), s.split("\n"))

# Build an adjacency list: two points are linked if their distance is <= 3
edges = {i: [] for i in range(len(points))}
for i in range(len(points) - 1):
    for j in range(i + 1, len(points)):
        if calculate_distance(points[i], points[j]) <= 3:
            edges[i].append(j)
            edges[j].append(i)

# Count connected components with BFS; each sweep consumes one constellation
points_left = set(range(len(points)))
counter = 0
while points_left:
    counter += 1
    q = [next(iter(points_left))]
    points_left.remove(q[0])
    while q:
        p = q.pop(0)
        for n in edges[p]:
            if n in points_left:
                q.append(n)
                points_left.remove(n)
print(counter) | [
"[email protected]"
] | |
683992abaf16fac2451d15ee9054fa439fb1dd68 | f985402d90e1861852d45e65b32e0f0c7592c265 | /publisher.py | be072be1ddc9e6d8ebf31e54fd088a15fd13e020 | [] | no_license | tecd0721/example-py-docker-iothub | fce558e755ad5cfedb862e5b74381c6d3d5da595 | bbc91251ff243dda7429e78eec7ffc2b73d71030 | refs/heads/master | 2022-12-17T06:56:35.845314 | 2019-08-07T07:09:19 | 2019-08-07T07:09:19 | 294,050,246 | 0 | 0 | null | 2020-09-09T08:27:00 | 2020-09-09T08:26:59 | null | UTF-8 | Python | false | false | 695 | py | import paho.mqtt.client as mqtt
import random
#externalHosts
broker="xx.81.xx.10"
#mqtt_port
mqtt_port=1883
#mqtt_username
username="xxxxxxxx-b76f-43e9-8b35-xxxxxxxxf941:xxxxxxxx-c438-4aee-8a0f-bbc791afd307"
password="xxxxxxxxsP8VJZXBb32Z5JNwn"
def on_publish(client, userdata, result):  # create function for callback
    print("data published")
client= mqtt.Client() #create client object
client.username_pw_set(username,password)
client.on_publish = on_publish #assign function to callback
client.connect(broker,mqtt_port) #establish connection
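# A matching subscriber sketch (my own addition, reusing the same assumed
# broker and credentials) that would verify the value published below:
#
# def on_message(client, userdata, msg):
#     print(msg.topic, msg.payload)
#
# sub = mqtt.Client()
# sub.username_pw_set(username, password)
# sub.on_message = on_message
# sub.connect(broker, mqtt_port)
# sub.subscribe("/hello")
# sub.loop_forever()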
client.publish("/hello",random.randint(10,30)) | [
"[email protected]"
] | |
318f0c2a62f81f10b2462cd7ecc6e8f97bd3ccfc | af2bcc0f22e6e9fb67f68b700287c21214de2bb0 | /jackenv/bin/easy_install-2.7 | e2de4f982986addf58c9d2c8ea98f5c0665ccef7 | [] | no_license | dcl67/Loanshark2 | 8bb6c2fb6d38f548dcd19ed7f4250f285800e44f | 8d00f2aa2217d8d5ae9550167e9501d60e47f81b | refs/heads/master | 2020-03-30T18:14:10.235263 | 2018-10-09T23:14:06 | 2018-10-09T23:14:06 | 151,490,176 | 0 | 0 | null | 2018-10-09T23:14:07 | 2018-10-03T22:46:31 | Python | UTF-8 | Python | false | false | 272 | 7 | #!/Users/dannylopez/repos/JackTracking/jackenv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"[email protected]"
] | |
44e90d782a35689c3cc6addec1fab28956cabdbf | 45ca434bdb9e48fdbb2cda0e7fdd9a76474117b0 | /aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20141111/ModifyPathCacheExpiredConfigRequest.py | 984b6521e6c8e72f309754c9ac63efbb09e7dbde | [
"Apache-2.0"
] | permissive | wanyanzhenjiang/aliyun-openapi-python-sdk | e41e9937ad3f851e5a58f6bea95663e88f7fee13 | 4a5bf1b35f2395d047ead4444ea46721976bdd24 | refs/heads/master | 2020-12-30T10:37:55.789911 | 2017-07-27T06:55:15 | 2017-07-27T06:55:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,155 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ModifyPathCacheExpiredConfigRequest(RpcRequest):
    def __init__(self):
        RpcRequest.__init__(self, 'Cdn', '2014-11-11', 'ModifyPathCacheExpiredConfig')

    def get_OwnerId(self):
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self, OwnerId):
        self.add_query_param('OwnerId', OwnerId)

    def get_SecurityToken(self):
        return self.get_query_params().get('SecurityToken')

    def set_SecurityToken(self, SecurityToken):
        self.add_query_param('SecurityToken', SecurityToken)

    def get_DomainName(self):
        return self.get_query_params().get('DomainName')

    def set_DomainName(self, DomainName):
        self.add_query_param('DomainName', DomainName)

    def get_ConfigID(self):
        return self.get_query_params().get('ConfigID')

    def set_ConfigID(self, ConfigID):
        self.add_query_param('ConfigID', ConfigID)

    def get_CacheContent(self):
        return self.get_query_params().get('CacheContent')

    def set_CacheContent(self, CacheContent):
        self.add_query_param('CacheContent', CacheContent)

    def get_TTL(self):
        return self.get_query_params().get('TTL')

    def set_TTL(self, TTL):
        self.add_query_param('TTL', TTL)

    def get_Weight(self):
        return self.get_query_params().get('Weight')

    def set_Weight(self, Weight):
        self.add_query_param('Weight', Weight) | [
"[email protected]"
] | |
f7f37d52414c6009fd2d9d1a8253c3b6eb3dae7b | 93d1d91f56daccc42c42ccabf853d9d1ab4b199f | /Models/KeywordByUser.py | 7cd1ddb2d2093e39d4561fb9e8a01ef67ef2f325 | [] | no_license | Rhaall/sparetime-backend | 77421fc09df80b21db087479fce5eb3bab0b78c4 | 908737672eaab49b30f5d85accc1942b8300fb56 | refs/heads/master | 2023-01-22T22:27:20.645966 | 2020-11-20T00:15:37 | 2020-11-20T00:15:37 | 313,592,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | from sqlalchemy import Column, Integer, Float, ForeignKey
from sqlalchemy.orm import relationship
from database.database import Base
class KeywordByUser(Base):
    __tablename__ = 'keyword_by_user'

    id = Column(Integer, primary_key=True)
    id_user = Column(Integer, ForeignKey('user.id'), nullable=False)
    id_keyword = Column(Integer, ForeignKey('keyword.id'), nullable=False)
    pos_rate = Column(Float(), nullable=False)
    neg_rate = Column(Float(), nullable=False)
    neutral_rate = Column(Float(), nullable=False)
    count = Column(Integer, nullable=False)

    def __repr__(self):
        return '<KeywordByUser %r>' % self.id
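# Hypothetical usage sketch (assumes a SQLAlchemy session is available from
# the project's database module, which is not shown in this file):
#
# row = KeywordByUser(id_user=1, id_keyword=42, pos_rate=0.6,
#                     neg_rate=0.1, neutral_rate=0.3, count=7)
# session.add(row)
# session.commit()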
| [
"[email protected]"
] | |
a980971232fa5a9d366c0b54d935f33a545c7845 | 8bb3349040ac58c1eb751eead2894e013e41a25b | /pirx/plugins/__init__.py | cc395d378cb8beee98c0dca0b4020d45a6004453 | [
"BSD-3-Clause"
] | permissive | rafalp/Pirx | 45bae20f322a5e05d258953cfa1033f6a7f19c40 | adefea61a16a3c63b508b915169669d14baa0730 | refs/heads/master | 2022-08-14T15:04:30.779606 | 2020-05-16T22:22:48 | 2020-05-16T22:22:48 | 259,632,746 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60 | py | from .loader import PluginLoader
plugins = PluginLoader()
| [
"[email protected]"
] |