blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
616
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 777
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 149
values | src_encoding
stringclasses 26
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 3
10.2M
| extension
stringclasses 188
values | content
stringlengths 3
10.2M
| authors
sequencelengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9e599e8b5fd751743f099de05205732cd4bc081c | e77b92df446f0afed18a923846944b5fd3596bf9 | /Programers_algo/Graph/kruskal.py | d1d113114a54256cb73edd9e6cc8c3d493ea7ea5 | [] | no_license | sds1vrk/Algo_Study | e40ca8eb348d1fc6f88d883b26195b9ee6f35b2e | fbbc21bb06bb5dc08927b899ddc20e6cde9f0319 | refs/heads/main | 2023-06-27T05:49:15.351644 | 2021-08-01T12:43:06 | 2021-08-01T12:43:06 | 356,512,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,227 | py | # 크루스칼 알고리즘
# 가장 적은 비용으로 모든 노드를 연결
# UnionFind
import sys
# Redirect stdin so input() reads the problem data from input.txt.
sys.stdin=open("input.txt","r")
# Disjoint-set "find" with full path compression.
def find_parent(parent, x):
    """Return the root representative of x's set.

    Every node visited on the way up is re-pointed directly at the
    root, so later lookups are O(1) amortized.
    """
    # First walk: locate the root (a node that is its own parent).
    root = x
    while parent[root] != root:
        root = parent[root]
    # Second walk: compress the path onto the root.
    node = x
    while parent[node] != node:
        parent[node], node = root, parent[node]
    return root
# Disjoint-set "union": merge the sets containing a and b.
def union_parent(parent, a, b):
    """Attach one set's root under the other; the smaller-numbered
    root becomes the representative of the merged set."""
    root_a = find_parent(parent, a)
    root_b = find_parent(parent, b)
    if root_a < root_b:
        parent[root_b] = root_a
    else:
        parent[root_a] = root_b
# Read the number of nodes and the number of edges (union operations).
v,e=map(int,input().split())
parent=[0]*(v+1)
edges=[]
result=0
# Each node starts out as the representative of its own set.
for i in range(1,v+1):
    parent[i]=i
for _ in range(e):
    a,b,cost=map(int,input().split())
    # Store the cost first so sorting orders edges by cost.
    edges.append((cost,a,b))
# Kruskal: process edges cheapest-first.
edges.sort()
# Examine the edges one by one.
for edge in edges:
    cost,a,b=edge
    # Take the edge only if its endpoints are in different sets,
    # i.e. adding it does not create a cycle.
    if find_parent(parent,a)!=find_parent(parent,b):
        union_parent(parent,a,b)
        result+=cost
print(result)
| [
"[email protected]"
] | |
8b1cec12c2e95e9c6e1aa2b3a1dc3890902dccb6 | 6a69d57c782e0b1b993e876ad4ca2927a5f2e863 | /vendor/samsung/common/packages/apps/SBrowser/src/media/cast/test/utility/utility.gyp | 93e2f14842b82bec4a5561b751635ce8a87d4a77 | [
"BSD-3-Clause"
] | permissive | duki994/G900H-Platform-XXU1BOA7 | c8411ef51f5f01defa96b3381f15ea741aa5bce2 | 4f9307e6ef21893c9a791c96a500dfad36e3b202 | refs/heads/master | 2020-05-16T20:57:07.585212 | 2015-05-11T11:03:16 | 2015-05-11T11:03:16 | 35,418,464 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 891 | gyp | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'cast_test_utility',
'type': 'static_library',
'include_dirs': [
'<(DEPTH)/',
],
'dependencies': [
'<(DEPTH)/ui/gfx/gfx.gyp:gfx',
'<(DEPTH)/ui/gfx/gfx.gyp:gfx_geometry',
'<(DEPTH)/testing/gtest.gyp:gtest',
'<(DEPTH)/third_party/libyuv/libyuv.gyp:libyuv',
],
'sources': [
'<(DEPTH)/media/cast/test/fake_single_thread_task_runner.cc',
'<(DEPTH)/media/cast/test/fake_single_thread_task_runner.h',
'input_builder.cc',
'input_builder.h',
'audio_utility.cc',
'audio_utility.h',
'video_utility.cc',
'video_utility.h',
], # source
},
],
} | [
"[email protected]"
] | |
b8ed59691aeefe3e4e9152934a20f1a38dd76ec1 | 135254b8c00935efd0efd33c708ce69470e23741 | /Hard/149. Max Points on a Line.py | b3ccdb55fe8727f1317734272d23f558f8fa762e | [] | no_license | MinecraftDawn/LeetCode | 4974e6f96612f01e4774ecd5c30bc42dfff79467 | 0404bcce27ff363430e6ab71dbc27a69055fd261 | refs/heads/master | 2021-06-19T05:50:08.000396 | 2021-06-14T05:57:09 | 2021-06-14T05:57:09 | 188,446,485 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | class Solution:
    def maxPoints(self, points: list) -> int:
        """Return the maximum number of points lying on one straight line.

        Brute force: for every pair (i, j) count how many points are
        collinear with it, O(n^3) overall.
        """
        length = len(points)
        # Fewer than 3 points are trivially collinear.
        if length < 3: return length
        ans = 0
        for i in range(length-1):
            for j in range(i+1,length):
                x1,y1 = points[i]
                x2,y2 = points[j]
                dx = x2-x1
                dy = y2-y1
                count = 0
                for k in range(length):
                    x3,y3 = points[k]
                    if dx == 0:
                        # Vertical line: collinear iff x-coordinates match.
                        if x3 == x1:
                            count += 1
                    else:
                        # Cross-multiplied slope test avoids division:
                        # dy/dx == (y3-y1)/(x3-x1).
                        if dy*(x3-x1) == dx*(y3-y1):
                            count += 1
                ans = max(ans,count)
        return ans | [
"[email protected]"
] | |
a714c5b7c7804e9c67533f18dde8b0eedd53ddb4 | ce196aba0adde47ea2767eae1d7983a1ef548bb8 | /turtle_天花乱坠动画2.py | fcf7f1dd160a5abe55f5b10729cabd0a1f397bb2 | [] | no_license | xiang-daode/Python3_codes | 5d2639ffd5d65065b98d029e79b8f3608a37cf0b | 06c64f85ce2c299aef7f9311e9473e0203a05b09 | refs/heads/main | 2023-08-30T14:59:55.123128 | 2021-11-03T05:12:24 | 2021-11-03T05:12:24 | 333,632,892 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 920 | py | # 在这里写上你的代码 :-)
from turtle import *
import math
# Last drawn point; Trgl links each new triangle back to this position.
x0,y0=0,0
# Phase angles of the three superimposed rotations; advanced each frame.
u,v,w=.1,.2,.3
def Trgl(x,y,q):
    # Draw one small filled triangle at (x, y), connected by a thick
    # colored line from the previous point (x0, y0); q drives the colors.
    global x0,y0
    # Derive pen and fill colors from q so hues vary along the curve.
    rgb="#"+hex(0x100000+(q**3) % 0xEFFFFF)[2:]
    clr="#"+hex(0x100000+(q**2) % 0xEFFFFF)[2:]
    pu()
    goto(x0,y0)
    pd()
    pensize(6)
    pencolor(rgb)
    goto(x,y)
    lt(.1) # turn left slightly (degrees)
    fillcolor(clr) # fill color
    begin_fill() # start filling
    circle(5+(int)(q*q/1000) % 25, steps=3)  # steps=3 draws a triangle
    end_fill() # stop filling
    x0,y0=x,y  # remember this point as the start of the next segment
def Draw():
    """Render one animation frame and schedule the next one."""
    ht()  # hide the turtle cursor
    tracer(False)  # draw the whole frame at once, without animation
    global u,v,w,x0,y0
    # Advance the three phase angles at slightly different rates.
    u,v,w = u+.01, v+.02, w+.03
    clear()
    # Trace the parametric curve: the sum of three rotating vectors.
    for q in range(-360,366,6):
        x = 200*math.cos(u+q/60)+100*math.cos(v-q/90)+50*math.cos(-w+q/45)
        y = 200*math.sin(u+q/60)+100*math.sin(v-q/90)+50*math.sin(-w+q/45)
        Trgl(int(x), int(y), q)
    update()
    # Bug fix: pass the callback itself, not its result. The original
    # ontimer(Draw(), 20) invoked Draw immediately and recursed without
    # a base case; turtle.ontimer expects a function to call after 20 ms.
    ontimer(Draw, 20)
# Start the animation loop.
Draw()
| [
"[email protected]"
] | |
af9f7a13d84ec8d9dcd93612ecfb7ab3e5a9bcd3 | be0e0488a46b57bf6aff46c687d2a3080053e52d | /python/baekjoon/10991.py | 8b5a24ec3813157fd199bc0c010da37a72249e58 | [] | no_license | syo0e/Algorithm | b3f8a0df0029e4d6c9cbf19dcfcb312ba25ea939 | 1ae754d5bb37d02f28cf1d50463a494896d5026f | refs/heads/master | 2023-06-09T11:31:54.266900 | 2021-06-30T17:04:38 | 2021-06-30T17:04:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | n = int(input())
for i in range(n):
for j in range(i + 1, n):
print(" ", end="")
for j in range(i+1):
print("* ", end="")
print()
| [
"[email protected]"
] | |
7b218f14178cea2f621b90959936835954be0bd5 | 4a1b61cf551db7843050cc7080cec6fd60c4f8cc | /2020/SWEA_문제/swea_1211_Ladder2_풀이.py | 8caa8091e792d5a7c8190b1981bcb752ee8bf14e | [] | no_license | phoenix9373/Algorithm | 4551692027ca60e714437fd3b0c86462f635d8ff | c66fd70e14bb8357318e8b8f386d2e968f0c4d98 | refs/heads/master | 2023-08-24T10:01:20.798430 | 2021-10-15T07:57:36 | 2021-10-15T07:57:36 | 288,092,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,177 | py | dc = [-1, 1] # 좌우
def dir_check(r, c):
    """Return 0 if the ladder at row r, column c connects to the left,
    1 if it connects to the right, or 2 if neither side has a rung."""
    for side, step in enumerate(dc):
        adj = c + step
        if 0 <= adj < 100 and ladder[r][adj] == 1:
            return side
    return 2
def go(st):
    """Walk ladder number `st` (index into st_pos) down all 100 rows
    and return the total number of unit moves taken."""
    idx = st
    col = st_pos[idx]
    moves = 0
    for row in range(100):
        side = dir_check(row, col)
        if side < 2:
            # A rung: hop to the neighbouring ladder, paying one move
            # per column crossed horizontally.
            idx += dc[side]
            moves += abs(col - st_pos[idx])
            col = st_pos[idx]
        # One move for stepping down to the next row.
        moves += 1
    return moves
for tc in range(10):
    # Read the test case number.
    tc_num = int(input())
    # Read the 100 x 100 ladder grid.
    ladder = [list(map(int, input().split())) for _ in range(100)]
    # Columns in the top row where a ladder starts.
    st_pos = []
    for i in range(100):
        if ladder[0][i] == 1:
            st_pos.append(i)
    # Start from an arbitrarily large move count.
    min_value = 987654321
    # Placeholder; always overwritten before being printed.
    ans_idx = -1
    for i in range(len(st_pos)):
        tmp = go(i)
        # <= keeps the rightmost start among ties — presumably what the
        # problem requires; verify against the problem statement.
        if tmp <= min_value:
            min_value = tmp
            ans_idx = st_pos[i]
    print("#{} {}".format(tc_num, ans_idx)) | [
"[email protected]"
] | |
2fd557c72c6c708381632158e55b0b34053df367 | 7e93b1c33045b4c03054f42b6a2b800279b12a9b | /core/cache/backends/dummy.py | dd3b537c31f94fd76440864c0633ceb9028bcdbe | [
"MIT"
] | permissive | anthill-arch/framework | 6f8036980667843f2be1414850255cf6a10e2dcd | a6c238a62ae9c3fb319d12e77f7e9047aab75e8d | refs/heads/master | 2020-05-09T06:01:31.186830 | 2019-08-23T13:52:43 | 2019-08-23T13:52:43 | 180,988,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | """Dummy cache backend"""
from anthill.framework.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
class DummyCache(BaseCache):
    """A cache backend that never stores anything.

    Writes are accepted and silently discarded, reads always miss, and
    deletes succeed trivially. Keys are still built and validated so
    that code using this backend fails on invalid keys exactly as it
    would against a real backend.
    """

    def __init__(self, host, *args, **kwargs):
        # `host` is accepted only to match real backends' signatures;
        # a dummy cache has nothing to connect to.
        super().__init__(*args, **kwargs)

    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        # Pretend the value was stored.
        self.validate_key(self.make_key(key, version=version))
        return True

    def get(self, key, default=None, version=None):
        # Always a miss.
        self.validate_key(self.make_key(key, version=version))
        return default

    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        # Discard the value.
        self.validate_key(self.make_key(key, version=version))

    def delete(self, key, version=None):
        # Nothing is ever stored, so nothing to delete.
        self.validate_key(self.make_key(key, version=version))

    def has_key(self, key, version=None):
        # No key is ever present.
        self.validate_key(self.make_key(key, version=version))
        return False

    def clear(self):
        # Nothing to clear.
        pass
| [
"[email protected]"
] | |
990c86e2db482e655b196f5e1532da5ba6511e28 | 21b201ebf2ffbbc19fa8d74e5657e12ef597b02d | /research/attention_ocr/python/datasets/fsns_test.py | ae4bd198024cacb58466a58ae414192674610642 | [] | no_license | alhsnouf/model | fa619691ad9d0afc7ad849a9471e6bb0643a8d47 | 5fe429b115634e642a7469b3f1d4bc0c5cf98782 | refs/heads/master | 2021-04-12T11:16:02.150045 | 2018-03-27T15:19:18 | 2018-03-27T15:19:18 | 126,702,717 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:adecb7ba327b3d3e67e0b87d2739a254b0a9b16b81fbffc730ee0c03476db08d
size 3374
| [
"[email protected]"
] | |
a5eb64360406339ff21e0b63e1436f7afdc47e43 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/RIP-IP-PRIVATE-MIB.py | eb15ca1696c7f85b183cd39d2ae9dd12d855a403 | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 10,204 | py | #
# PySNMP MIB module RIP-IP-PRIVATE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/RIP-IP-PRIVATE-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:48:51 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion")
cjnProtocol, = mibBuilder.importSymbols("Cajun-ROOT", "cjnProtocol")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibIdentifier, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, TimeTicks, NotificationType, Counter32, ObjectIdentity, ModuleIdentity, Unsigned32, Integer32, iso, Gauge32, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "TimeTicks", "NotificationType", "Counter32", "ObjectIdentity", "ModuleIdentity", "Unsigned32", "Integer32", "iso", "Gauge32", "Counter64")
DisplayString, RowStatus, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "RowStatus", "TextualConvention")
cjnRip = ModuleIdentity((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10))
if mibBuilder.loadTexts: cjnRip.setLastUpdated('9902110000Z')
if mibBuilder.loadTexts: cjnRip.setOrganization("Lucent's Concord Technology Center (CTC)")
cjnIpRipGblGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 1))
cjnIpRipIsEnabled = MibScalar((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("false", 0), ("true", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cjnIpRipIsEnabled.setStatus('current')
cjnIpRipUpdateTimer = MibScalar((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cjnIpRipUpdateTimer.setStatus('current')
cjnIpRipPurgeTTL = MibScalar((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cjnIpRipPurgeTTL.setStatus('current')
cjnIpRipTriggeredUpdates = MibScalar((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cjnIpRipTriggeredUpdates.setStatus('current')
cjnIpRipInterPktDelay = MibScalar((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cjnIpRipInterPktDelay.setStatus('current')
cjnIpRipStatGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 2))
cjnIpRipIfGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 3))
cjnIpRipIfTable = MibTable((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 3, 1), )
if mibBuilder.loadTexts: cjnIpRipIfTable.setStatus('current')
cjnIpRipIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 3, 1, 1), ).setIndexNames((0, "RIP-IP-PRIVATE-MIB", "cjnIpRipIfIpAddr"))
if mibBuilder.loadTexts: cjnIpRipIfEntry.setStatus('current')
cjnIpRipIfIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 3, 1, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cjnIpRipIfIpAddr.setStatus('current')
cjnIpRipIfRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 3, 1, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpRipIfRowStatus.setStatus('current')
cjnIpRipIfSendRcvMode = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 3, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("talkOnly", 1), ("listenOnly", 2), ("talkAndListen", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpRipIfSendRcvMode.setStatus('current')
cjnIpRipIfSendVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 3, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("v1", 1), ("v2", 2), ("v1v2", 3), ("off", 4)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpRipIfSendVersion.setStatus('current')
cjnIpRipIfRcvVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 3, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("v1", 1), ("v2", 2), ("v1v2", 3), ("off", 4)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpRipIfRcvVersion.setStatus('current')
cjnIpRipIfDefaultRouteMode = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 3, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("talkOnly", 1), ("listenOnly", 2), ("talkAndListen", 3), ("disable", 4)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpRipIfDefaultRouteMode.setStatus('current')
cjnIpRipIfPoisonMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 3, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("splitHorizon", 1), ("splitHorizonWithPoisonReverse", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpRipIfPoisonMethod.setStatus('current')
cjnIpRipIfAuthType = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 3, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("none", 1), ("simplePassword", 2), ("mD5", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpRipIfAuthType.setStatus('current')
cjnIpRipIfAuthKey = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 3, 1, 1, 9), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpRipIfAuthKey.setStatus('current')
cjnIpRipIfStatTable = MibTable((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 3, 2), )
if mibBuilder.loadTexts: cjnIpRipIfStatTable.setStatus('current')
cjnIpRipIfStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 3, 2, 1), ).setIndexNames((0, "RIP-IP-PRIVATE-MIB", "cjnIpRipIfStatIpAddr"))
if mibBuilder.loadTexts: cjnIpRipIfStatEntry.setStatus('current')
cjnIpRipIfStatIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 3, 2, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cjnIpRipIfStatIpAddr.setStatus('current')
cjnIpRipIfUpdatesSent = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 3, 2, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cjnIpRipIfUpdatesSent.setStatus('current')
cjnIpRipIfUpdatesRcvd = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 3, 2, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cjnIpRipIfUpdatesRcvd.setStatus('current')
cjnIpRipIfTrigUpdatesSent = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 3, 2, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cjnIpRipIfTrigUpdatesSent.setStatus('current')
cjnIpRipIfBadPktRcvd = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 3, 2, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cjnIpRipIfBadPktRcvd.setStatus('current')
cjnIpRipIfBadRoutesRcvd = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 3, 2, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cjnIpRipIfBadRoutesRcvd.setStatus('current')
cjnIpRipNeighborGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 4))
cjnIpRipNeighborTable = MibTable((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 4, 1), )
if mibBuilder.loadTexts: cjnIpRipNeighborTable.setStatus('current')
cjnIpRipNeighborEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 4, 1, 1), ).setIndexNames((0, "RIP-IP-PRIVATE-MIB", "cjnIpRipNeighborIpAddr"))
if mibBuilder.loadTexts: cjnIpRipNeighborEntry.setStatus('current')
cjnIpRipNeighborIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 4, 1, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cjnIpRipNeighborIpAddr.setStatus('current')
cjnIpRipNeighborRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10, 4, 1, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpRipNeighborRowStatus.setStatus('current')
mibBuilder.exportSymbols("RIP-IP-PRIVATE-MIB", cjnIpRipIfRowStatus=cjnIpRipIfRowStatus, cjnIpRipIfUpdatesSent=cjnIpRipIfUpdatesSent, cjnIpRipIfRcvVersion=cjnIpRipIfRcvVersion, cjnIpRipIsEnabled=cjnIpRipIsEnabled, cjnIpRipIfDefaultRouteMode=cjnIpRipIfDefaultRouteMode, cjnIpRipNeighborGroup=cjnIpRipNeighborGroup, cjnIpRipNeighborTable=cjnIpRipNeighborTable, cjnIpRipIfBadRoutesRcvd=cjnIpRipIfBadRoutesRcvd, cjnIpRipIfStatIpAddr=cjnIpRipIfStatIpAddr, cjnIpRipNeighborRowStatus=cjnIpRipNeighborRowStatus, cjnIpRipIfEntry=cjnIpRipIfEntry, cjnIpRipIfIpAddr=cjnIpRipIfIpAddr, cjnIpRipUpdateTimer=cjnIpRipUpdateTimer, cjnIpRipNeighborEntry=cjnIpRipNeighborEntry, cjnIpRipPurgeTTL=cjnIpRipPurgeTTL, cjnIpRipNeighborIpAddr=cjnIpRipNeighborIpAddr, cjnIpRipIfPoisonMethod=cjnIpRipIfPoisonMethod, cjnIpRipIfBadPktRcvd=cjnIpRipIfBadPktRcvd, cjnIpRipIfSendVersion=cjnIpRipIfSendVersion, cjnIpRipGblGroup=cjnIpRipGblGroup, cjnIpRipIfTrigUpdatesSent=cjnIpRipIfTrigUpdatesSent, PYSNMP_MODULE_ID=cjnRip, cjnIpRipIfAuthType=cjnIpRipIfAuthType, cjnIpRipIfStatEntry=cjnIpRipIfStatEntry, cjnRip=cjnRip, cjnIpRipInterPktDelay=cjnIpRipInterPktDelay, cjnIpRipIfTable=cjnIpRipIfTable, cjnIpRipIfGroup=cjnIpRipIfGroup, cjnIpRipIfUpdatesRcvd=cjnIpRipIfUpdatesRcvd, cjnIpRipStatGroup=cjnIpRipStatGroup, cjnIpRipIfAuthKey=cjnIpRipIfAuthKey, cjnIpRipIfStatTable=cjnIpRipIfStatTable, cjnIpRipIfSendRcvMode=cjnIpRipIfSendRcvMode, cjnIpRipTriggeredUpdates=cjnIpRipTriggeredUpdates)
| [
"[email protected]"
] | |
7589cc724a6de9341855ab64e3e966192de85fb8 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/associate_router_request_body.py | 1fee2936721baab02638a1cc01ff60243363cc0b | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,088 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class AssociateRouterRequestBody:
    """Request body wrapping the router to associate.

    Attributes:
        openapi_types (dict): maps attribute name to attribute type.
        attribute_map (dict): maps attribute name to its JSON key.
    """

    sensitive_list = []

    openapi_types = {
        'router': 'Router'
    }

    attribute_map = {
        'router': 'router'
    }

    def __init__(self, router=None):
        """AssociateRouterRequestBody

        The model defined in huaweicloud sdk

        :param router:
        :type router: :class:`huaweicloudsdkdns.v2.Router`
        """
        self._router = None
        self.discriminator = None
        self.router = router

    @property
    def router(self):
        """Gets the router of this AssociateRouterRequestBody.

        :return: The router of this AssociateRouterRequestBody.
        :rtype: :class:`huaweicloudsdkdns.v2.Router`
        """
        return self._router

    @router.setter
    def router(self, router):
        """Sets the router of this AssociateRouterRequestBody.

        :param router: The router of this AssociateRouterRequestBody.
        :type router: :class:`huaweicloudsdkdns.v2.Router`
        """
        self._router = router

    def to_dict(self):
        """Returns the model properties as a dict"""
        out = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                # Recurse into model elements; pass plain values through.
                out[name] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                out[name] = value.to_dict()
            elif isinstance(value, dict):
                out[name] = {
                    k: (v.to_dict() if hasattr(v, "to_dict") else v)
                    for k, v in value.items()
                }
            elif name in self.sensitive_list:
                # Mask sensitive attributes in the serialized form.
                out[name] = "****"
            else:
                out[name] = value
        return out

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 needs a UTF-8 default encoding for the dump.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, AssociateRouterRequestBody):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"[email protected]"
] | |
861702d38884274207c83fc6a9221108052e545b | b3b68efa404a7034f0d5a1c10b281ef721f8321a | /Scripts/simulation/holidays/holiday_commands.py | d5c08c9ea3a12970f2e84cae9f7fa63bd14fa313 | [
"Apache-2.0"
] | permissive | velocist/TS4CheatsInfo | 62195f3333076c148b2a59f926c9fb5202f1c6fb | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | refs/heads/main | 2023-03-08T01:57:39.879485 | 2021-02-13T21:27:38 | 2021-02-13T21:27:38 | 337,543,310 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,539 | py | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\holidays\holiday_commands.py
# Compiled at: 2018-03-22 23:30:43
# Size of source mod 2**32: 2819 bytes
from protocolbuffers import GameplaySaveData_pb2, DistributorOps_pb2
from google.protobuf import text_format
from seasons.seasons_enums import SeasonType
from server_commands.argument_helpers import TunableInstanceParam, OptionalSimInfoParam, get_optional_target
import services, sims4.commands
@sims4.commands.Command('holiday.get_holiday_data', command_type=(sims4.commands.CommandType.Live))
def get_holiday_data(holiday_id: int, _connection=None):
    # Send the stored info for one holiday to the client; no-op when
    # the holiday service is unavailable.
    service = services.holiday_service()
    if service is not None:
        service.send_holiday_info_message(holiday_id)
@sims4.commands.Command('holiday.get_active_holiday_data', command_type=(sims4.commands.CommandType.Live))
def get_active_holiday_data(opt_sim: OptionalSimInfoParam=None, _connection=None):
    # Resolve the target sim info, then send the household's active
    # holiday info with the START reason.
    sim_info = get_optional_target(opt_sim, target_type=OptionalSimInfoParam, _connection=_connection)
    if sim_info is None:
        sims4.commands.output('Failed to find SimInfo.')
        return
    tracker = sim_info.household.holiday_tracker
    tracker.send_active_holiday_info_message(DistributorOps_pb2.SendActiveHolidayInfo.START)
@sims4.commands.Command('holiday.update_holiday', command_type=(sims4.commands.CommandType.Live))
def update_holiday(holiday_data: str, _connection=None):
    # Parse a text-format Holiday proto and apply it as a modification.
    service = services.holiday_service()
    if service is None:
        return
    holiday_msg = GameplaySaveData_pb2.Holiday()
    text_format.Merge(holiday_data, holiday_msg)
    service.modify_holiday(holiday_msg)
@sims4.commands.Command('holiday.add_holiday', command_type=(sims4.commands.CommandType.Live))
def add_holiday(holiday_data: str, season: SeasonType, day: int, _connection=None):
    # Parse a text-format Holiday proto and schedule it on the given
    # season/day.
    service = services.holiday_service()
    if service is None:
        return
    holiday_msg = GameplaySaveData_pb2.Holiday()
    text_format.Merge(holiday_data, holiday_msg)
    service.add_a_holiday(holiday_msg, season, day)
@sims4.commands.Command('holiday.remove_holiday', command_type=(sims4.commands.CommandType.Live))
def remove_holiday(holiday_id: int, _connection=None):
    # Delete the holiday with the given id; no-op when the holiday
    # service is unavailable.
    holiday_service = services.holiday_service()
    if holiday_service is None:
        return
    holiday_service.remove_a_holiday(holiday_id) | [
"[email protected]"
] | |
7ea4bf955c72f0899acb5deec87b995d090645a2 | ccbf98db11ce2f7f6d21b846e90b806896be8429 | /Your_first_neural_network.py | e31c96471a17845499caa583158484ad019ea377 | [] | no_license | MartinSnow/first-neural-network | 3b1514b7e90f041a8dca9dd2e73d8b0c05c53f4c | 7123fb5d904bcb713bb3c2b2dc07dada62078ca0 | refs/heads/master | 2020-03-19T00:03:42.608144 | 2018-05-30T14:56:11 | 2018-05-30T14:56:11 | 135,450,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,135 | py |
# coding: utf-8
# # 你的第一个神经网络
#
# 在此项目中,你将构建你的第一个神经网络,并用该网络预测每日自行车租客人数。我们提供了一些代码,但是需要你来实现神经网络(大部分内容)。提交此项目后,欢迎进一步探索该数据和模型。
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'retina'")
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# ## 加载和准备数据
#
# 构建神经网络的关键一步是正确地准备数据。不同尺度级别的变量使网络难以高效地掌握正确的权重。我们在下方已经提供了加载和准备数据的代码。你很快将进一步学习这些代码!
# In[2]:
# Path to the hourly bike-sharing data, relative to the working directory.
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
# In[3]:
# Peek at the first rows (displayed by the notebook).
rides.head()
# ## 数据简介
#
# 此数据集包含的是从 2011 年 1 月 1 日到 2012 年 12 月 31 日期间每天每小时的骑车人数。骑车用户分成临时用户和注册用户,cnt 列是骑车用户数汇总列。你可以在上方看到前几行数据。
#
# 下图展示的是数据集中前 10 天左右的骑车人数(某些天不一定是 24 个条目,所以不是精确的 10 天)。你可以在这里看到每小时租金。这些数据很复杂!周末的骑行人数少些,工作日上下班期间是骑行高峰期。我们还可以从上方的数据中看到温度、湿度和风速信息,所有这些信息都会影响骑行人数。你需要用你的模型展示所有这些数据。
# In[4]:
# Plot roughly the first 10 days (240 hourly samples) of ridership.
rides[:24*10].plot(x='dteday', y='cnt')
# ### 虚拟变量(哑变量)
#
# 下面是一些分类变量,例如季节、天气、月份。要在我们的模型中包含这些数据,我们需要创建二进制虚拟变量。用 Pandas 库中的 `get_dummies()` 就可以轻松实现。
# In[5]:
# One-hot encode each categorical column and append the new columns
# to the frame.
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for field in dummy_fields:
    dummies = pd.get_dummies(rides[field], prefix=field, drop_first=False)
    rides = pd.concat([rides, dummies], axis=1)

# Drop the raw categorical columns (now encoded) plus fields the
# model will not use.
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit', 'weekday',
                  'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
# ### 调整目标变量
#
# 为了更轻松地训练网络,我们将对每个连续变量标准化,即转换和调整变量,使它们的均值为 0,标准差为 1。
#
# 我们会保存换算因子,以便当我们使用网络进行预测时可以还原数据。
# In[6]:
# Standardize each continuous feature to zero mean / unit variance,
# remembering (mean, std) so predictions can be un-scaled later.
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
scaled_features = {}
for feature in quant_features:
    mean, std = data[feature].mean(), data[feature].std()
    scaled_features[feature] = [mean, std]
    data.loc[:, feature] = (data[feature] - mean) / std
# ### 将数据拆分为训练、测试和验证数据集
#
# 我们将大约最后 21 天的数据保存为测试数据集,这些数据集会在训练完网络后使用。我们将使用该数据集进行预测,并与实际的骑行人数进行对比。
# In[7]:
# Keep roughly the last 21 days (21 * 24 hourly rows) as the held-out
# test set and everything before it for training/validation.
test_data = data[-21*24:]
data = data[:-21*24]

# Split each set into input features and the three target columns.
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]

# The data is a time series, so train on history and validate on the
# most recent ~60 days instead of using a random split.
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
# ## 开始构建网络
#
# 下面你将构建自己的网络。我们已经构建好结构和反向传递部分。你将实现网络的前向传递部分。还需要设置超参数:学习速率、隐藏单元的数量,以及训练传递数量。
#
# <img src="assets/neural_network.png" width=300px>
#
# 该网络有两个层级,一个隐藏层和一个输出层。隐藏层级将使用 S 型函数作为激活函数。输出层只有一个节点,用于递归,节点的输出和节点的输入相同。即激活函数是 $f(x)=x$。这种函数获得输入信号,并生成输出信号,但是会考虑阈值,称为激活函数。我们完成网络的每个层级,并计算每个神经元的输出。一个层级的所有输出变成下一层级神经元的输入。这一流程叫做前向传播(forward propagation)。
#
# 我们在神经网络中使用权重将信号从输入层传播到输出层。我们还使用权重将错误从输出层传播回网络,以便更新权重。这叫做反向传播(backpropagation)。
#
# > **提示**:你需要为反向传播实现计算输出激活函数 ($f(x) = x$) 的导数。如果你不熟悉微积分,其实该函数就等同于等式 $y = x$。该等式的斜率是多少?也就是导数 $f(x)$。
#
#
# 你需要完成以下任务:
#
# 1. 实现 S 型激活函数。将 `__init__` 中的 `self.activation_function` 设为你的 S 型函数。
# 2. 在 `train` 方法中实现前向传递。
# 3. 在 `train` 方法中实现反向传播算法,包括计算输出错误。
# 4. 在 `run` 方法中实现前向传递。
#
#
# In[9]:
class NeuralNetwork(object):
    """A two-layer feed-forward network: sigmoid hidden layer, identity output.

    Trained with (mini-batch) gradient descent via :meth:`train`; inference
    via :meth:`run`.  Weight matrices are exposed as
    ``weights_input_to_hidden`` and ``weights_hidden_to_output`` so tests
    (and notebooks) can set them directly.
    """

    def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        """Create the network and initialize weights.

        Parameters
        ----------
        input_nodes, hidden_nodes, output_nodes : int
            Layer sizes.
        learning_rate : float
            Step size for the gradient-descent weight updates.
        """
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes

        # Weights drawn from N(0, 1/sqrt(fan_in)) — standard scaled init.
        self.weights_input_to_hidden = np.random.normal(
            0.0, self.input_nodes ** -0.5,
            (self.input_nodes, self.hidden_nodes))
        self.weights_hidden_to_output = np.random.normal(
            0.0, self.hidden_nodes ** -0.5,
            (self.hidden_nodes, self.output_nodes))
        self.lr = learning_rate

        # Sigmoid activation for the hidden layer; the output layer is
        # linear (f(x) = x) because this network does regression.
        def sigmoid(z):
            return 1 / (1 + np.exp(-z))
        self.activation_function = sigmoid

    def train(self, features, targets):
        """Train the network on a batch of features and targets.

        Parameters
        ----------
        features : 2D array
            Each row is one data record, each column a feature.
        targets : 1D array
            Target value for each record.
        """
        n_records = features.shape[0]
        # Accumulate gradients over the whole batch, apply them once at the end.
        grad_input_hidden = np.zeros(self.weights_input_to_hidden.shape)
        grad_hidden_output = np.zeros(self.weights_hidden_to_output.shape)

        for record, target in zip(features, targets):
            # --- Forward pass ---
            hidden_outputs = self.activation_function(
                np.dot(record, self.weights_input_to_hidden))
            final_outputs = np.dot(hidden_outputs, self.weights_hidden_to_output)

            # --- Backward pass ---
            error = target - final_outputs
            # Output layer is linear, so its error term equals the raw error.
            output_error_term = error
            hidden_error = np.dot(error[:, None], self.weights_hidden_to_output.T)
            # Sigmoid derivative: h * (1 - h).
            hidden_error_term = hidden_error * hidden_outputs * (1 - hidden_outputs)

            grad_input_hidden += hidden_error_term * record[:, None]
            grad_hidden_output += output_error_term * hidden_outputs[:, None]

        # Gradient-descent step, averaged over the batch.
        self.weights_hidden_to_output += self.lr * grad_hidden_output / n_records
        self.weights_input_to_hidden += self.lr * grad_input_hidden / n_records

    def run(self, features):
        """Run a forward pass and return the network output.

        Parameters
        ----------
        features : 1D (or 2D batch) array of feature values
        """
        hidden_outputs = self.activation_function(
            np.dot(features, self.weights_input_to_hidden))
        # Identity output activation: the final input is the final output.
        return np.dot(hidden_outputs, self.weights_hidden_to_output)
# In[10]:
def MSE(y, Y):
    """Return the mean squared error between predictions *y* and targets *Y*."""
    diff = y - Y
    return np.mean(diff * diff)
# ## 单元测试
#
# 运行这些单元测试,检查你的网络实现是否正确。这样可以帮助你确保网络已正确实现,然后再开始训练网络。这些测试必须成功才能通过此项目。
# In[11]:
import unittest
# Fixtures for the unit tests below: a single training record, its target,
# and fixed starting weights so the expected post-training values are known.
inputs = np.array([[0.5, -0.2, 0.1]])
targets = np.array([[0.4]])
test_w_i_h = np.array([[0.1, -0.2],
                       [0.4, 0.5],
                       [-0.3, 0.2]])
test_w_h_o = np.array([[0.3],
                       [-0.1]])
class TestMethods(unittest.TestCase):
    """Unit tests for data loading and for the NeuralNetwork implementation."""

    ##########
    # Unit tests for data loading
    ##########
    def test_data_path(self):
        # Test that file path to dataset has been unaltered
        self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')

    def test_data_loaded(self):
        # Test that data frame loaded
        self.assertTrue(isinstance(rides, pd.DataFrame))

    ##########
    # Unit tests for network functionality
    ##########
    def test_activation(self):
        network = NeuralNetwork(3, 2, 1, 0.5)
        # Test that the activation function is a sigmoid
        self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))

    def test_run(self):
        # Test correctness of run method
        network = NeuralNetwork(3, 2, 1, 0.5)
        network.weights_input_to_hidden = test_w_i_h.copy()
        network.weights_hidden_to_output = test_w_h_o.copy()
        self.assertTrue(np.allclose(network.run(inputs), 0.09998924))

    def test_train(self):
        # Test that weights are updated correctly on training.
        # Expected arrays are the known result of one gradient step from the
        # fixed test weights above.
        network = NeuralNetwork(3, 2, 1, 0.5)
        network.weights_input_to_hidden = test_w_i_h.copy()
        network.weights_hidden_to_output = test_w_h_o.copy()
        network.train(inputs, targets)
        self.assertTrue(np.allclose(network.weights_hidden_to_output,
                                    np.array([[ 0.37275328],
                                              [-0.03172939]])))
        self.assertTrue(np.allclose(network.weights_input_to_hidden,
                                    np.array([[ 0.10562014, -0.20185996],
                                              [0.39775194, 0.50074398],
                                              [-0.29887597, 0.19962801]])))
# Build a suite from TestMethods and run it inline (notebook-style invocation
# instead of unittest.main()).
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
# ## 训练网络
#
# 现在你将设置网络的超参数。策略是设置的超参数使训练集上的错误很小但是数据不会过拟合。如果网络训练时间太长,或者有太多的隐藏节点,可能就会过于针对特定训练集,无法泛化到验证数据集。即当训练集的损失降低时,验证集的损失将开始增大。
#
# 你还将采用随机梯度下降 (SGD) 方法训练网络。对于每次训练,都获取随机样本数据,而不是整个数据集。与普通梯度下降相比,训练次数要更多,但是每次时间更短。这样的话,网络训练效率更高。稍后你将详细了解 SGD。
#
#
# ### 选择迭代次数
#
# 也就是训练网络时从训练数据中抽样的批次数量。迭代次数越多,模型就与数据越拟合。但是,如果迭代次数太多,模型就无法很好地泛化到其他数据,这叫做过拟合。你需要选择一个使训练损失很低并且验证损失保持中等水平的数字。当你开始过拟合时,你会发现训练损失继续下降,但是验证损失开始上升。
#
# ### 选择学习速率
#
# 速率可以调整权重更新幅度。如果速率太大,权重就会太大,导致网络无法与数据相拟合。建议从 0.1 开始。如果网络在与数据拟合时遇到问题,尝试降低学习速率。注意,学习速率越低,权重更新的步长就越小,神经网络收敛的时间就越长。
#
#
# ### 选择隐藏节点数量
#
# 隐藏节点越多,模型的预测结果就越准确。尝试不同的隐藏节点的数量,看看对性能有何影响。你可以查看损失字典,寻找网络性能指标。如果隐藏单元的数量太少,那么模型就没有足够的空间进行学习,如果太多,则学习方向就有太多的选择。选择隐藏单元数量的技巧在于找到合适的平衡点。
# In[12]:
import sys

### Set the hyperparameters here ###
# Number of SGD iterations, step size, and layer sizes.
iterations = 3000
learning_rate = 0.5
hidden_nodes = 10
output_nodes = 1

# Input layer size = number of feature columns.
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)

# Per-iteration losses, collected for the plot below.
losses = {'train':[], 'validation':[]}
for ii in range(iterations):
    # Go through a random batch of 128 records from the training data set
    batch = np.random.choice(train_features.index, size=128)
    X, y = train_features.loc[batch].values, train_targets.loc[batch]['cnt']
    network.train(X, y)
    # Printing out the training progress.
    # NOTE(review): losses are evaluated on the FULL train/val sets every
    # iteration — expensive, but this matches the original notebook.
    train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
    val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
    sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii/float(iterations)) + "% ... Training loss: " + str(train_loss)[:5] + " ... Validation loss: " + str(val_loss)[:5])
    sys.stdout.flush()
    losses['train'].append(train_loss)
    losses['validation'].append(val_loss)
# In[13]:
# Plot the training and validation loss curves collected during SGD above.
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
_ = plt.ylim()
# ## 检查预测结果
#
# 使用测试数据看看网络对数据建模的效果如何。如果完全错了,请确保网络中的每步都正确实现。
# In[14]:
fig, ax = plt.subplots(figsize=(8,4))
# Undo the feature scaling so predictions are in the original count units.
mean, std = scaled_features['cnt']
predictions = network.run(test_features).T*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
# NOTE(review): DataFrame.ix was removed in pandas 1.0 — this requires an old
# pandas; on modern versions use rides.loc[test_data.index].
dates = pd.to_datetime(rides.ix[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
# Label every 24th hour (one tick per day, centered at noon).
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
# ## 可选:思考下你的结果(我们不会评估这道题的答案)
#
#
# 请针对你的结果回答以下问题。模型对数据的预测效果如何?哪里出现问题了?为何出现问题呢?
#
# > **注意**:你可以通过双击该单元编辑文本。如果想要预览文本,请按 Control + Enter
#
# #### 请将你的答案填写在下方
#
| [
"[email protected]"
] | |
3eeadc182efdc464262b1666ed9e13d5177d14a7 | 9b1446b26e81a79c303f9799fb6a91785c7adb03 | /Code/.history/listogram_20200211102152.py | dba8833384547273220a2bdcaa8ac891eb054bfd | [] | no_license | SamirIngley/CS1.2-Tweet-Gen | 017ea15b1113881a156ff24682828bc654eb6c81 | bcd95fa63e05849cbf8e36230d8e31032b99daaa | refs/heads/master | 2020-12-14T20:19:57.733290 | 2020-08-04T23:19:23 | 2020-08-04T23:19:23 | 234,856,234 | 0 | 0 | null | 2020-06-05T21:13:04 | 2020-01-19T07:05:55 | Python | UTF-8 | Python | false | false | 5,653 | py | #!python
from __future__ import division, print_function # Python 2 and 3 compatibility
import random
from sample import prob_sample
class Listogram(list):
    """Listogram is a histogram implemented as a subclass of the list type.

    Each entry is a two-item list ``[word, count]``.  ``types`` tracks the
    number of distinct word types and ``tokens`` the total number of counted
    word tokens.
    """

    def __init__(self, word_list=None):
        """Initialize this histogram as a new list and count given words."""
        super(Listogram, self).__init__()  # Initialize this as a new list
        self.types = 0   # Count of distinct word types in this histogram
        self.tokens = 0  # Total count of all word tokens in this histogram
        # Count words in given list, if any
        if word_list is not None:
            for word in word_list:
                self.add_count(word)

    def add_count(self, word, count=1):
        """Increase frequency count of given word by given count amount."""
        self.tokens += count
        index = self.index_of(word)
        if index is not None:
            # Word already present: bump its stored count.
            self[index][1] += count
        else:
            # New word type: append a fresh [word, count] entry.
            self.append([word, count])
            self.types += 1

    def frequency(self, word):
        """Return frequency count of given word, or 0 if word is not found."""
        index = self.index_of(word)
        return self[index][1] if index is not None else 0

    def __contains__(self, word):
        """Return boolean indicating if given word is in this histogram."""
        return self.index_of(word) is not None

    def index_of(self, target):
        """Return the index of the entry containing given target word if found
        in this histogram, or None if target word is not found.

        Uses a linear search over the entries.  (A leftover debug
        ``print(self)`` was removed from this method.)
        """
        for index, entry in enumerate(self):
            if entry[0] == target:
                return index
        return None

    def sample(self):
        """Return a word from this histogram, randomly sampled by weighting
        each word's probability of being chosen by its observed frequency."""
        return prob_sample(self)
return prob_sample(self)
def print_histogram(word_list):
    """Build a Listogram from *word_list*, print its contents and token/type
    counts, show frequencies of the last two words, then run the sampling demo."""
    print()
    print('Histogram:')
    print('word list: {}'.format(word_list))
    # Create a listogram and display its contents
    histogram = Listogram(word_list)
    print('listogram: {}'.format(histogram))
    print('{} tokens, {} types'.format(histogram.tokens, histogram.types))
    for word in word_list[-2:]:
        freq = histogram.frequency(word)
        print('{!r} occurs {} times'.format(word, freq))
    print()
    print_histogram_samples(histogram)
def print_histogram_samples(histogram):
    """Sample *histogram* 10,000 times and print a table comparing sampled
    frequency to observed frequency for each word, color-coding the error."""
    print('Histogram samples:')
    # Sample the histogram 10,000 times and count frequency of results
    samples_list = [histogram.sample() for _ in range(10000)]
    samples_hist = Listogram(samples_list)
    print('samples: {}'.format(samples_hist))
    print()
    print('Sampled frequency and error from observed frequency:')
    header = '| word type | observed freq | sampled freq  |  error  |'
    divider = '-' * len(header)
    print(divider)
    print(header)
    print(divider)
    # Colors for error (ANSI escape codes: green < 5%, yellow < 10%, red otherwise)
    green = '\033[32m'
    yellow = '\033[33m'
    red = '\033[31m'
    reset = '\033[m'
    # Check each word in original histogram
    for word, count in histogram:
        # Calculate word's observed frequency
        observed_freq = count / histogram.tokens
        # Calculate word's sampled frequency
        samples = samples_hist.frequency(word)
        sampled_freq = samples / samples_hist.tokens
        # Calculate error between word's sampled and observed frequency
        error = (sampled_freq - observed_freq) / observed_freq
        color = green if abs(error) < 0.05 else yellow if abs(error) < 0.1 else red
        print('| {!r:<9} '.format(word)
              + '| {:>4} = {:>6.2%} '.format(count, observed_freq)
              + '| {:>4} = {:>6.2%} '.format(samples, sampled_freq)
              + '| {}{:>+7.2%}{} |'.format(color, error, reset))
    print(divider)
    print()
def main():
    """Demonstrate Listogram on command-line arguments, or on built-in sample
    texts when no arguments are given."""
    import sys
    arguments = sys.argv[1:]  # Exclude script name in first argument
    if len(arguments) >= 1:
        # Test histogram on given arguments
        print_histogram(arguments)
    else:
        # Test histogram on letters in a word
        word = 'abracadabra'
        print_histogram(list(word))
        # Test histogram on words in a classic book title
        fish_text = 'one fish two fish red fish blue fish'
        print_histogram(fish_text.split())
        # Test histogram on words in a long repetitive sentence
        woodchuck_text = ('how much wood would a wood chuck chuck'
                          ' if a wood chuck could chuck wood')
        print_histogram(woodchuck_text.split())
if __name__ == '__main__':
    # NOTE: a stray, syntactically-invalid debug line
    # (`print(listogram('one fish ...').items()`) was removed here: it had an
    # unbalanced parenthesis (SyntaxError) and referenced an undefined
    # lowercase name `listogram`, so the module could not even be imported.
    main()
| [
"[email protected]"
] | |
8777583da741851996ff7641ab50d2af2560ea1b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03073/s506049287.py | e7b40d93af0372410ecb41380792639e0b71e517 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | s=input()
print(min(s[::2].count("1")+s[1::2].count("0"),s[::2].count("0")+s[1::2].count("1"))) | [
"[email protected]"
] | |
78e865b1517223315d05ca75adde29b5a753601b | acad86f0365aa0b5b613644a896bffb9d4dc533f | /django_session_jwt/test.py | 8e931858e91b4e4cab19fa50500ef1161aa607b8 | [
"MIT"
] | permissive | willstott101/django-session-jwt | f85be9c9d5cefe26f5e6253886018baa256c2a83 | ca5dc90c61190305f902ceab03a30abf0e184865 | refs/heads/master | 2023-08-29T13:42:35.587372 | 2021-09-23T19:18:26 | 2021-09-23T19:18:26 | 421,750,404 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,067 | py | from importlib import import_module
from django.conf import settings
from django.test.client import Client as BaseClient
from django.contrib.auth import SESSION_KEY, get_user_model
from django_session_jwt.middleware import convert_cookie, verify_jwt
User = get_user_model()
class Client(BaseClient):
    """Django test client aware of django_session_jwt's JWT session cookie.

    After a successful login the plain session cookie is converted into a JWT
    (via ``convert_cookie``), mirroring what the middleware does for real
    responses; the ``session`` property verifies that JWT to recover the
    underlying session key.
    """

    def login(self, **credentials):
        ret = super(Client, self).login(**credentials)
        if ret:
            # Wrap the freshly-created session cookie in a JWT for this user,
            # just like the middleware would on a real response.
            user = User.objects.get(id=int(self.session[SESSION_KEY]))
            convert_cookie(self.cookies, user)
        return ret

    @property
    def session(self):
        """
        Obtains the current session variables.
        """
        engine = import_module(settings.SESSION_ENGINE)
        cookie = self.cookies.get(settings.SESSION_COOKIE_NAME)
        if cookie:
            # The cookie may hold a JWT (with the session key in the 'sk'
            # claim) or a bare session key; fall back to the raw value.
            sk = verify_jwt(cookie.value).get('sk', cookie.value)
            return engine.SessionStore(sk)
        # No cookie yet: create and persist a fresh session, and store its key.
        session = engine.SessionStore()
        session.save()
        self.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
        return session
| [
"[email protected]"
] | |
9c3072e28bebc804c79f0b1fa0248796a4500f7e | 82e8593a7e91a260a39b150e13f25741da4d6f8f | /pylib/cli.py | a4dbf7efd887a15e93d2dcb068d85565d0cc2891 | [] | no_license | grandi23/winterpy | 5b9074c62d0f3da370705b99ae2a5f9d9f6ada59 | 989fe6595dc53dca9a0f6da3feb8e05d009d7958 | refs/heads/master | 2021-01-17T12:08:10.737815 | 2015-05-03T09:24:48 | 2015-05-03T09:24:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,847 | py | # vim:fileencoding=utf-8
# This file is in the Public Domain
'''
Convenient functions for command-line use.
Python 2 & 3
'''
import os
import re
def repl(local, histfile=None, banner=None):
    """Start an interactive Python REPL with tab completion.

    local: namespace dict exposed to the interactive session.
    histfile: optional path to a persistent readline history file.
    banner: optional banner passed through to code.interact.
    """
    import readline
    import rlcompleter
    # libedit (macOS) uses a different key-binding syntax than GNU readline.
    # NOTE(review): assumes readline.__doc__ is a string; on builds where it
    # is None this membership test would raise TypeError.
    if 'libedit' in readline.__doc__:
        readline.parse_and_bind('bind ^I rl_complete')
    else:
        readline.parse_and_bind('tab: complete')
    if histfile is not None and os.path.exists(histfile):
        # avoid duplicate reading
        if readline.get_current_history_length() <= 0:
            readline.set_history_length(10000)
            readline.read_history_file(histfile)
    import code
    readline.set_completer(rlcompleter.Completer(local).complete)
    code.interact(local=local, banner=banner)
    if histfile is not None:
        readline.write_history_file(histfile)
def repl_reset_stdin(*args, **kwargs):
    """Re-attach file descriptor 0 to the controlling terminal, then start
    repl().  Useful when stdin was consumed or redirected (e.g. piped input)."""
    fd = os.open('/dev/tty', os.O_RDONLY)
    os.dup2(fd, 0)
    os.close(fd)
    repl(*args, **kwargs)
def _translate(m):
s = m.group(0)
type, code = s[1], int(s[2:], 16)
if type == 'x':
return chr(code)
else:
return unichr(code).encode('utf-8')
def unescape_py2(s):
    """Expand literal ``\\xNN``/``\\uNNNN``/``\\UNNNNNNNN`` escape sequences
    in *s* into the characters they denote (see _translate)."""
    return re.sub(r'\\x[0-9A-Fa-f]{2}|\\u[0-9A-Fa-f]{4}|\\U[0-9A-Fa-f]{8}',
                  _translate, s)
def repl_py27(local, *args, **kwargs):
    '''Fix unicode display in Python 2.x; Console encoding must be UTF-8'''
    import re, sys

    def displayfunc(value):
        # Mirror the default displayhook's None handling: print nothing and
        # reset '_'.
        if value is None:
            local['_'] = None
            return
        r = repr(value)
        # Re-expand \xNN/\uNNNN escapes so non-ASCII text displays readably.
        r = unescape_py2(r)
        print(r)
        local['_'] = value

    old_displayhook = sys.displayhook
    sys.displayhook = displayfunc
    try:
        repl(local, *args, **kwargs)
    finally:
        # Always restore the original displayhook, even if the REPL raises.
        sys.displayhook = old_displayhook
if __name__ == '__main__':
    import sys
    # Python 3 prints unicode correctly out of the box; Python 2 needs the
    # repr-unescaping displayhook from repl_py27.
    if sys.version_info[0] == 3:
        repl_func = repl
    else:
        repl_func = repl_py27
    repl_func(vars(), os.path.expanduser('~/.pyhistory'))
| [
"[email protected]"
] | |
e8d8d9af59056af753ef58acd36c9b21d2d45cbf | 39354dfc8f61f57f022522a3e3a880c73a540d0d | /shenfun/utilities/__init__.py | ab4cb0c3b03aa606b0854b11a59de03b0f3e750b | [
"BSD-2-Clause"
] | permissive | mstf1985/shenfun | aab9cd416ea7cb549ef191ed9e32f4cd66d522d0 | 83a28b3f7142ef3bf60b20d707ba8c1d2f13a8ff | refs/heads/master | 2022-12-10T20:27:51.825173 | 2020-08-28T13:58:58 | 2020-08-28T13:58:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,621 | py | """
Module for implementing helper functions.
"""
import types
from numbers import Number
try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping
from collections import defaultdict
import numpy as np
import sympy as sp
from scipy.fftpack import dct
from shenfun.optimization import optimizer
__all__ = ['dx', 'clenshaw_curtis1D', 'CachedArrayDict',
'outer', 'apply_mask', 'integrate_sympy', 'mayavi_show']
def dx(u):
    r"""Compute integral of u over domain

    .. math::

        \int_{\Omega} u dx

    Parameters
    ----------
    u : Array
        The Array to integrate
    """
    T = u.function_space()
    uc = u.copy()
    dim = len(u.shape)
    if dim == 1:
        # 1D: a plain (unweighted) quadrature sum over the nodal values.
        w = T.points_and_weights(weighted=False)[1]
        return np.sum(uc*w).item()
    # Multidimensional: integrate one axis at a time, redistributing the
    # (possibly MPI-distributed) array so the active axis is local.
    for ax in range(dim):
        uc = uc.redistribute(axis=ax)
        w = T.bases[ax].points_and_weights(weighted=False)[1]
        # Broadcast the 1D quadrature weights along axis `ax` only.
        sl = [np.newaxis]*len(uc.shape)
        sl[ax] = slice(None)
        uu = np.sum(uc*w[tuple(sl)], axis=ax)
        # Re-insert the reduced axis so the array keeps its rank for the
        # next redistribute call.
        sl = [slice(None)]*len(uc.shape)
        sl[ax] = np.newaxis
        uc[:] = uu[tuple(sl)]
    # After reducing every axis, all entries equal the integral.
    return uc.flat[0]
def clenshaw_curtis1D(u, quad="GC"):  # pragma: no cover
    """Clenshaw-Curtis integration in 1D.

    *u* holds nodal values on Chebyshev points; *quad* selects the point set:
    'GL' (Gauss-Lobatto, includes endpoints) or 'GC' (Chebyshev-Gauss).
    Returns the square root of the quadrature sum.
    """
    assert u.ndim == 1
    npts = u.shape[0]
    if quad == 'GL':
        # Gauss-Lobatto: weight the type-1 DCT coefficients of u.
        wts = np.arange(0, npts, 1, dtype=float)
        wts[2:] = 2./(1 - wts[2:]**2)
        wts[0] = 1
        wts[1::2] = 0
        coeffs = dct(u, 1) / (npts - 1)
        return np.sqrt(np.sum(coeffs * wts))
    assert quad == 'GC'
    # Chebyshev-Gauss: build the quadrature weights via a type-3 DCT.
    seed = np.zeros(npts)
    even = 2*(1 + np.arange((npts - 1)//2))
    seed[::2] = (2./npts)/np.hstack((1., 1. - even*even))
    wts = dct(seed, type=3)
    return np.sqrt(np.sum(u * wts))
class CachedArrayDict(MutableMapping):
    """Dictionary for caching Numpy arrays (work arrays).

    Keys have the form ``(array, tag, zero_fill)``: a zeroed array with the
    same shape/dtype as ``array`` is created on first access and cached under
    ``(shape, dtype, tag)``.  Later accesses return the cached array, refilled
    with zeros when ``zero_fill`` is True.

    Example
    -------
    >>> import numpy as np
    >>> from shenfun.utilities import CachedArrayDict
    >>> work = CachedArrayDict()
    >>> a = np.ones((3, 4), dtype=int)
    >>> w = work[(a, 0, True)]  # create work array with shape as a
    >>> print(w.shape)
    (3, 4)
    >>> print(w)
    [[0 0 0 0]
     [0 0 0 0]
     [0 0 0 0]]
    >>> w2 = work[(a, 1, True)]  # different (note the 1!) array, same shape/dtype
    """

    def __init__(self):
        self._data = {}

    def __getitem__(self, key):
        cache_key, zero_fill = self.__keytransform__(key)
        try:
            array = self._data[cache_key]
        except KeyError:
            # First request for this (shape, dtype, tag): allocate fresh.
            shape, dtype, _ = cache_key
            array = np.zeros(shape, dtype=np.dtype(dtype, align=True))
            self._data[cache_key] = array
        if zero_fill:
            array.fill(0)
        return array

    @staticmethod
    def __keytransform__(key):
        # Split the user-facing key into the internal cache key and the
        # zero-fill flag.
        assert len(key) == 3
        template, tag, zero_fill = key
        return (template.shape, template.dtype, tag), zero_fill

    def __len__(self):
        return len(self._data)

    def __setitem__(self, key, value):
        self._data[self.__keytransform__(key)[0]] = value

    def __delitem__(self, key):
        del self._data[self.__keytransform__(key)[0]]

    def __iter__(self):
        return iter(self._data)

    def values(self):
        # Work arrays are identified by key only; iterating values is an error.
        raise TypeError('Cached work arrays not iterable')
def outer(a, b, c):
    r"""Return outer product $c_{i,j} = a_i b_j$

    Parameters
    ----------
    a : Array of shape (N, ...)
    b : Array of shape (N, ...)
    c : Array of shape (N*N, ...)

    The outer product is taken over the first index of a and b,
    for all remaining indices.
    """
    av = a.v
    bv = b.v
    cv = c.v
    # If a and b are the same object the product is symmetric, letting the
    # 2D/3D kernels reuse off-diagonal entries instead of recomputing them.
    symmetric = a is b
    if av.shape[0] == 2:
        outer2D(av, bv, cv, symmetric)
    elif av.shape[0] == 3:
        outer3D(av, bv, cv, symmetric)
    return c
@optimizer
def outer2D(a, b, c, symmetric):
    """Fill flattened 2x2 output c with the outer product a_i b_j.

    When `symmetric` is True (a and b are the same field), the off-diagonal
    entry c[2] reuses the already-computed c[1].
    """
    c[0] = a[0]*b[0]
    c[1] = a[0]*b[1]
    if symmetric:
        c[2] = c[1]
    else:
        c[2] = a[1]*b[0]
    c[3] = a[1]*b[1]
@optimizer
def outer3D(a, b, c, symmetric):
    """Fill flattened 3x3 output c with the outer product a_i b_j.

    When `symmetric` is True (a and b are the same field), the lower-triangle
    entries reuse the already-computed upper-triangle values.

    Bug fix: c[4] and c[5] are now computed *before* the symmetric branch.
    Previously ``c[7] = c[5]`` executed while c[5] still held a stale value
    from the output buffer, because c[5] was only assigned afterwards.
    """
    c[0] = a[0]*b[0]
    c[1] = a[0]*b[1]
    c[2] = a[0]*b[2]
    c[4] = a[1]*b[1]
    c[5] = a[1]*b[2]
    if symmetric:
        c[3] = c[1]
        c[6] = c[2]
        c[7] = c[5]
    else:
        c[3] = a[1]*b[0]
        c[6] = a[2]*b[0]
        c[7] = a[2]*b[1]
    c[8] = a[2]*b[2]
@optimizer
def apply_mask(u_hat, mask):
    """Multiply u_hat in place by mask (no-op when mask is None) and return it."""
    if mask is not None:
        u_hat *= mask
    return u_hat
def integrate_sympy(f, d):
    """Exact definite integral using sympy

    Try to convert expression `f` to a polynomial before integrating.
    See sympy issue https://github.com/sympy/sympy/pull/18613 to why this is
    needed. Poly().integrate() is much faster than sympy.integrate() when applicable.

    Parameters
    ----------
    f : sympy expression
    d : 3-tuple
        First item the symbol, next two the lower and upper integration limits
    """
    try:
        # Fast path: polynomial antiderivative evaluated at the two limits.
        p = sp.Poly(f, d[0]).integrate()
        return p(d[2]) - p(d[1])
    except sp.PolynomialError:
        # Non-polynomial: fall back to general symbolic integration.
        #return sp.Integral(f, d).evalf()
        return sp.integrate(f, d)
def split(measures):
    """Expand a sympy expression and split it into additive terms.

    Each term is returned as a defaultdict mapping symbol name -> its factor,
    with the purely numeric prefactor stored under the 'coeff' key (every
    entry defaults to 1).
    """
    #ms = sp.sympify(measures).expand()
    #ms = ms if isinstance(ms, tuple) else [ms]
    #result = []
    #for m in ms:
    #    if sp.simplify(m) == 0:
    #        continue
    #    d = {'coeff': m} if isinstance(m, Number) else sp.separatevars(m, dict=True)
    #    d = defaultdict(lambda: 1, {str(k): sp.simplify(v) for k, v in d.items()})
    #    dc = d['coeff']
    #    d['coeff'] = int(dc) if isinstance(dc, (sp.Integer, int)) else float(dc)
    #    result.append(d)
    #return result

    def _split(mss, result):
        # Accumulate the factors of one product term into `result`.
        for ms in mss:
            ms = sp.sympify(ms)
            if isinstance(ms, sp.Mul):
                # Multiplication of two or more terms
                result = _split(ms.args, result)
                continue
            # Something else with only one symbol
            sym = ms.free_symbols
            assert len(sym) <= 1
            if len(sym) == 1:
                sym = sym.pop()
                result[str(sym)] *= ms
            else:
                # Pure number: fold it into the numeric coefficient.
                ms = int(ms) if isinstance(ms, sp.Integer) else float(ms)
                result['coeff'] *= ms
        return result

    ms = sp.sympify(measures).expand()
    result = []
    if isinstance(ms, sp.Add):
        # One dict per additive term.
        for arg in ms.args:
            result.append(_split([arg], defaultdict(lambda: 1)))
    else:
        result.append(_split([ms], defaultdict(lambda: 1)))
    return result
def mayavi_show():
    """
    Return show function that updates the mayavi figure in the background.
    """
    # Imported lazily so mayavi/pyface stay optional dependencies.
    from pyface.api import GUI
    from mayavi import mlab
    return mlab.show(GUI().stop_event_loop)
| [
"[email protected]"
] | |
92860749415e2e2d47633bedc17980cca62a426e | 1a57701cb4d1646aafde8697822bbccac75384d2 | /backend/manage.py | c4af1014429b9188efdb9af4081d3e319f46a4d3 | [] | no_license | crowdbotics-apps/msm-sjsjskjsk8765-d-13922 | 79159dc8e38253fba76dfd706b8846be04a46c2a | 70e94d23e2b32e62ced3ef27304a4cef63763e1b | refs/heads/master | 2022-12-31T19:00:03.093402 | 2020-10-23T10:11:57 | 2020-10-23T10:11:57 | 306,597,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Django's command-line entry point: point DJANGO_SETTINGS_MODULE at this
    project's settings and dispatch to django.core.management."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'msm_sjsjskjsk8765_d_13922.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint about the two most common causes.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)

if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
4e5392e6a97b9f2543a239f9adb0cf149741da5a | 3a5ea75a5039207104fd478fb69ac4664c3c3a46 | /vega/modules/operators/cell.py | d03942590a2e4c9f3881d5e842d8bfb35026b39d | [
"MIT"
] | permissive | fmsnew/vega | e3df25efa6af46073c441f41da4f2fdc4929fec5 | 8e0af84a57eca5745fe2db3d13075393838036bb | refs/heads/master | 2023-06-10T04:47:11.661814 | 2021-06-26T07:45:30 | 2021-06-26T07:45:30 | 285,174,199 | 0 | 0 | MIT | 2020-08-11T14:19:09 | 2020-08-05T03:59:49 | Python | UTF-8 | Python | false | false | 11,111 | py | # -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Import all torch operators."""
import vega
from vega.common import ClassType, ClassFactory
from vega.modules.operators import ops
from vega.modules.operators.mix_ops import MixedOp, OPS
from vega.modules.operators.conv import conv_bn_relu, Seq, FactorizedReduce, ReLUConvBN
@ClassFactory.register(ClassType.NETWORK)
class Cell(ops.Module):
    """Cell structure according to desc."""

    concat_size = 0  # Number of intermediate nodes concatenated at the output.

    def __init__(self, genotype, steps, concat, reduction, reduction_prev=None, C_prev_prev=None, C_prev=None, C=None):
        """Init Cell.

        genotype: list of (op_name, output_node_index, input_node_index)
        triples; steps: number of intermediate nodes; concat: indices of the
        states concatenated to form the cell output; reduction /
        reduction_prev: whether this / the previous cell reduces resolution;
        C_prev_prev, C_prev, C: channel counts of the two inputs and of this
        cell.
        """
        super(Cell, self).__init__()
        self.genotype = genotype
        self.steps = steps
        self.concat = concat
        self.reduction = reduction
        self.reduction_prev = reduction_prev
        self.C_prev_prev = C_prev_prev
        self.C_prev = C_prev
        self.C = C
        self.concat_size = 0
        # When genotype entries hold candidate-op lists (search phase),
        # batch-norm scaling is turned off.
        affine = True
        if isinstance(self.genotype[0][0], list):
            affine = False
        # preprocess0 reconciles the resolution of s0 when the previous cell
        # was a reduction cell.
        if self.reduction_prev:
            self.preprocess0 = FactorizedReduce(self.C_prev_prev, self.C, affine)
        else:
            self.preprocess0 = ReLUConvBN(self.C_prev_prev, self.C, 1, 1, 0, affine)
        self.preprocess1 = ReLUConvBN(self.C_prev, self.C, 1, 1, 0, affine)
        op_names, indices_out, indices_inp = zip(*self.genotype)
        self.build_ops(self.C, op_names, indices_out, indices_inp, self.concat, self.reduction)
        self.concat_size = len(self.concat)
        self.torch_flag = vega.is_torch_backend()

    def build_ops(self, C, op_names, indices_out, indices_inp, concat, reduction):
        """Compile the cell.

        :param C: channels of this cell
        :type C: int
        :param op_names: list of all the operations in description
        :type op_names: list of str
        :param indices_out: list of all output nodes
        :type indices_out: list of int
        :param indices_inp: list of all input nodes link to output node
        :type indices_inp: list of int
        :param concat: cell concat list of output node
        :type concat: list of int
        :param reduction: whether to reduce
        :type reduction: bool
        """
        self._concat = concat
        self._multiplier = len(concat)
        # Group the input indices by their output node: out_inp_list[i] holds
        # the input-state indices feeding intermediate node i.  Output nodes
        # are assumed to appear in non-decreasing order starting at 2 (the
        # first two states are the cell inputs).
        self.out_inp_list = []
        temp_list = []
        idx_cmp = 2
        _op_list = []
        for i in range(len(op_names)):
            if indices_out[i] == idx_cmp:
                temp_list.append(indices_inp[i])
            elif indices_out[i] > idx_cmp:
                self.out_inp_list.append(temp_list.copy())
                temp_list = []
                idx_cmp += 1
                temp_list.append(indices_inp[i])
            else:
                raise Exception("input index should not less than idx_cmp")
            # In a reduction cell, edges from the two cell inputs use stride 2.
            stride = 2 if reduction and indices_inp[i] < 2 else 1
            op = MixedOp(C=C, stride=stride, ops_cands=op_names[i])
            _op_list.append(op)
        self.op_list = Seq(*tuple(_op_list))
        self.oplist = list(self.op_list.children())
        self.out_inp_list.append(temp_list.copy())
        if len(self.out_inp_list) != self.steps:
            raise Exception("out_inp_list length should equal to steps")

    def call(self, s0, s1, weights=None, drop_path_prob=0, selected_idxs=None):
        """Forward function of Cell.

        :param s0: feature map of previous of previous cell
        :type s0: torch tensor
        :param s1: feature map of previous cell
        :type s1: torch tensor
        :param weights: weights of operations in cell
        :type weights: torch tensor, 2 dimension
        :return: cell output
        :rtype: torch tensor
        """
        s0 = self.preprocess0(s0)
        s1 = self.preprocess1(s1)
        states = [s0, s1]
        idx = 0
        for i in range(self.steps):
            hlist = []
            for j, inp in enumerate(self.out_inp_list[i]):
                op = self.oplist[idx + j]
                if selected_idxs is None:
                    # Search/regular mode: mixed op, optionally weighted.
                    if weights is None:
                        h = op(states[inp])
                    else:
                        h = op(states[inp], weights[idx + j])
                    # Drop-path is skipped for identity (skip-connect) ops.
                    if drop_path_prob > 0. and not isinstance(list(op.children())[0], ops.Identity):
                        h = ops.drop_path(h, drop_path_prob)
                    hlist.append(h)
                elif selected_idxs[idx + j] == -1:
                    # undecided mix edges
                    h = op(states[inp], weights[idx + j])
                    hlist.append(h)
                elif selected_idxs[idx + j] == 0:
                    # zero operation
                    continue
                else:
                    # Edge decided: run only the selected candidate op.
                    h = self.oplist[idx + j](states[inp], None, selected_idxs[idx + j])
                    hlist.append(h)
            # Non-torch backends lack sum() support on tensors, so add manually.
            if self.torch_flag:
                s = sum(hlist)
            else:
                s = hlist[0]
                for ii in range(1, len(hlist)):
                    s += hlist[ii]
            states.append(s)
            idx += len(self.out_inp_list[i])
        # Concatenate the selected states along the channel axis.
        states_list = ()
        for i in self._concat:
            states_list += (states[i],)
        # states_list = tuple([states[i] for i in self._concat])
        return ops.concat(states_list)
@ClassFactory.register(ClassType.NETWORK)
class NormalCell(Cell):
    """Normal Cell structure according to desc."""

    def __init__(self, genotype, steps, concat, reduction_prev=None, C_prev_prev=None, C_prev=None, C=None):
        # A normal cell is a Cell with reduction fixed to False.
        super(NormalCell, self).__init__(genotype, steps, concat, False, reduction_prev, C_prev_prev, C_prev, C)
@ClassFactory.register(ClassType.NETWORK)
class ReduceCell(Cell):
    """Reduce Cell structure according to desc."""

    def __init__(self, genotype, steps, concat, reduction_prev=None, C_prev_prev=None, C_prev=None, C=None):
        # A reduce cell is a Cell with reduction fixed to True.
        super(ReduceCell, self).__init__(genotype, steps, concat, True, reduction_prev, C_prev_prev, C_prev, C)
@ClassFactory.register(ClassType.NETWORK)
class ContextualCell_v1(ops.Module):
    """New contextual cell design."""

    def __init__(self, op_names, config, inp, repeats=1, concat=False):
        """Construct ContextualCell_v1 class.

        :param op_names: list of operation indices
        :param config: list of config numbers
        :param inp: input channel
        :param repeats: number of repeated times
        :param concat: concat the result if set to True, otherwise add the result
        """
        super(ContextualCell_v1, self).__init__()
        # NOTE(review): 'MoudleList' is the (misspelled) container name in
        # vega's ops API.
        self.ops = ops.MoudleList()
        self._pos = []            # input position(s) feeding each op
        self._collect_inds = [0]  # indices of "loose end" features summed in call()
        self._pools = ['x']       # human-readable description of each feature
        for ind, op in enumerate(config):
            # first op is always applied on x
            if ind == 0:
                pos = 0
                op_id = op
                self._collect_inds.remove(pos)
                op_name = op_names[op_id]
                # turn-off scaling in batch norm
                self.ops.append(OPS[op_name](inp, 1, True, repeats))
                self._pos.append(pos)
                self._collect_inds.append(ind + 1)
                self._pools.append('{}({})'.format(op_name, self._pools[pos]))
            else:
                # Later entries encode two branches plus an aggregation node.
                pos1, pos2, op_id1, op_id2 = op
                # drop op_id from loose ends
                for ind2, (pos, op_id) in enumerate(zip([pos1, pos2], [op_id1, op_id2])):
                    if pos in self._collect_inds:
                        self._collect_inds.remove(pos)
                    op_name = op_names[op_id]
                    # turn-off scaling in batch norm
                    self.ops.append(OPS[op_name](inp, 1, True, repeats))
                    self._pos.append(pos)
                    # self._collect_inds.append(ind * 3 + ind2 - 1) # Do not collect intermediate
                    self._pools.append('{}({})'.format(
                        op_name, self._pools[pos]))
                # summation of the two branch outputs
                op_name = 'sum'
                self.ops.append(AggregateCell(size_1=None, size_2=None, agg_size=inp, pre_transform=False,
                                              concat=concat))  # turn-off convbnrelu
                self._pos.append([ind * 3 - 1, ind * 3])
                self._collect_inds.append(ind * 3 + 1)
                self._pools.append('{}({},{})'.format(
                    op_name, self._pools[ind * 3 - 1], self._pools[ind * 3]))

    def call(self, x):
        """Do an inference on ContextualCell_v1.

        :param x: input tensor
        :return: output tensor
        """
        feats = [x]
        for pos, op in zip(self._pos, self.ops):
            if isinstance(pos, list):
                # Aggregation node: takes two previously-computed features.
                assert len(pos) == 2, "Two ops must be provided"
                feats.append(op(feats[pos[0]], feats[pos[1]]))
            else:
                feats.append(op(feats[pos]))
        # Sum all loose-end features to produce the cell output.
        out = 0
        for i in self._collect_inds:
            out += feats[i]
        return out
@ClassFactory.register(ClassType.NETWORK)
class AggregateCell(ops.Module):
    """Aggregate two cells and sum or concat them up."""

    def __init__(self, size_1, size_2, agg_size, pre_transform=True, concat=False):
        """Construct AggregateCell.

        :param size_1: channel of first input
        :param size_2: channel of second input
        :param agg_size: channel of aggregated tensor
        :param pre_transform: whether to do a transform on two inputs
        :param concat: concat the result if set to True, otherwise add the result
        """
        super(AggregateCell, self).__init__()
        self.pre_transform = pre_transform
        self.concat = concat
        if self.pre_transform:
            # Project both inputs to the common aggregation channel count.
            self.branch_1 = conv_bn_relu(size_1, agg_size, 1, 1, 0)
            self.branch_2 = conv_bn_relu(size_2, agg_size, 1, 1, 0)
        if self.concat:
            # After channel concat, reduce back to agg_size with a 1x1 conv.
            self.conv1x1 = conv_bn_relu(agg_size * 2, agg_size, 1, 1, 0)

    def call(self, x1, x2):
        """Do an inference on AggregateCell.

        :param x1: first input
        :param x2: second input
        :return: output
        """
        if self.pre_transform:
            x1 = self.branch_1(x1)
            x2 = self.branch_2(x2)
        # Upsample the smaller feature map so spatial sizes match before
        # combining.
        if tuple(ops.get_shape(x1)[2:]) > tuple(ops.get_shape(x2)[2:]):
            x2 = ops.interpolate(x2, size=ops.get_shape(
                x1)[2:], mode='bilinear', align_corners=True)
        elif tuple(ops.get_shape(x1)[2:]) < tuple(ops.get_shape(x2)[2:]):
            x1 = ops.interpolate(x1, size=ops.get_shape(
                x2)[2:], mode='bilinear', align_corners=True)
        if self.concat:
            return self.conv1x1(ops.concat([x1, x2]))
        else:
            return x1 + x2
| [
"[email protected]"
] | |
00f9596b9cdd8422623d02cb8b0bb31ff158a62c | 27b4d1b7723845812111a0c6c659ef87c8da2755 | /face_recognition/02.py | 688d0fa1923af488a037fb7b805f09b4f024848a | [] | no_license | NAMEs/Python_Note | 59a6eff7b4287aaef04bd69fbd4af3faf56cccb4 | f560e00af37c4f22546abc4c2756e7037adcc40c | refs/heads/master | 2022-04-11T09:32:17.512962 | 2020-03-17T09:30:58 | 2020-03-17T09:30:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 669 | py | '''
自动找到图像中的所有面孔
'''
from PIL import Image
import face_recognition
# Load the picture (as an array usable with slicing below).
image1 = face_recognition.load_image_file('./unknow/5.jpg')
# Returns a list; each tuple in the list holds the location of one face
# found in the picture.
face_locations = face_recognition.face_locations(image1)
# print(face_locations)
print("There are {} people in the picture.".format(len(face_locations)))
for face_location in face_locations:
    # Each tuple stores the image coordinates as (top, right, bottom, left).
    top,right,bottom,left = face_location
    # Crop the face region out of the image and display it in a window.
    face_image = image1[top:bottom, left:right]
    pil_image = Image.fromarray(face_image)
    pil_image.show()
"[email protected]"
] | |
2bb3173ee451d64125b9bd935ef9020db64cf605 | 3784495ba55d26e22302a803861c4ba197fd82c7 | /venv/lib/python3.6/site-packages/tensorboard/plugins/core/core_plugin.py | d09c2e5e23649249a4a751fa162d9e0afb36e843 | [
"MIT"
] | permissive | databill86/HyperFoods | cf7c31f5a6eb5c0d0ddb250fd045ca68eb5e0789 | 9267937c8c70fd84017c0f153c241d2686a356dd | refs/heads/master | 2021-01-06T17:08:48.736498 | 2020-02-11T05:02:18 | 2020-02-11T05:02:18 | 241,407,659 | 3 | 0 | MIT | 2020-02-18T16:15:48 | 2020-02-18T16:15:47 | null | UTF-8 | Python | false | false | 21,021 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorBoard core plugin package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import gzip
import io
import math
import mimetypes
import os
import zipfile
import six
from werkzeug import utils
from werkzeug import wrappers
from tensorboard import plugin_util
from tensorboard.backend import http_util
from tensorboard.plugins import base_plugin
from tensorboard.util import tb_logging
logger = tb_logging.get_logger()
# If no port is specified, try to bind to this port. See help for --port
# for more details.
DEFAULT_PORT = 6006
class CorePlugin(base_plugin.TBPlugin):
  """Core plugin for TensorBoard.
  This plugin serves runs, configuration data, and static assets. This plugin
  should always be present in a TensorBoard WSGI application.
  """
  plugin_name = 'core'
  def __init__(self, context):
    """Instantiates CorePlugin.
    Args:
      context: A base_plugin.TBContext instance.
    """
    logdir_spec = context.flags.logdir_spec if context.flags else ''
    self._logdir = context.logdir or logdir_spec
    self._db_uri = context.db_uri
    self._window_title = context.window_title
    self._multiplexer = context.multiplexer
    self._db_connection_provider = context.db_connection_provider
    self._assets_zip_provider = context.assets_zip_provider
    # The generic data provider is only used when explicitly opted in via
    # --generic_data=true; otherwise the legacy code paths are used.
    if context.flags and context.flags.generic_data == 'true':
      self._data_provider = context.data_provider
    else:
      self._data_provider = None
  def is_active(self):
    return True
  def get_plugin_apps(self):
    apps = {
        '/___rPc_sWiTcH___': self._send_404_without_logging,
        '/audio': self._redirect_to_index,
        '/data/environment': self._serve_environment,
        '/data/logdir': self._serve_logdir,
        '/data/runs': self._serve_runs,
        '/data/experiments': self._serve_experiments,
        '/data/experiment_runs': self._serve_experiment_runs,
        '/data/window_properties': self._serve_window_properties,
        '/events': self._redirect_to_index,
        '/favicon.ico': self._send_404_without_logging,
        '/graphs': self._redirect_to_index,
        '/histograms': self._redirect_to_index,
        '/images': self._redirect_to_index,
    }
    apps.update(self.get_resource_apps())
    return apps
  def get_resource_apps(self):
    apps = {}
    if not self._assets_zip_provider:
      return apps
    with self._assets_zip_provider() as fp:
      with zipfile.ZipFile(fp) as zip_:
        for path in zip_.namelist():
          # Pre-compress each asset once at startup so every request can be
          # answered with Content-Encoding: gzip without per-request work.
          gzipped_asset_bytes = _gzip(zip_.read(path))
          wsgi_app = functools.partial(
              self._serve_asset, path, gzipped_asset_bytes)
          apps['/' + path] = wsgi_app
    apps['/'] = apps['/index.html']
    return apps
  @wrappers.Request.application
  def _send_404_without_logging(self, request):
    return http_util.Respond(request, 'Not found', 'text/plain', code=404)
  @wrappers.Request.application
  def _redirect_to_index(self, unused_request):
    return utils.redirect('/')
  @wrappers.Request.application
  def _serve_asset(self, path, gzipped_asset_bytes, request):
    """Serves a pre-gzipped static asset from the zip file."""
    mimetype = mimetypes.guess_type(path)[0] or 'application/octet-stream'
    return http_util.Respond(
        request, gzipped_asset_bytes, mimetype, content_encoding='gzip')
  @wrappers.Request.application
  def _serve_environment(self, request):
    """Serve a JSON object containing some base properties used by the frontend.
    * data_location is either a path to a directory or an address to a
      database (depending on which mode TensorBoard is running in).
    * window_title is the title of the TensorBoard web page.
    """
    if self._data_provider:
      experiment = plugin_util.experiment_id(request.environ)
      data_location = self._data_provider.data_location(experiment)
    else:
      data_location = self._logdir or self._db_uri
    return http_util.Respond(
        request,
        {
            'data_location': data_location,
            'window_title': self._window_title,
        },
        'application/json')
  @wrappers.Request.application
  def _serve_logdir(self, request):
    """Respond with a JSON object containing this TensorBoard's logdir."""
    # TODO(chihuahua): Remove this method once the frontend instead uses the
    # /data/environment route (and no deps throughout Google use the
    # /data/logdir route).
    return http_util.Respond(
        request, {'logdir': self._logdir}, 'application/json')
  @wrappers.Request.application
  def _serve_window_properties(self, request):
    """Serve a JSON object containing this TensorBoard's window properties."""
    # TODO(chihuahua): Remove this method once the frontend instead uses the
    # /data/environment route.
    return http_util.Respond(
        request, {'window_title': self._window_title}, 'application/json')
  @wrappers.Request.application
  def _serve_runs(self, request):
    """Serve a JSON array of run names, ordered by run started time.
    Sort order is by started time (aka first event time) with empty times sorted
    last, and then ties are broken by sorting on the run name.
    """
    if self._data_provider:
      experiment = plugin_util.experiment_id(request.environ)
      runs = sorted(
          self._data_provider.list_runs(experiment_id=experiment),
          key=lambda run: (
              run.start_time if run.start_time is not None else float('inf'),
              run.run_name,
          )
      )
      run_names = [run.run_name for run in runs]
    elif self._db_connection_provider:
      db = self._db_connection_provider()
      cursor = db.execute('''
        SELECT
          run_name,
          started_time IS NULL as started_time_nulls_last,
          started_time
        FROM Runs
        ORDER BY started_time_nulls_last, started_time, run_name
      ''')
      run_names = [row[0] for row in cursor]
    else:
      # Python's list.sort is stable, so to order by started time and
      # then by name, we can just do the sorts in the reverse order.
      run_names = sorted(self._multiplexer.Runs())
      def get_first_event_timestamp(run_name):
        try:
          return self._multiplexer.FirstEventTimestamp(run_name)
        except ValueError as e:
          # logger.warn is a deprecated alias of logger.warning.
          logger.warning(
              'Unable to get first event timestamp for run %s: %s', run_name, e)
          # Put runs without a timestamp at the end.
          return float('inf')
      run_names.sort(key=get_first_event_timestamp)
    return http_util.Respond(request, run_names, 'application/json')
  @wrappers.Request.application
  def _serve_experiments(self, request):
    """Serve a JSON array of experiments. Experiments are ordered by experiment
    started time (aka first event time) with empty times sorted last, and then
    ties are broken by sorting on the experiment name.
    """
    results = self.list_experiments_impl()
    return http_util.Respond(request, results, 'application/json')
  def list_experiments_impl(self):
    results = []
    if self._db_connection_provider:
      db = self._db_connection_provider()
      cursor = db.execute('''
        SELECT
          experiment_id,
          experiment_name,
          started_time,
          started_time IS NULL as started_time_nulls_last
        FROM Experiments
        ORDER BY started_time_nulls_last, started_time, experiment_name,
          experiment_id
      ''')
      results = [{
          "id": row[0],
          "name": row[1],
          "startTime": row[2],
      } for row in cursor]
    return results
  @wrappers.Request.application
  def _serve_experiment_runs(self, request):
    """Serve a JSON runs of an experiment, specified with query param
    `experiment`, with their nested data, tag, populated. Runs returned are
    ordered by started time (aka first event time) with empty times sorted last,
    and then ties are broken by sorting on the run name. Tags are sorted by
    its name, displayName, and lastly, inserted time.
    """
    results = []
    if self._db_connection_provider:
      exp_id = plugin_util.experiment_id(request.environ)
      runs_dict = collections.OrderedDict()
      db = self._db_connection_provider()
      cursor = db.execute('''
        SELECT
          Runs.run_id,
          Runs.run_name,
          Runs.started_time,
          Runs.started_time IS NULL as started_time_nulls_last,
          Tags.tag_id,
          Tags.tag_name,
          Tags.display_name,
          Tags.plugin_name,
          Tags.inserted_time
        From Runs
        LEFT JOIN Tags ON Runs.run_id = Tags.run_id
        WHERE Runs.experiment_id = ?
        AND (Tags.tag_id IS NULL OR Tags.plugin_name IS NOT NULL)
        ORDER BY started_time_nulls_last,
          Runs.started_time,
          Runs.run_name,
          Runs.run_id,
          Tags.tag_name,
          Tags.display_name,
          Tags.inserted_time;
      ''', (exp_id,))
      # Rows arrive run-major (one row per run/tag pair); fold them into one
      # dict entry per run, preserving the SQL ordering via OrderedDict.
      for row in cursor:
        run_id = row[0]
        if run_id not in runs_dict:
          runs_dict[run_id] = {
              "id": run_id,
              "name": row[1],
              "startTime": math.floor(row[2]),
              "tags": [],
          }
        # tag can be missing.
        if row[4]:
          runs_dict[run_id].get("tags").append({
              "id": row[4],
              "displayName": row[6],
              "name": row[5],
              "pluginName": row[7],
          })
      results = list(runs_dict.values())
    return http_util.Respond(request, results, 'application/json')
class CorePluginLoader(base_plugin.TBLoader):
  """CorePlugin factory."""
  def define_flags(self, parser):
    """Adds standard TensorBoard CLI flags to parser."""
    # Flags cover data sources (--logdir, --logdir_spec, --db), networking
    # (--host, --bind_all, --port, --path_prefix), one-shot modes
    # (--inspect, --version_tb) and reload/performance tuning.
    parser.add_argument(
        '--logdir',
        metavar='PATH',
        type=str,
        default='',
        help='''\
Directory where TensorBoard will look to find TensorFlow event files
that it can display. TensorBoard will recursively walk the directory
structure rooted at logdir, looking for .*tfevents.* files.
A leading tilde will be expanded with the semantics of Python's
os.expanduser function.
''')
    parser.add_argument(
        '--logdir_spec',
        metavar='PATH_SPEC',
        type=str,
        default='',
        help='''\
Like `--logdir`, but with special interpretation for commas and colons:
commas separate multiple runs, where a colon specifies a new name for a
run. For example:
`tensorboard --logdir_spec=name1:/path/to/logs/1,name2:/path/to/logs/2`.
This flag is discouraged and can usually be avoided. TensorBoard walks
log directories recursively; for finer-grained control, prefer using a
symlink tree. Some features may not work when using `--logdir_spec`
instead of `--logdir`.
''')
    parser.add_argument(
        '--host',
        metavar='ADDR',
        type=str,
        default=None,  # like localhost, but prints a note about `--bind_all`
        help='''\
What host to listen to (default: localhost). To serve to the entire local
network on both IPv4 and IPv6, see `--bind_all`, with which this option is
mutually exclusive.
''')
    parser.add_argument(
        '--bind_all',
        action='store_true',
        help='''\
Serve on all public interfaces. This will expose your TensorBoard instance to
the network on both IPv4 and IPv6 (where available). Mutually exclusive with
`--host`.
''')
    parser.add_argument(
        '--port',
        metavar='PORT',
        # "default" is a sentinel meaning "try DEFAULT_PORT, scan if taken".
        type=lambda s: (None if s == "default" else int(s)),
        default="default",
        help='''\
Port to serve TensorBoard on. Pass 0 to request an unused port selected
by the operating system, or pass "default" to try to bind to the default
port (%s) but search for a nearby free port if the default port is
unavailable. (default: "default").\
''' % DEFAULT_PORT)
    parser.add_argument(
        '--purge_orphaned_data',
        metavar='BOOL',
        # Custom str-to-bool converter since regular bool() doesn't work.
        type=lambda v: {'true': True, 'false': False}.get(v.lower(), v),
        choices=[True, False],
        default=True,
        help='''\
Whether to purge data that may have been orphaned due to TensorBoard
restarts. Setting --purge_orphaned_data=False can be used to debug data
disappearance. (default: %(default)s)\
''')
    parser.add_argument(
        '--db',
        metavar='URI',
        type=str,
        default='',
        help='''\
[experimental] sets SQL database URI and enables DB backend mode, which is
read-only unless --db_import is also passed.\
''')
    parser.add_argument(
        '--db_import',
        action='store_true',
        help='''\
[experimental] enables DB read-and-import mode, which in combination with
--logdir imports event files into a DB backend on the fly. The backing DB is
temporary unless --db is also passed to specify a DB path to use.\
''')
    parser.add_argument(
        '--inspect',
        action='store_true',
        help='''\
Prints digests of event files to command line.
This is useful when no data is shown on TensorBoard, or the data shown
looks weird.
Must specify one of `logdir` or `event_file` flag.
Example usage:
`tensorboard --inspect --logdir mylogdir --tag loss`
See tensorboard/backend/event_processing/event_file_inspector.py for more info.\
''')
    # This flag has a "_tb" suffix to avoid conflicting with an internal flag
    # named --version. Note that due to argparse auto-expansion of unambiguous
    # flag prefixes, you can still invoke this as `tensorboard --version`.
    parser.add_argument(
        '--version_tb',
        action='store_true',
        help='Prints the version of Tensorboard')
    parser.add_argument(
        '--tag',
        metavar='TAG',
        type=str,
        default='',
        help='tag to query for; used with --inspect')
    parser.add_argument(
        '--event_file',
        metavar='PATH',
        type=str,
        default='',
        help='''\
The particular event file to query for. Only used if --inspect is
present and --logdir is not specified.\
''')
    parser.add_argument(
        '--path_prefix',
        metavar='PATH',
        type=str,
        default='',
        help='''\
An optional, relative prefix to the path, e.g. "/path/to/tensorboard".
resulting in the new base url being located at
localhost:6006/path/to/tensorboard under default settings. A leading
slash is required when specifying the path_prefix. A trailing slash is
optional and has no effect. The path_prefix can be leveraged for path
based routing of an ELB when the website base_url is not available e.g.
"example.site.com/path/to/tensorboard/".\
''')
    parser.add_argument(
        '--window_title',
        metavar='TEXT',
        type=str,
        default='',
        help='changes title of browser window')
    parser.add_argument(
        '--max_reload_threads',
        metavar='COUNT',
        type=int,
        default=1,
        help='''\
The max number of threads that TensorBoard can use to reload runs. Not
relevant for db read-only mode. Each thread reloads one run at a time.
(default: %(default)s)\
''')
    parser.add_argument(
        '--reload_interval',
        metavar='SECONDS',
        type=float,
        default=5.0,
        help='''\
How often the backend should load more data, in seconds. Set to 0 to
load just once at startup and a negative number to never reload at all.
Not relevant for DB read-only mode. (default: %(default)s)\
''')
    parser.add_argument(
        '--reload_task',
        metavar='TYPE',
        type=str,
        default='auto',
        choices=['auto', 'thread', 'process', 'blocking'],
        help='''\
[experimental] The mechanism to use for the background data reload task.
The default "auto" option will conditionally use threads for legacy reloading
and a child process for DB import reloading. The "process" option is only
useful with DB import mode. The "blocking" option will block startup until
reload finishes, and requires --load_interval=0. (default: %(default)s)\
''')
    parser.add_argument(
        '--reload_multifile',
        metavar='BOOL',
        # Custom str-to-bool converter since regular bool() doesn't work.
        type=lambda v: {'true': True, 'false': False}.get(v.lower(), v),
        choices=[True, False],
        default=None,
        help='''\
[experimental] If true, this enables experimental support for continuously
polling multiple event files in each run directory for newly appended data
(rather than only polling the last event file). Event files will only be
polled as long as their most recently read data is newer than the threshold
defined by --reload_multifile_inactive_secs, to limit resource usage. Beware
of running out of memory if the logdir contains many active event files.
(default: false)\
''')
    parser.add_argument(
        '--reload_multifile_inactive_secs',
        metavar='SECONDS',
        type=int,
        default=4000,
        help='''\
[experimental] Configures the age threshold in seconds at which an event file
that has no event wall time more recent than that will be considered an
inactive file and no longer polled (to limit resource usage). If set to -1,
no maximum age will be enforced, but beware of running out of memory and
heavier filesystem read traffic. If set to 0, this reverts to the older
last-file-only polling strategy (akin to --reload_multifile=false).
(default: %(default)s - intended to ensure an event file remains active if
it receives new data at least once per hour)\
''')
    parser.add_argument(
        '--generic_data',
        metavar='TYPE',
        type=str,
        default='auto',
        choices=['false', 'auto', 'true'],
        help='''\
[experimental] Whether to use generic data provider infrastructure. The
"auto" option enables this only for dashboards that are considered
stable under the new codepaths. (default: %(default)s)\
''')
    parser.add_argument(
        '--samples_per_plugin',
        type=str,
        default='',
        help='''\
An optional comma separated list of plugin_name=num_samples pairs to
explicitly specify how many samples to keep per tag for that plugin. For
unspecified plugins, TensorBoard randomly downsamples logged summaries
to reasonable values to prevent out-of-memory errors for long running
jobs. This flag allows fine control over that downsampling. Note that 0
means keep all samples of that type. For instance "scalars=500,images=0"
keeps 500 scalars and all images. Most users should not need to set this
flag.\
''')
  def fix_flags(self, flags):
    """Fixes standard TensorBoard CLI flags to parser."""
    FlagsError = base_plugin.FlagsError
    # Validation is mode-by-mode: --version_tb needs nothing; --inspect has
    # its own logdir/event_file rules; serving mode requires a data source.
    if flags.version_tb:
      pass
    elif flags.inspect:
      if flags.logdir_spec:
        raise FlagsError('--logdir_spec is not supported with --inspect.')
      if flags.logdir and flags.event_file:
        raise FlagsError(
            'Must specify either --logdir or --event_file, but not both.')
      if not (flags.logdir or flags.event_file):
        raise FlagsError('Must specify either --logdir or --event_file.')
    elif flags.logdir and flags.logdir_spec:
      raise FlagsError(
          'May not specify both --logdir and --logdir_spec')
    elif not flags.db and not flags.logdir and not flags.logdir_spec:
      raise FlagsError('A logdir or db must be specified. '
                       'For example `tensorboard --logdir mylogdir` '
                       'or `tensorboard --db sqlite:~/.tensorboard.db`. '
                       'Run `tensorboard --helpfull` for details and examples.')
    elif flags.host is not None and flags.bind_all:
      raise FlagsError('Must not specify both --host and --bind_all.')
    # Normalize --path_prefix: drop any trailing slash, require a leading one.
    flags.path_prefix = flags.path_prefix.rstrip('/')
    if flags.path_prefix and not flags.path_prefix.startswith('/'):
      raise FlagsError(
          'Path prefix must start with slash, but got: %r.' % flags.path_prefix)
  def load(self, context):
    """Creates CorePlugin instance."""
    return CorePlugin(context)
def _gzip(bytestring):
out = six.BytesIO()
# Set mtime to zero for deterministic results across TensorBoard launches.
with gzip.GzipFile(fileobj=out, mode='wb', compresslevel=3, mtime=0) as f:
f.write(bytestring)
return out.getvalue()
| [
"[email protected]"
] | |
5335d5257663403a517f518fdf9c961756784406 | 3e4c69317323bca865b025503b60bf83d3ae65f8 | /tests/utils/test_acmg.py | 6132c8f86287fe797726022e906f8235c7898b7c | [
"BSD-3-Clause"
] | permissive | tapaswenipathak/scout | f59beaa997a45487ac96c3b3e560b5e5aa9b30ae | c9b3ec14f5105abe6066337110145a263320b4c5 | refs/heads/master | 2020-05-30T11:13:25.662300 | 2019-05-28T09:26:25 | 2019-05-28T09:26:25 | 189,694,812 | 1 | 0 | BSD-3-Clause | 2019-06-01T05:36:35 | 2019-06-01T05:36:34 | null | UTF-8 | Python | false | false | 11,264 | py | from scout.utils.acmg import (is_pathogenic, is_likely_pathogenic, is_benign,
is_likely_benign, get_acmg)
def test_is_pathogenic_1():
    """Criteria (i) for pathogenic: PVS1 AND (a) >=1 strong, OR
    (b) >=2 moderate, OR (c) 1 moderate + 1 supporting, OR
    (d) >=2 supporting."""
    # (a) PVS1 plus one strong term qualifies; PVS1 alone does not.
    assert is_pathogenic(True, ['PS1'], [], [])
    assert not is_pathogenic(True, [], [], [])
    assert is_pathogenic(True, ['PS1', 'PS2'], [], [])
    # (b) PVS1 plus two moderate terms; one moderate is too few.
    assert is_pathogenic(True, [], ['PM1', 'PM2'], [])
    assert not is_pathogenic(True, [], ['PM2'], [])
    # (c) PVS1 plus one moderate and one supporting term.
    assert is_pathogenic(True, [], ['PM1'], ['PP1'])
    # (d) PVS1 plus two supporting terms; one supporting is too few.
    assert is_pathogenic(True, [], [], ['PP1', 'PP2'])
    assert not is_pathogenic(True, [], [], ['PP1'])
def test_is_pathogenic_2():
    """Criteria (ii) for pathogenic: >=2 strong terms (PS1-PS4),
    without PVS1."""
    assert is_pathogenic(False, ['PS1', 'PS2'], [], [])
    # A single strong term on its own is not enough.
    assert not is_pathogenic(False, ['PS1'], [], [])
def test_is_pathogenic_3():
    """Criteria (iii) for pathogenic: 1 strong AND (a) >=3 moderate, OR
    (b) 2 moderate + >=2 supporting, OR (c) 1 moderate + >=4 supporting."""
    # (a) one strong plus three moderate terms; two moderate is too few.
    assert is_pathogenic(False, ['PS1'], ['PM1', 'PM2', 'PM3'], [])
    assert not is_pathogenic(False, ['PS1'], ['PM1', 'PM2'], [])
    # (b) one strong, two moderate, two supporting; one supporting fails.
    assert is_pathogenic(False, ['PS1'], ['PM1', 'PM2'], ['PP1', 'PP2'])
    assert not is_pathogenic(False, ['PS1'], ['PM1', 'PM2'], ['PP1'])
    # (c) one strong, one moderate, four supporting; three supporting fails.
    assert is_pathogenic(False, ['PS1'], ['PM1'], ['PP1', 'PP2', 'PP3', 'PP4'])
    assert not is_pathogenic(False, ['PS1'], ['PM1'], ['PP1', 'PP2', 'PP3'])
def test_is_likely_pathogenic_1():
    """Criteria (i) for likely pathogenic: PVS1 AND 1 moderate (PM1-PM6)."""
    assert is_likely_pathogenic(True, [], ['PM1'], [])
    # PVS1 with no moderate term does not qualify.
    assert not is_likely_pathogenic(True, [], [], [])
def test_is_likely_pathogenic_2():
    """Criteria (ii) for likely pathogenic: 1 strong AND 1-2 moderate."""
    assert is_likely_pathogenic(False, ['PS1'], ['PM1'], [])
    # A moderate term without a strong term does not qualify.
    assert not is_likely_pathogenic(False, [], ['PM1'], [])
def test_is_likely_pathogenic_3():
    """Criteria (iii) for likely pathogenic: 1 strong AND >=2 supporting."""
    assert is_likely_pathogenic(False, ['PS1'], [], ['PP1', 'PP2'])
    # One supporting term is one short of the threshold.
    assert not is_likely_pathogenic(False, ['PS1'], [], ['PP1'])
def test_is_likely_pathogenic_4():
    """Criteria (iv) for likely pathogenic: >=3 moderate terms (PM1-PM6)."""
    assert is_likely_pathogenic(False, [], ['PM1', 'PM2', 'PM3'], [])
    # Two moderate terms are one short of the threshold.
    assert not is_likely_pathogenic(False, [], ['PM1', 'PM2'], [])
def test_is_likely_pathogenic_5():
    """Criteria (v) for likely pathogenic: 2 moderate AND >=2 supporting."""
    assert is_likely_pathogenic(False, [], ['PM1', 'PM2'], ['PP1', 'PP2'])
    # A single supporting term is one short of the threshold.
    assert not is_likely_pathogenic(False, [], ['PM1', 'PM2'], ['PP1'])
def test_is_likely_pathogenic_6():
    """Criteria (vi) for likely pathogenic: 1 moderate AND >=4 supporting."""
    assert is_likely_pathogenic(False, [], ['PM1'], ['PP1', 'PP2', 'PP3', 'PP4'])
    # Three supporting terms are one short of the threshold.
    assert not is_likely_pathogenic(False, [], ['PM1'], ['PP1', 'PP2', 'PP3'])
def test_is_benign_1():
    """Criteria (i) for benign: a single stand-alone term (BA1)."""
    assert is_benign(True, [])
    # Without BA1 and without strong terms the call is not benign.
    assert not is_benign(False, [])
def test_is_benign_2():
    """Criteria (ii) for benign: >=2 strong terms (BS1-BS4), without BA1."""
    assert is_benign(False, ['BS1', 'BS2'])
    # A single strong term is one short of the threshold.
    assert not is_benign(False, ['BS1'])
def test_is_likely_benign_1():
    """Criteria (i) for likely benign: 1 strong (BS1-BS4) AND
    1 supporting (BP1-BP7)."""
    assert is_likely_benign(['BS1'], ['BP1'])
    # A strong term without any supporting term does not qualify.
    assert not is_likely_benign(['BS1'], [])
def test_is_likely_benign_2():
    """Second criterias for likely benign:
    Likely Benign
    (ii) ≥2 Supporting (BP1–BP7)
    """
    # BUG FIX: this function was previously (mis)named test_is_benign_2,
    # which collided with the earlier test of that name — the duplicate
    # definition shadowed the first one, so pytest never collected it.
    # It actually exercises is_likely_benign, hence the corrected name.
    # GIVEN values that fulfill the (ii) criteria for likely benign
    bs_terms = []
    bp_terms = ['BP1', 'BP2']
    ## WHEN performing the evaluation
    res = is_likely_benign(bs_terms, bp_terms)
    ## THEN assert the criterias are fullfilled
    assert res
    bp_terms = ['BP1']
    res = is_likely_benign(bs_terms, bp_terms)
    assert not res
def test_get_acmg_no_terms():
    """An empty term list yields an uncertain-significance verdict."""
    assert get_acmg([]) == 'uncertain_significance'
def test_get_acmg_pathogenic():
    """PVS1 plus a strong term classifies as pathogenic, even when a
    benign strong term is also present."""
    assert get_acmg(['PVS1', 'PS1']) == 'pathogenic'
    assert get_acmg(['PVS1', 'PS1', 'BS1']) == 'pathogenic'
def test_get_acmg_uncertain():
    """PVS1 alone, or pathogenic evidence contradicted by BA1, is
    classified as uncertain significance."""
    assert get_acmg(['PVS1']) == 'uncertain_significance'
    assert get_acmg(['PVS1', 'PS1', 'BA1']) == 'uncertain_significance'
| [
"[email protected]"
] | |
a20075e0059049e6698238fbbf02603234423db8 | 4cf3f8845d64ed31737bd7795581753c6e682922 | /.history/main_20200118153017.py | 11784752a3b75cb54167767e62d3d06c83f12c3a | [] | no_license | rtshkmr/hack-roll | 9bc75175eb9746b79ff0dfa9307b32cfd1417029 | 3ea480a8bf6d0067155b279740b4edc1673f406d | refs/heads/master | 2021-12-23T12:26:56.642705 | 2020-01-19T04:26:39 | 2020-01-19T04:26:39 | 234,702,684 | 1 | 0 | null | 2021-12-13T20:30:54 | 2020-01-18T08:12:52 | Python | UTF-8 | Python | false | false | 256,643 | py | from telegram.ext import Updater, CommandHandler
import requests
import re
# API call to source, get json (url is obtained):
# NOTE(review): this request fires at import time, and `image_url` is never
# used below (bop() fetches a fresh URL per command via get_url()) — this is
# dead code plus an import-time network dependency; candidate for removal.
contents = requests.get('https://random.dog/woof.json').json()
image_url = contents['url']
def get_url():
    """Fetch a fresh random-dog image URL from the random.dog API."""
    payload = requests.get('https://random.dog/woof.json').json()
    return payload['url']
def get_tasks():
    """Fetch the task list from the API and return each task's title.

    Returns a list of title strings, one per task in the JSON payload.
    """
    response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
    # BUG FIX: the old `obj.map(lambda item: item.title)` was a JavaScript
    # idiom — Python lists have no .map and JSON objects arrive as dicts, so
    # it raised AttributeError. Read titles by key instead.
    # Assumes each task object carries a 'title' field — TODO confirm with API.
    return [item['title'] for item in response]
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
    """Handler for /bop: fetch a random dog picture and post it to the chat."""
    photo_url = get_url()
    target_chat = update.message.chat_id
    bot.send_photo(chat_id=target_chat, photo=photo_url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# NOTE(review): this region previously contained ~40 byte-identical copies of
# the same five functions plus ~40 `if __name__ == '__main__'` guards. Python
# keeps only the last definition of each name, and updater.idle() blocks
# forever, so at most one copy ever ran. The duplicates are collapsed into
# this single copy; two latent bugs present in every copy are fixed below.


def get_url():
    """Return the URL of a random dog image from the random.dog API."""
    contents = requests.get('https://random.dog/woof.json').json()
    url = contents['url']
    return url


def get_tasks():
    """Return the list of task titles from the nowwat API.

    The endpoint returns a JSON array of task objects; this extracts each
    task's ``title`` field.
    """
    response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
    # BUG FIX: the original did ``response.map(lambda item: item.title)``.
    # Python lists have no ``.map`` method, and JSON objects deserialize to
    # dicts (no ``.title`` attribute), so that always raised AttributeError.
    return [item['title'] for item in response]


# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
    """/bop handler: send a random dog photo to the requesting chat."""
    # image url:
    url = get_url()
    # recipient's ID:
    chat_id = update.message.chat_id
    bot.send_photo(chat_id=chat_id, photo=url)


def getTaskList(bot, update):
    """Handler: send each task title to the chat as a Markdown message.

    NOTE(review): this handler is never registered on the dispatcher in
    ``main`` (only /bop is) — confirm whether it should be wired up, e.g.
    ``dp.add_handler(CommandHandler('tasks', getTaskList))``.
    """
    taskList = get_tasks()
    chat_id = update.message.chat_id
    for task in taskList:
        # BUG FIX: the original passed the bare name ``Markdown`` as a
        # positional argument — a NameError unless defined elsewhere in the
        # file (not visible here). The parse mode is meant to be the string
        # 'Markdown' passed via the parse_mode keyword.
        bot.sendMessage(chat_id, task, parse_mode='Markdown')


def main():
    """Wire up the bot's command handlers and start long polling."""
    # SECURITY NOTE(review): the bot token is hard-coded and committed to
    # source control; it should be revoked and loaded from an environment
    # variable instead.
    updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
    dp = updater.dispatcher
    dp.add_handler(CommandHandler('bop', bop))
    updater.start_polling()
    updater.idle()  # blocks until the process receives a stop signal


if __name__ == '__main__':
    main()
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
# NOTE(review): the original file repeated this exact script dozens of times
# back-to-back.  The duplicates are collapsed into a single copy here:
# redefining the same functions over and over only rebinds the names, and
# only the first `main()` call ever ran, because `updater.idle()` blocks
# until the process is interrupted.

def get_url():
    """Fetch a random dog picture and return its image URL."""
    contents = requests.get('https://random.dog/woof.json').json()
    return contents['url']


def get_tasks():
    """Fetch the task list and return the list of task titles.

    Assumes the endpoint returns a JSON array of objects carrying a
    ``title`` field -- TODO confirm against the API.
    """
    response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
    # BUG FIX: the original did ``response.map(lambda item: item.title)``,
    # but ``.json()`` yields a plain Python list of dicts: lists have no
    # ``.map`` method and dict fields are subscripted, not attributes.
    return [item['title'] for item in response]


def bop(bot, update):
    """Handle /bop: send a random dog photo to the requesting chat.

    Needs the image URL and the recipient's ID (group or user chat id).
    """
    url = get_url()
    chat_id = update.message.chat_id
    bot.send_photo(chat_id=chat_id, photo=url)


def getTaskList(bot, update):
    """Send each task title to the requesting chat as a Markdown message."""
    taskList = get_tasks()
    chat_id = update.message.chat_id
    for task in taskList:
        # BUG FIX: the original passed the bare, undefined name ``Markdown``
        # (a NameError at runtime); the parse mode must be the string
        # 'Markdown' passed as the parse_mode argument.
        bot.sendMessage(chat_id, task, parse_mode='Markdown')


def main():
    """Wire up the bot's command handlers and poll for updates."""
    # NOTE(review): a bot token does not belong in source control -- move it
    # to an environment variable or config file.  Left inline only to
    # preserve the original behavior.
    updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
    dp = updater.dispatcher
    dp.add_handler(CommandHandler('bop', bop))
    # NOTE(review): ``getTaskList`` is defined but was never registered as a
    # handler in the original; it is kept unregistered so observable
    # behavior does not change.
    updater.start_polling()
    updater.idle()  # blocks until interrupted (SIGINT/SIGTERM)


if __name__ == '__main__':
    main()
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
# NOTE(review): this span held 37 byte-identical copies of the same five
# function definitions plus a `__main__` guard. Re-defining the same names
# merely rebinds them to identical functions, and every duplicated guard
# would invoke main() again when run as a script, so the repetition added
# nothing but risk. Collapsed to a single, corrected copy.

def get_url():
    """Fetch the URL of a random dog picture from the random.dog API."""
    contents = requests.get('https://random.dog/woof.json').json()
    return contents['url']

def get_tasks():
    """Return the list of task titles fetched from the nowwat API.

    The endpoint returns a JSON array of task objects; `json()` decodes
    each object to a dict, so titles are read by key. (Assumes each task
    has a 'title' field — matches the intent of the original `item.title`.)
    """
    response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
    # BUG FIX: lists have no `.map` method, and JSON objects decode to
    # dicts (no `.title` attribute); the original raised AttributeError.
    return [item['title'] for item in response]

# Sending the image requires the image URL and the recipient's chat id
# (group or user).
def bop(bot, update):
    """Reply to the /bop command with a random dog photo."""
    url = get_url()
    chat_id = update.message.chat_id
    bot.send_photo(chat_id=chat_id, photo=url)

def getTaskList(bot, update):
    """Send each task title to the requesting chat as Markdown."""
    chat_id = update.message.chat_id
    for task in get_tasks():
        # BUG FIX: `Markdown` was an undefined bare name; pass the parse
        # mode as the keyword python-telegram-bot expects.
        bot.sendMessage(chat_id, task, parse_mode='Markdown')

def main():
    """Wire up the Telegram command handlers and start long polling."""
    # SECURITY: the bot token was hard-coded (and therefore leaked with the
    # source). Prefer the environment; the literal fallback keeps existing
    # deployments working, but the exposed token should be revoked/rotated.
    import os
    token = os.environ.get('TELEGRAM_BOT_TOKEN',
                           '982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
    updater = Updater(token)
    dp = updater.dispatcher
    dp.add_handler(CommandHandler('bop', bop))
    # FIX: getTaskList was defined but never registered.
    dp.add_handler(CommandHandler('tasks', getTaskList))
    updater.start_polling()
    updater.idle()

if __name__ == '__main__':
    main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
4edf1fe1a5e22527573a5583c5b23eb261503734 | 308c6fb81f0023b9d5682731c10402ce6a2ebb49 | /django-coreyschafer/bin/wheel | 22071a9376b83a7a00b89f03cc28124ac54f7bc7 | [] | no_license | frclasso/django-project1 | 221646ddc9a6702ab8ab2b0e475f4eed09411457 | 9527e30f5f6d54be2a77524411f114441c968a92 | refs/heads/master | 2020-07-06T13:42:28.314546 | 2019-08-26T00:55:25 | 2019-08-26T00:55:25 | 203,033,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | #!/home/fabio-gurus/Desktop/repositories/django_projects/django-coreyschafer/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
f6d9cbd62c65febcdc5836464a6acb874d242b87 | 197420c1f28ccb98059888dff214c9fd7226e743 | /Адаптивный_тренажёр/1.All_including/1.All_including.py | aa7587c23d75e4631ca3603a350790351678c14f | [] | no_license | Vovanuch/python-basics-1 | fc10b6f745defff31364b66c65a704a9cf05d076 | a29affec12e8b80a1d3beda3a50cde4867b1dee2 | refs/heads/master | 2023-07-06T17:10:46.341121 | 2021-08-06T05:38:19 | 2021-08-06T05:38:19 | 267,504,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,137 | py | '''
Напишите программу, которая находит все позиции вхождения подстроки в строку.
Формат ввода:
На первой строке содержится исходная строка, на второй строке ввода указана подстрока, позиции которой требуется найти. Строки состоят из символов латинского алфавита.
Формат вывода:
Строка, содержащая индексы (индексация начинается с нуля) вхождения подстроки в строку, разделённые пробелом или число -1 в случае, когда подстрока не найдена.
Sample Input 1:
abacabadaba
aba
Sample Output 1:
0 4 8
Sample Input 2:
aaaa
aa
Sample Output 2:
0 1 2
Sample Input 3:
abc
d
Sample Output 3:
-1
'''
s = input()
a = input()
is_part = False
for i in range(len(s)):
if s[i:].startswith(a):
print(i, end=' ')
is_part = True
if not is_part:
print(-1) | [
"[email protected]"
] | |
50b9a5870c5e0c488973768abc415d1754612da4 | 2324dea2cb3003c8ab7e8fd80588d44973eb8c77 | /Euler_6_273.py | 28a9e0a2585490ed604b3493f38cb928c2fa2009 | [] | no_license | MikeOcc/MyProjectEulerFiles | 5f51bc516cb6584732dc67bb2f9c7fd9e6d51e56 | 4d066d52380aade215636953589bf56d6b88f745 | refs/heads/master | 2021-01-16T18:45:44.133229 | 2015-05-27T18:28:43 | 2015-05-27T18:28:43 | 5,876,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | #
#
# Euler Problem 273
#
#
from Functions import IsPrime,RetFact
from itertools import combinations
def Prod(f):
retval = 1
for vv in f:
retval *= vv
return retval
# Project Euler 273: sum of a in all decompositions N = a^2 + b^2 (a < b)
# over square-free N built from primes of the form 4k + 1.
# NOTE(review): IsPrime/RetFact come from the project-local Functions module;
# presumably a primality test and a factorisation helper -- confirm.
p=[]
S=set([])
# Collect primes p = 4k + 1 for k = 1..38 (i.e. candidates up to 153).
for k in range(1,39):
    v = 4*k + 1
    if IsPrime(v): p+=[v]
print p
l = len(p)  # NOTE(review): 'l' is assigned but never used below.
summ = 0
cnt = 0
# NOTE(review): only products of 1 or 2 distinct primes are considered here
# (range(1,3)); the full problem needs all subset sizes -- confirm intent.
for i in range(1,3):
    n = combinations(p,i)
    for t in n:
        N = Prod(t)
        # Search for representations N = c^2 + v^2 with c < v.
        for c in xrange(1,int(N**.5)+1):
            u = c**2
            v = (N - u) **.5
            # v is an integer exactly when N - c^2 is a perfect square.
            if v == int(v) and c<v:
                summ += c
                #print "Found", N, c,v,t,RetFact(c),RetFact(v),(c*1.0)/N
                if N not in S:S.add(N)
                cnt += 1
print "Sum of S(N) is ", summ
print "number of items is ", cnt
print
print sorted(S)
"[email protected]"
] | |
a467f80425555a9c632f0c29053bb48fe8de5aa0 | af35f890c0c6a2fa531f47a4c2ed132e8920190d | /python/leetcode/string/1111_nesting_depth.py | bc58f9173f4a28b783a17a8182e19c032ec63a0c | [] | no_license | Levintsky/topcoder | b1b17cd3fddef5a23297bcbe4e165508d09a655d | a5cb862f0c5a3cfd21468141800568c2dedded0a | refs/heads/master | 2021-06-23T10:15:27.839199 | 2021-02-01T07:49:48 | 2021-02-01T07:49:48 | 188,175,357 | 0 | 1 | null | 2020-05-19T09:25:12 | 2019-05-23T06:33:38 | C | UTF-8 | Python | false | false | 3,287 | py | """
1111. Maximum Nesting Depth of Two Valid Parentheses Strings (Medium)
A string is a valid parentheses string (denoted VPS) if and only if it consists of
"(" and ")" characters only, and:
It is the empty string, or
It can be written as AB (A concatenated with B), where A and B are VPS's, or
It can be written as (A), where A is a VPS.
We can similarly define the nesting depth depth(S) of any VPS S as follows:
depth("") = 0
depth(A + B) = max(depth(A), depth(B)), where A and B are VPS's
depth("(" + A + ")") = 1 + depth(A), where A is a VPS.
For example, "", "()()", and "()(()())" are VPS's (with nesting depths 0,
1, and 2), and ")(" and "(()" are not VPS's.
Given a VPS seq, split it into two disjoint subsequences A and B, such that A
and B are VPS's (and A.length + B.length = seq.length).
Now choose any such A and B such that max(depth(A), depth(B)) is the
minimum possible value.
Return an answer array (of length seq.length) that encodes such a choice of
A and B: answer[i] = 0 if seq[i] is part of A, else answer[i] = 1. Note
that even though multiple answers may exist, you may return any of them.
Example 1:
Input: seq = "(()())"
Output: [0,1,1,1,1,0]
Example 2:
Input: seq = "()(())()"
Output: [0,0,0,1,1,0,1,1]
Constraints:
1 <= seq.size <= 10000
"""
class Solution(object):
    """LeetCode 1111: split a valid parentheses string into two VPS's A and B
    so that max(depth(A), depth(B)) is minimised; answer[i] = 0 for A, 1 for B.
    """
    def maxDepthAfterSplit(self, seq):
        """Recursive approach: label every position with its nesting depth via
        parse(), then send the shallow half of the depths to A and the deep
        half to B.
        :type seq: str
        :rtype: List[int]
        """
        n = len(seq)
        if n == 0: return []
        # self.result[i] temporarily holds the nesting depth of seq[i].
        self.result = [-1] * n
        self.parse(seq, 0, 0, n-1)
        max_h = max(self.result) + 1
        # Depths <= thr go to group A (0), deeper ones to group B (1).
        thr = (max_h - 1) // 2
        for i in range(n):
            if self.result[i] <= thr:
                self.result[i] = 0
            else:
                self.result[i] = 1
        return self.result
    def parse(self, seq, depth, i, j):
        """Fill self.result[i..j] with nesting depths; seq[i..j] is a VPS."""
        if i >= j: return
        # go through and split: slist collects the top-level balanced pieces
        # of seq[i..j] (cnt hits 0 exactly at the end of each piece).
        slist = []
        st = i
        cnt = 0
        for ii in range(i, j+1):
            if seq[ii] == ")":
                cnt -= 1
            else:
                cnt += 1
            if cnt == 0:
                slist.append([st, ii])
                st = ii+1
        # case 1: the whole range is one "(...)"; its brackets sit at this
        # depth and the interior recurses one level deeper.
        if len(slist) == 1:
            self.result[i] = depth
            self.result[j] = depth
            self.parse(seq, depth+1, i+1, j-1)
        else:
            # Several siblings at the same depth: recurse into each piece.
            for st, end in slist:
                self.parse(seq, depth, st, end)
    def solve2(self, seq):
        """One-pass variant: record the depth of every character directly,
        then apply the same threshold split. Returns the 0/1 answer list.
        """
        n = len(seq)
        res = [0] * n
        cnt = 0
        for i, c in enumerate(seq):
            if i == 0:
                # A valid VPS starts with "(" at depth 0.
                res[i] = cnt
                if c == "(":
                    cnt += 1
                else:
                    cnt -= 1
            else:
                # "(" is labelled with the depth *before* opening;
                # ")" with the depth *after* closing, so pairs match.
                if c == "(":
                    res[i] = cnt
                    cnt += 1
                else:
                    cnt -= 1
                    res[i] = cnt
        thr = (max(res) - 1) // 2
        for i in range(n):
            if res[i] <= thr:
                res[i] = 0
            else:
                res[i] = 1
        return res
# Quick manual check against the two LeetCode sample inputs.
if __name__ == "__main__":
    a = Solution()
    # print(a.maxDepthAfterSplit("(()())"))
    # rint(a.maxDepthAfterSplit("()(((())))()"))
    print(a.solve2("(()())"))
    print(a.solve2("()(())()"))
| [
"[email protected]"
] | |
7d9a15a515e928321da6c01ad4a4b0c6c281c704 | e6a5fce33aad4fcba37842e135a51ba441b06f48 | /Algorithms/Strings/BeautifulBinaryString.py | 788b7087da8bc68a467ba09d592dd229b79f83b4 | [
"MIT"
] | permissive | pavstar619/HackerRank | 6710ddd450b06fbb69da5abad9f570e5e26bbbc0 | 697ee46b6e621ad884a064047461d7707b1413cd | refs/heads/master | 2020-06-18T18:53:53.421685 | 2020-02-18T09:35:48 | 2020-02-18T09:35:48 | 196,408,726 | 0 | 0 | MIT | 2019-07-11T14:18:16 | 2019-07-11T14:18:16 | null | UTF-8 | Python | false | false | 233 | py | #!/bin/python3
class Main():
    """Read a binary string from stdin and report how many '010' patterns
    it contains (HackerRank "Beautiful Binary String")."""

    def __init__(self):
        # First line: declared length (parsed but otherwise unused).
        self.n = int(input())
        # Second line: the binary string itself.
        self.s = input()

    def output(self):
        # str.count() counts non-overlapping occurrences, which is exactly
        # the minimum number of changes needed to remove every '010'.
        occurrences = self.s.count('010')
        print(occurrences)


if __name__ == '__main__':
    app = Main()
    app.output()
| [
"[email protected]"
] | |
1d392f1ad1a591ca3f59ee411b29ba1720d86905 | 770d4df866b9e66a333f3ffeacdd659b8553923a | /results/0175/config.py | 2eb890c054ba7a06a78c2e58b47304712cdacb6d | [] | no_license | leojo/ResultsOverview | b2062244cbd81bc06b99963ae9b1695fa9718f90 | a396abc7a5b4ab257150c0d37c40b646ebb13fcf | refs/heads/master | 2020-03-20T19:52:37.217926 | 2018-08-05T12:50:27 | 2018-08-05T12:50:27 | 137,656,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,597 | py | import os
import numpy as np
import waveUtils
class config(object):
    """Hyper-parameter bundle for an audio (V)AE source-separation experiment.
    Loading the data eagerly in __init__ keeps every instance ready for
    get_training_batch().  NOTE(review): waveUtils is a project module --
    presumably loads/cuts/resamples .wav files; confirm its contracts.
    """
    def __init__(self):
        self.prepare_data()
    # Bsub arguments
    bsub_mainfile = "main.py"
    bsub_processors = 4
    bsub_timeout = "4:00"
    bsub_memory = 8000
    # Epoch and batch config
    batch_size = 128
    latent_dim = 100
    epochs = 100
    epoch_updates = 100
    # Network structure
    input_s = 16000
    n_ae = 5
    n_conv_layers = 3
    n_deconv_layers = 3
    # Input length shrinks by half per deconv layer: 16000 // 2**3 == 2000.
    first_size = input_s // (2 ** n_deconv_layers)
    final_decoder_filter_size = 3
    # Model
    load_model = False
    model_path = os.path.join("models", "0103", "model") # only used if load_model=True
    # Miscellaneous constants
    sample_rate = 8000
    reconstruction_mult = 1
    learning_rate_min = 1e-4
    learning_rate_max = 1e-4
    learning_rate_scaling_factor = 0 # controls the shape of the scaling curve from max to min learning rate
    learning_rate = 1e-3 # legacy
    kl_loss_mult = 1e-3
    kl_extra_mult = 2
    kl_extra_exponent = 2
    keep_prob = 1
    use_square = False
    data_sources = ["clarinet","trumpet"]
    # Class-level cache so all instances share one loaded dataset.
    data = None
    # Functions
    def prepare_data(self):
        """Eagerly populate the class-level data cache."""
        self.load_data()
    def load_and_prepare_audio(self, source):
        """Load wav_files/<source>, keep the loudest window of input_s samples,
        and downsample it to self.sample_rate.  Returns the prepared clips.
        """
        # input_s samples at the target rate -> clip duration in seconds (2.0).
        duration = self.input_s / float(self.sample_rate)
        data_dir = os.path.join("wav_files", source)
        waves, original_sample_rate = waveUtils.loadAudioFiles(data_dir)
        cut_data = waveUtils.extractHighestMeanIntensities(waves, sample_rate=original_sample_rate, duration=duration)
        # Free the full recordings promptly; they can be large.
        del waves
        data = waveUtils.reduceQuality(cut_data, self.sample_rate, duration)
        del cut_data
        return data
    def load_data(self):
        """Fill the shared cache once; later instances reuse it."""
        if self.data is None:
            self.data = [self.load_and_prepare_audio(source) for source in self.data_sources]
    def get_training_batch(self):
        """Build one batch of mixtures.
        Returns (samples, originals): samples[b] is the mean of one randomly
        chosen clip per source; originals[b] lists those source clips.
        """
        samples = []
        originals = []
        num_sources = len(self.data_sources)
        sample_shape = self.data[0][0].shape
        for _ in range(self.batch_size):
            waves = []
            sample = np.zeros(sample_shape)
            for s in range(num_sources):
                i = np.random.randint(len(self.data[s]))
                wave = self.data[s][i]
                waves.append(wave)
                sample += wave
            sample = sample/num_sources
            samples.append(sample)
            originals.append(waves)
        samples = np.asarray(samples)
        originals = np.asarray(originals)
        return samples, originals
    def normalize_batch(self, batch):
        """Scale the batch into [-1, 1] by its global peak magnitude.
        NOTE(review): divides by zero on an all-zero batch -- confirm callers
        never pass silence.
        """
        x = batch.astype(np.float32)
        return x / np.max(np.abs(x))
    def deconv_filter_size(self, i):
        # Odd filter sizes 3, 5, 7, ... for decoder layer i.
        return (2 * (i + 1)) + 1
    def deconv_channel_num(self, i):
        # Channel count halves per decoder layer: 2**(n_deconv_layers + 3 - i).
        return 2 ** (config.n_deconv_layers + 3 - i)
    def conv_filter_size(self, i):
        # Mirror of deconv_filter_size: shrinking odd sizes for encoder layer i.
        return (2 * (config.n_conv_layers - i)) + 1
    def conv_channel_num(self, i):
        # Channel count doubles per encoder layer: 16, 32, 64, ...
        return 2 ** (i + 4)
| [
"[email protected]"
] | |
1e0458e35481d98afa1fca38f79c63efacd7cb96 | b24fa24a96036253a0cd168ac8f6dd41c9102b0a | /backend/test1_20020/urls.py | 9496714a7230f5aec24f0c409c3dbda66af31988 | [] | no_license | crowdbotics-apps/test1-20020 | a13c64c66d2028f99723f38e0341873a46d3ae3f | 9d59b62483172e4900990c07afed60f443c211bc | refs/heads/master | 2022-12-07T00:18:07.268473 | 2020-09-04T03:27:46 | 2020-09-04T03:27:46 | 292,737,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,457 | py | """test1_20020 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# Root URL routing for the test1 project: app routes, auth, admin and docs.
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
    # Each app contributes both an /api/v1/ API prefix and an HTML prefix.
    path("api/v1/", include("taxi_profile.api.v1.urls")),
    path("taxi_profile/", include("taxi_profile.urls")),
    path("api/v1/", include("booking.api.v1.urls")),
    path("booking/", include("booking.urls")),
    path("api/v1/", include("location.api.v1.urls")),
    path("location/", include("location.urls")),
    path("api/v1/", include("vehicle.api.v1.urls")),
    path("vehicle/", include("vehicle.urls")),
    path("home/", include("home.urls")),
    path("api/v1/", include("wallet.api.v1.urls")),
    path("wallet/", include("wallet.urls")),
]
# Branding for the Django admin site.
admin.site.site_header = "test1"
admin.site.site_title = "test1 Admin Portal"
admin.site.index_title = "test1 Admin"
# swagger
api_info = openapi.Info(
    title="test1 API",
    default_version="v1",
    description="API documentation for test1 App",
)
# Swagger/OpenAPI schema; restricted to authenticated users.
schema_view = get_schema_view(
    api_info,
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"[email protected]"
] | |
04700fd35f4dee4fdb63470cffc81c8f21ac4397 | bc441bb06b8948288f110af63feda4e798f30225 | /capacity_admin_sdk/model/metadata_center/stream_translate_states_pb2.py | 1a9a4ed46dc1768d123cadc769e2544b8b98a8b2 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 3,750 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: stream_translate_states.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from capacity_admin_sdk.model.metadata_center import stream_translate_package_pb2 as capacity__admin__sdk_dot_model_dot_metadata__center_dot_stream__translate__package__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='stream_translate_states.proto',
package='metadata_center',
syntax='proto3',
serialized_options=_b('ZIgo.easyops.local/contracts/protorepo-models/easyops/model/metadata_center'),
serialized_pb=_b('\n\x1dstream_translate_states.proto\x12\x0fmetadata_center\x1aGcapacity_admin_sdk/model/metadata_center/stream_translate_package.proto\"o\n\x15StreamTranslateStates\x12\x0b\n\x03org\x18\x01 \x01(\x05\x12\x0f\n\x07\x63ommand\x18\x02 \x01(\t\x12\x38\n\x07payload\x18\x03 \x03(\x0b\x32\'.metadata_center.StreamTranslatePackageBKZIgo.easyops.local/contracts/protorepo-models/easyops/model/metadata_centerb\x06proto3')
,
dependencies=[capacity__admin__sdk_dot_model_dot_metadata__center_dot_stream__translate__package__pb2.DESCRIPTOR,])
_STREAMTRANSLATESTATES = _descriptor.Descriptor(
name='StreamTranslateStates',
full_name='metadata_center.StreamTranslateStates',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='org', full_name='metadata_center.StreamTranslateStates.org', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='command', full_name='metadata_center.StreamTranslateStates.command', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='payload', full_name='metadata_center.StreamTranslateStates.payload', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=123,
serialized_end=234,
)
_STREAMTRANSLATESTATES.fields_by_name['payload'].message_type = capacity__admin__sdk_dot_model_dot_metadata__center_dot_stream__translate__package__pb2._STREAMTRANSLATEPACKAGE
DESCRIPTOR.message_types_by_name['StreamTranslateStates'] = _STREAMTRANSLATESTATES
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
StreamTranslateStates = _reflection.GeneratedProtocolMessageType('StreamTranslateStates', (_message.Message,), {
'DESCRIPTOR' : _STREAMTRANSLATESTATES,
'__module__' : 'stream_translate_states_pb2'
# @@protoc_insertion_point(class_scope:metadata_center.StreamTranslateStates)
})
_sym_db.RegisterMessage(StreamTranslateStates)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
de97fb29bc50a39e1dba26ab527c2bf6030e1521 | a392cd0963b030c934f2a9e329867a68515f4a5c | /cotoha/ne.py | acae889f794da0baf8c1d02b9350faa61b31d382 | [] | no_license | hatopoppoK3/COTOHA-Python | b89517bc6037c95a692dd85c98007727a712da24 | df333167f775e7a550827d016bf1892e36ac5602 | refs/heads/master | 2021-01-16T02:14:44.716987 | 2020-05-20T15:22:31 | 2020-05-20T15:22:31 | 242,940,170 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,905 | py | from cotoha.api import Cotoha
from cotoha.api import check_dic_class, get_sentence_class
class CotohaNe(Cotoha):
    """Named-entity extraction via the COTOHA `nlp/v1/ne` endpoint.

    The request is issued eagerly from the constructor; the parsed
    response is exposed as ``message``, ``status`` and ``ne_result_list``.
    """
    def __init__(self, sentence: str, kuzure_flag=False, dic_class=None):
        """
        Args:
            sentence (str): the sentence to analyse.
            kuzure_flag (bool, optional): True for informal/"kuzure" text.
                Defaults to False.
            dic_class (list, optional): technical-term dictionaries to use.
                Defaults to None (treated as an empty list).
        Raises:
            NeError: if dic_class fails validation.
        """
        super().__init__()
        self.sentence = sentence
        self.sentence_class = get_sentence_class(kuzure_flag)
        # Fix: the old signature used a mutable default (dic_class=[]), a
        # classic shared-state pitfall; normalise the None sentinel instead.
        if dic_class is None:
            dic_class = []
        if check_dic_class(dic_class):
            self.dic_class = dic_class
        else:
            raise NeError('dic_classにエラーがあります.')
        request_json = {'sentence': self.sentence,
                        'type': self.sentence_class,
                        'dic_type': self.dic_class}
        # Network call to the COTOHA API; returns the decoded JSON body.
        response_dict = self.get_response_dict(
            relative_url='nlp/v1/ne', request_body=request_json)
        self.message = response_dict['message']
        self.status = response_dict['status']
        self.ne_result_list = []
        for result_dict in response_dict['result']:
            self.ne_result_list.append(NeResult(result_dict))
    def __str__(self) -> str:
        """Human-readable dump: base-class info plus every NE result."""
        string = super().__str__()
        string += 'sentence:{}\n'.format(self.sentence)
        string += 'sentence_class:{}\n'.format(self.sentence_class)
        string += 'dic_class:{}\n'.format(self.dic_class)
        string += 'message:{}\n'.format(self.message)
        string += 'status:{}\n'.format(self.status)
        for ne_result in self.ne_result_list:
            string += ne_result.__str__()
        return string
class NeError(Exception):
    """Exception for the named-entity extraction wrapper.
    Raised when dic_class (or sentence_class) validation fails.
    """
class NeResult(object):
    """One named-entity record from the COTOHA NE endpoint response."""
    def __init__(self, result_dict: dict):
        # Copy the API fields verbatim; 'class' is renamed to ne_class
        # because 'class' is a Python keyword.
        self.begin_pos = result_dict['begin_pos']
        self.end_pos = result_dict['end_pos']
        self.form = result_dict['form']
        self.std_form = result_dict['std_form']
        self.ne_class = result_dict['class']
        self.extended_class = result_dict['extended_class']
        self.source = result_dict['source']
    def __str__(self):
        # One "field:value" line per attribute, in API order.
        lines = [
            'begin_pos:{}\n'.format(self.begin_pos),
            'end_pos:{}\n'.format(self.end_pos),
            'form:{}\n'.format(self.form),
            'std_form:{}\n'.format(self.std_form),
            'ne_class:{}\n'.format(self.ne_class),
            'extended_class:{}\n'.format(self.extended_class),
            'source:{}\n'.format(self.source),
        ]
        return ''.join(lines)
| [
"[email protected]"
] | |
10fb9cdb369291fef5073bff8f48c9cf3c5df16d | bc2e77b92ae12e34aec9445f5e8c92fc399e6cc8 | /vautils/resource/inventory.py | 18a0764d7b1c167fd3ebb82dcff7d000fcb08435 | [] | no_license | 18782967131/test | fc8049c10f02f7b49a4dd4a675618ccf388c4f2f | cb02979e233ce772bd5fe88ecdc31caf8764d306 | refs/heads/master | 2020-05-27T08:41:57.535153 | 2017-11-09T08:31:07 | 2017-11-09T08:31:07 | 82,536,387 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,661 | py | """ coding: utf-8
Copyright 2016 vArmour Networks private.
All Rights reserved. Confidential
Inventory implements the class that abstracts the test inventory and the
vms required to run an automated test.
.. moduleauthor:: [email protected]
"""
from vautils import logger
from vautils.exceptions import UnknownDevice
from vautils.resource.delegator import Delegator
from vautils.resource import * # NOQA
# VM categories an inventory may contain (hypervisor/aci are handled apart).
VALID_TYPES = ['dir', 'cp', 'ep', 'epi', 'linux', 'vcenter']
class VaInventory(object):
    """
    Represents the lab setup of a test bed whose information is stored in
    a representable form. It provides an interface to the tests to obtain
    test bed and device information. Currently the information is stored
    in a certain expected fashion in yaml format. If the information
    storage medium changes like a database in the future, the class
    implementation may undergo a little change but the interface will be
    similar (if not same).
    """
    def __init__(self, **kwargs):
        """
        Initialize the test lab and device objects from keyword arguments.
        Expected keys: 'network', 'hypervisor', 'aci' and any of the
        VALID_TYPES vm categories; unknown keys are logged and skipped.
        """
        inventory = kwargs
        self._hypervisor = list()
        self._aci = list()
        self._linux = list()
        self._dir = list()
        self._cp = list()
        self._ep = list()
        self._epi = list()
        self._vcenter = list()
        self._log = logger
        self._uniq_ids = dict()
        self._network = inventory.get('network')
        self._va_make_hypervisor(inventory.get('hypervisor'))
        self._va_make_aci(inventory.get('aci'))
        for vm_type in inventory.keys():
            if vm_type in VALID_TYPES:
                many_vms = inventory.get(vm_type)
                self._va_make_vm(vm_type, many_vms)
            else:
                # 'hypervisor'/'network'/'aci' were consumed above; anything
                # else is an unknown key worth flagging.
                if vm_type != 'hypervisor' and vm_type != 'network' and\
                   vm_type != 'aci':
                    self._log.warn("Invalid vm type: {}".format(vm_type))
        self._va_categorize_by_uniq_id()
    def va_get_vm(self, vm_type=None, uniq_id=None):
        """
        Get the list of vms of vm_type, or a single vm when uniq_id is
        given.  NOTE(review): implicitly returns None when uniq_id is set
        but not found -- callers must handle that.
        """
        if vm_type not in VALID_TYPES:
            raise UnknownDevice(vm_type)
        try:
            # Category lists are stored as attributes named '_<vm_type>'.
            attrib = ''.join(('_', "{}".format(vm_type)))
            vms = getattr(self, "{}".format(attrib))
        except AttributeError:
            # TODO: raise DeviceNotSetup(device_type)
            raise
        else:
            if uniq_id:
                for vm in vms:
                    if vm.get_uniq_id() == uniq_id:
                        return vm
            else:
                return vms
    def va_get_hypervisor(self, name=None):
        """
        Get a 'hypervisor' node by hostname, or the list of all
        hypervisors when no name (or no match) is found.
        Kwargs:
            :name (str): hostname of the hypervisor
        """
        if name:
            for hvisor in self._hypervisor:
                if name == hvisor.get_hostname():
                    return hvisor
        return self._hypervisor
    def va_get_aci(self, name=None):
        """
        Get an 'aci' node by hostname, or the list of all acis.
        Kwargs:
            :name (str): hostname of the aci
        """
        if name:
            for aci in self._aci:
                if name == aci.get_hostname():
                    return aci
        return self._aci
    def va_get_dir(self, name=None):
        """
        Get a 'director' vm by hostname, or the list of all dirs.
        Kwargs:
            :name (str): hostname of the director
        """
        if name:
            for director in self._dir:
                if name == director.get_hostname():
                    return director
        return self._dir
    def va_get_ep(self, name=None):
        """
        Get a 'ep' vm by hostname, or the list of all eps.
        Kwargs:
            :name (str): hostname of the ep
        """
        if name:
            for ep in self._ep:
                if name == ep.get_hostname():
                    return ep
        return self._ep
    def va_get_cp(self, name=None):
        """
        Get a 'cp' vm by hostname, or the list of all cps.
        Kwargs:
            :name (str): hostname of the cp
        """
        if name:
            for cp in self._cp:
                if name == cp.get_hostname():
                    return cp
        return self._cp
    def va_get_epi(self, name=None):
        """
        Get a 'epi' vm by hostname, or the list of all epis.
        Kwargs:
            :name (str): hostname of the epi
        """
        if name:
            for epi in self._epi:
                if name == epi.get_hostname():
                    return epi
        return self._epi
    def va_get_vcenter(self, name=None):
        """
        Get a 'vcenter' vm by hostname, or the list of all vcenters.
        Kwargs:
            :name (str): hostname of the vcenter
        """
        if name:
            for vcenter in self._vcenter:
                if name == vcenter.get_hostname():
                    return vcenter
        return self._vcenter
    def va_get_linux(self, name=None):
        """
        Get a 'linux' vm by hostname, or the list of all linux vms.
        Kwargs:
            :name (str): hostname of the pc
        """
        if name:
            for linux in self._linux:
                if name == linux.get_hostname():
                    return linux
        return self._linux
    def va_get_by_uniq_id(self, uniq_id=None, delegator=True, add_nocli_user=False):
        """
        Get the vm registered under a unique id.
        kwargs:
            uniq_id (str): unique id for the resource vm
            delegator (bool): wrap the resource in a Delegator
                (default is True)
            add_nocli_user (bool): mark the resource before returning it
        NOTE(review): the add_nocli_user branch dereferences the lookup
        without a None check -- an unknown uniq_id would raise
        AttributeError here before the friendly warning below.
        """
        if add_nocli_user :
            self._uniq_ids.get(uniq_id).add_nocli_user=add_nocli_user
        if uniq_id in self._uniq_ids:
            if delegator:
                return Delegator(self._uniq_ids.get(uniq_id))
            else:
                return self._uniq_ids.get(uniq_id)
        else:
            # TODO: raise UnknownUniqId(uniq_id)
            # I don't know if raise is needed,
            # you can just let user know uniq id not exists
            logger.warning("unknown uniq_id {}".format(str(uniq_id)))
    def va_get_network_config(self, attribute=None):
        """
        Get one network-config value by attribute, or the entire network
        config dict when attribute is missing/unknown.
        """
        if attribute in self._network.keys():
            return self._network.get(attribute)
        else:
            return self._network
    def _va_make_hypervisor(self, hypervisors=None):
        """
        Build Esxi objects from the raw hypervisor dicts.
        kwargs:
            :hypervisors (list): list of hypervisors in the lab
        """
        if hypervisors:
            for hypervisor in hypervisors:
                self._hypervisor.append(Esxi(**hypervisor))
        else:
            # TODO: raise DeviceNotFound
            pass
    def _va_make_aci(self, aci=None):
        """
        Build AciApic objects from the raw aci dicts.
        kwargs:
            :aci (list): list of aci in the lab
        """
        if aci:
            for each_aci in aci:
                self._aci.append(AciApic(**each_aci))
        else:
            # TODO: raise DeviceNotFound
            pass
    def _va_make_vm(self, vm_type=None, vms=None):
        """
        Build the concrete vm object for each raw dict and append it to
        the matching category list.
        kwargs:
            :vm_type (str): vm type - dir|cp|ep|epi|linux|vcenter
            :vms (list): list of specific vms of vm type in the inventory
        """
        for vm in vms:
            # Record the category on the raw dict before construction.
            vm['type'] = vm_type
            if vm_type in ('dir', 'ep', 'cp', 'epi'):
                vm_rep = VarmourVm(**vm)
            elif vm_type == 'linux':
                vm_rep = LinuxVm(**vm)
            elif vm_type == "vcenter":
                vm_rep = VcenterVm(**vm)
            vm_list = self.va_get_vm(vm_type)
            vm_list.append(vm_rep)
    def _va_categorize_by_uniq_id(self):
        """
        Index every vm, hypervisor and aci node by its unique id.
        """
        for node_type in VALID_TYPES:
            nodes = self.va_get_vm(node_type)
            for node in nodes:
                uniq_id = node.get_uniq_id()
                self._uniq_ids[uniq_id] = node
        for node in self.va_get_hypervisor():
            uniq_id = node.get_uniq_id()
            self._uniq_ids[uniq_id] = node
        for node in self.va_get_aci():
            uniq_id = node.get_uniq_id()
            self._uniq_ids[uniq_id] = node
    def __del__(self):
        # Drop references on teardown.  NOTE(review): clears _pc, which is
        # never created, and skips _cp/_linux/_vcenter -- confirm intent.
        self._hypervisor = None
        self._aci = None
        self._dir = None
        self._ep = None
        self._epi = None
        self._pc = None
        self._uniq_ids = None
| [
"[email protected]"
] | |
88a06957d542b33485cfa40edd4ee1565f4d15eb | c3cfd90531b560b9522a10089101dd09ed13233e | /arithmetic.py | c91f0e9764c593d10713fc7d9bd05f1a91842425 | [] | no_license | kateybatey/calculator-1 | 602368782265dd302fc4eee376e0d3568e826f2a | 01b24c34d4d0c369279eea6e8ced9fd17bf35722 | refs/heads/master | 2021-01-22T02:04:27.807606 | 2017-05-24T19:48:10 | 2017-05-24T19:48:10 | 92,330,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 983 | py | """Math functions for calculator."""
def add(num1, num2):
    """Return the sum of the two inputs."""
    return num1 + num2
def subtract(num1, num2):
    """Return the second number subtracted from the first."""
    return num1 - num2
def multiply(num1, num2):
    """Return the product of the two inputs."""
    return num1 * num2
def divide(num1, num2):
    """Return num1 divided by num2 using true (floating-point) division."""
    return num1 / num2
def square(num1):
    """Return the input raised to the second power."""
    return num1 ** 2
def cube(num1):
    """Return the input raised to the third power."""
    return num1 ** 3
def power(num1, num2):
    """Return num1 raised to the power of num2."""
    return num1 ** num2
def mod(num1, num2):
    """Return the remainder of num1 divided by num2."""
    return num1 % num2
| [
"[email protected]"
] | |
567fa248d17de4c3fd124cb904e5495ef97e5a25 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_64/27.py | 4c01b978bd8b3ea6271d01fc75a29607b2413799 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,003 | py | import sys
import pdb
class Reader:
    """Line-oriented tokenizer over a file: each read() splits one line on
    whitespace and converts base-10 integer tokens to int."""
    def __init__(self, filename):
        self.fp = open(filename)
    def read(self):
        parsed = []
        for token in self.fp.readline().split():
            try:
                parsed.append(int(token, 10))
            except ValueError:
                # Non-numeric tokens are kept as plain strings.
                parsed.append(token)
        return parsed
    def read_strings(self):
        # Raw variant: every token stays a string.
        return self.fp.readline().split()
def bits(x, n):
    """Return the low n bits of x as booleans, most significant bit first.

    Fix: the original used the Python-2-only builtin ``xrange``; ``range``
    produces identical results on Python 2 and also runs on Python 3.
    """
    return [(x >> i) & 1 == 1 for i in reversed(range(n))]
class Board:
    """Grid of two-coloured cells (True/False); None marks a removed cell.
    Repeatedly extracts the largest chessboard-patterned square --
    presumably the GCJ "Making Chess Boards" problem; confirm.
    """
    def __init__(self, board):
        self.board = board
        self.row_count = len(board)
        self.col_count = len(board[0])
        # All rows must be the same width.
        assert all(len(row) == self.col_count for row in board)
        # opt[i][j]: side length of the largest chessboard square whose
        # bottom-right corner is (i, j); computed per call below.
        self.opt = [[None] * self.col_count for i in xrange(self.row_count)]
    def remove_largest_square(self):
        """Find the largest chessboard square, blank it out with None, and
        return its side length (0 when no cells remain)."""
        # Base cases: a single live cell is a 1x1 chessboard.
        for i in xrange(self.row_count):
            self.opt[i][0] = 1 if self.board[i][0] is not None else 0
        for j in xrange(self.col_count):
            self.opt[0][j] = 1 if self.board[0][j] is not None else 0
        for i in xrange(1, self.row_count):
            for j in xrange(1, self.col_count):
                here = self.board[i][j]
                up = self.board[i - 1][j]
                left = self.board[i][j - 1]
                upleft = self.board[i - 1][j - 1]
                if here is None: # burned out
                    self.opt[i][j] = 0
                # The 2x2 corner continues a chessboard exactly when the two
                # diagonals are uniform and of opposite colour.
                elif (here is upleft is True and up is left is False) or \
                     (here is upleft is False and up is left is True):
                    self.opt[i][j] = min(self.opt[i - 1][j - 1],
                                         self.opt[i][j - 1],
                                         self.opt[i - 1][j]) + 1
                    assert self.opt[i][j] >= 2
                else:
                    self.opt[i][j] = 1
        # Pick the bottom-right corner of the largest square found.
        square_max = 0
        square_i = square_j = None
        for i in xrange(self.row_count):
            for j in xrange(self.col_count):
                if self.opt[i][j] > square_max:
                    square_max = self.opt[i][j]
                    square_i, square_j = i, j
        # return if no square to remove
        if square_i is None:
            return 0
        # burn out the removed square
        for i in xrange(square_i - square_max + 1, square_i + 1):
            for j in xrange(square_j - square_max + 1, square_j + 1):
                self.board[i][j] = None
        # return the size of the largest square
        return square_max
    def tiles_left(self):
        # Number of cells not yet removed.
        return sum(sum(x is not None for x in row) for row in self.board)
    def dump(self):
        # Debug print: X = removed, @ = True, . = False (Python 2 print).
        tiles = {None: 'X', True: '@', False: '.'}
        for row in self.board:
            print ''.join(tiles[x] for x in row)
# Driver (Python 2): read the input file named on the command line, solve
# each case by greedily removing the largest chessboard square, and print
# the per-size counts in the GCJ output format.
if __name__ == '__main__':
    reader = Reader(sys.argv[1])
    case_count, = reader.read()
    for case in xrange(case_count):
        # dynamic programming woo
        row_count, col_count = reader.read()
        board = []
        for i in xrange(row_count):
            # Each row arrives as a hex string; expand to one bool per cell.
            row_hex, = reader.read_strings()
            board.append(bits(int(row_hex, 16), col_count))
        board = Board(board)
        square_counts = {}
        while True:
            size = board.remove_largest_square()
            if size == 0:
                break
            elif size == 1:
                # Once the best square is 1x1, every remaining cell is its
                # own square -- count them all in one shot and stop.
                square_counts.setdefault(1, 0)
                square_counts[1] += 1
                square_counts[1] += board.tiles_left()
                break
            else:
                square_counts.setdefault(size, 0)
                square_counts[size] += 1
        # Sanity check: the squares tile the whole board exactly.
        assert sum(size**2 * count for size, count in
                   square_counts.iteritems()) == row_count * col_count
        print "Case #%d: %d" % (case + 1, len(square_counts))
        # NOTE(review): reversed(dict.keys()) relies on key order, which a
        # Python 2 dict does not guarantee -- sorted(..., reverse=True)
        # would be safer; confirm against judge output.
        for size in reversed(square_counts.keys()):
            print "%d %d" % (size, square_counts[size])
| [
"[email protected]"
] | |
1bfa2e3038d4e8f3250045713006589dc9f8952b | 2afb1095de2b03b05c8b96f98f38ddeca889fbff | /Python_Basic/def/decorators_2.py | 33bb013abb5f1e24d9295d6568415f5872fd17b5 | [] | no_license | draganmoo/trypython | 187316f8823296b12e1df60ef92c54b7a04aa3e7 | 90cb0fc8626e333c6ea430e32aa21af7d189d975 | refs/heads/master | 2023-09-03T16:24:33.548172 | 2021-11-04T21:21:12 | 2021-11-04T21:21:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,469 | py | import time
''"""
添加会员,我们要先模拟一个会员库,比如有一个会员名字叫synferlo,密码是harry618
如果不通过装饰器,为了避免大规模修改源代码的函数,我们只能通过修改函数调用方式(即高阶函数)的方法达到效果。
也就是说,我们将一个函数作为参数传入到另一个函数中
"""''
"""
方法:
在log_in_system()函数中嵌套一个inner函数,然后return inner,这样整个log_in_system不会返回US,JP函数的结果,而是把inner函数执行后的内存地址返回
然后我们把它赋值给一个变量,在通过这个变量名+()的方式调用inner函数,通过闭包完成不修改源代码和调用方式的完成代码扩展
我们把一个函数的名字赋给一个变量,然后再调用这个变量就相当于调用这个函数:
在不改变源代码和调用方式前提下,把函数名赋值给新变量,然后再调用,这就是“函数装饰器”
又或者,你不想在后面添加:
US_EU = log_in_system(US_EU)
JP_KO = log_in_system(JP_KO)
则可以再想要装饰的函数上面添加 @log_in_system 来达到相同的效果,这就是装饰器的语法。
"""
# Simulated member store: one account plus a session flag the decorator
# flips after the first successful log-in.
data_base = {'is_log_in':False,
             'user name':'synferlo',
             'password':'harry618'}
def log_in_system(func):
    """Closure-based gatekeeper: returns inner, which asks for credentials
    (once per session) before calling through to func."""
    # Dict entries are read with bracket indexing and compared with == / is.
    def inner():
        if data_base['is_log_in'] == False:
            user_name = input('user name: ')
            pass_word = input('password: ')
            if user_name == data_base['user name'] and pass_word == data_base['password']:
                print('Success! Welcome to Membership Section')
                # Remember the session so later calls skip the prompt.
                data_base['is_log_in'] = True
                func()
            else:
                print('User name or password is not matched. Please try again')
        else:
            print('log in request has been approved')
            func()
    # Return the wrapper itself (a closure), not the result of calling it.
    return inner
"""
添加了return inner后,后面US_EU = log_in_system(US_EU)返回的是inner的内存地址,而不是执行inner,即之前学的闭包现象
当我们真正想要调用inner的时候,需要执行US_EU() (即,代码原本的调用方式
这样我们在不修改原本调用方式和源代码的情况下完成了扩展代码的任务!!"""
# Public pages: reachable without any log-in.
def home_page():
    print("""
    --------------------Home Page------------------
    this is the home page of this website
    End
    """)
# Membership pages: gated below by rebinding through log_in_system.
def US_EU():
    print("""
    -----------------US and EU Section--------------
    welcome to US and EU Membership section
    End
    """)
def JP_KO():
    print("""
    -----------------JP and KO Section--------------
    welcome to JP and KO Membership section
    End
    """)
def domestic():
    print("""
    -----------------Domestic Section--------------
    welcome to Domestic section
    End
    """)
home_page()
domestic()
"""##注意这里一定不能载US_EU和JP_KO后面加括号,加括号表示要启动这个函数,而我们这里是以索引,只是用他们的名字赋值"""
# No parentheses here: the function OBJECTS are passed to the decorator and
# the names rebound to the returned wrappers -- manual decoration.
US_EU = log_in_system(US_EU)
JP_KO = log_in_system(JP_KO)
# Calling the rebound names now runs inner(), which enforces the log-in.
US_EU()
JP_KO()
"""
通过 @log_in_system 达成装饰器效果:
装饰器可以添加多个:执行时候从上到下执行:
@pay_money
@vip_level
@log_in_system
....
"""
# Second copy of the member store, reset for the @-syntax demonstration.
data_base = {'is_log_in':False,
             'user name':'synferlo',
             'password':'harry618'}
def log_in_system(func):
    """Same gatekeeper as above; this time it will be applied with the
    @log_in_system decorator syntax instead of manual rebinding."""
    # Dict entries are read with bracket indexing and compared with == / is.
    def inner():
        if data_base['is_log_in'] == False:
            user_name = input('user name: ')
            pass_word = input('password: ')
            if user_name == data_base['user name'] and pass_word == data_base['password']:
                print('Success! Welcome to Membership Section')
                # Remember the session so later calls skip the prompt.
                data_base['is_log_in'] = True
                func()
            else:
                print('User name or password is not matched. Please try again')
        else:
            print('log in request has been approved')
            func()
    # Hand back the closure; @log_in_system rebinds the decorated name to it.
    return inner
"""
添加了return inner后,后面US_EU = log_in_system(US_EU)返回的是inner的内存地址,而不是执行inner,即之前学的闭包现象
当我们真正想要调用inner的时候,需要执行US_EU() (即,代码原本的调用方式
这样我们在不修改原本调用方式和源代码的情况下完成了扩展代码的任务!!"""
def home_page():
print("""
--------------------Home Page------------------
this is the home page of this website
End
""")
# @log_in_system is equivalent to: US_EU = log_in_system(US_EU)
@log_in_system
def US_EU():
    print("""
    -----------------US and EU Section--------------
    welcome to US and EU Membership section
    End
    """)
# @log_in_system is equivalent to: JP_KO = log_in_system(JP_KO)
@log_in_system
def JP_KO():
    print("""
    -----------------JP and KO Section--------------
    welcome to JP and KO Membership section
    End
    """)
def domestic():
    # Publicly accessible page; intentionally not decorated.
    print("""
    -----------------Domestic Section--------------
    welcome to Domestic section
    End
    """)
# Demo run 2: decorated pages prompt for login; public pages do not.
home_page()
domestic()
US_EU()
JP_KO()
"[email protected]"
] | |
b90aa4b5a4811a0ca87f243f0d565b054e51b68f | e85e846960750dd498431ac8412d9967646ff98d | /users/models.py | 04bb300981e353cba597c91084bfddf5a3fcafba | [] | no_license | onosaburo/clublink_django | 19368b4a59b3aed3632883ceffe3326bfc7a61a6 | d2f6024b6224ea7f47595481b3382b8d0670584f | refs/heads/master | 2022-03-30T05:30:12.288354 | 2020-01-27T18:09:11 | 2020-01-27T18:09:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,774 | py | import calendar
import json
import base64
import math
from datetime import datetime
from Crypto.Cipher import AES
from urllib.parse import urlencode, quote_plus
from urllib.request import quote
from django.conf import settings
from django.contrib.auth.base_user import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.dispatch import receiver
from django.urls import reverse
from django.db import models
from django.db.models import signals
from django.utils.translation import ugettext_lazy as _
from clublink.base.crypto import AESCipher
from clublink.base.utils import today
from clublink.users.managers import UserManager
from clublink.clubs.models import Club
class UserCategory(models.Model):
    """Lookup table of membership categories, keyed by an external char code."""

    id = models.CharField(max_length=6, primary_key=True)
    name = models.CharField(max_length=255)

    def __str__(self):
        return self.name

    def __repr__(self):
        return f'<UserCategory {self.name}>'
class ClubCorp(models.Model):
    """Lookup table of corporate club entities, keyed by an external char code."""

    id = models.CharField(max_length=6, primary_key=True)
    name = models.CharField(max_length=255)

    def __str__(self):
        return self.name

    def __repr__(self):
        return f'<ClubCorp {self.name}>'
class UserType(models.Model):
    """Lookup table of user types; ``is_corp`` flags corporate account types."""

    id = models.CharField(max_length=6, primary_key=True)
    name = models.CharField(max_length=255)
    is_corp = models.BooleanField(default=False)

    def __str__(self):
        return self.name

    def __repr__(self):
        return f'<UserType {self.name}>'
class User(AbstractBaseUser, PermissionsMixin):
    """Custom ClubLink account model (members and staff), keyed on ``username``.

    Companion ``Profile`` and ``UserPermissions`` rows are auto-created by the
    ``user_post_save`` signal handler below.
    """

    STATUSES = (
        ('A', _('Active')),
        ('R', _('Resigned')),
        ('S', _('Suspended')),
    )

    username = models.CharField(max_length=48, unique=True)
    membership_number = models.CharField(max_length=15, unique=True, null=True, blank=True)
    employee_number = models.CharField(max_length=15, unique=True, null=True, blank=True)
    first_name = models.CharField(max_length=255, blank=True)
    last_name = models.CharField(max_length=255, blank=True)
    middle_name = models.CharField(max_length=255, blank=True)
    email = models.EmailField(null=True, blank=True)
    is_staff = models.BooleanField(
        'staff status', default=False,
        help_text='Designates whether the user can log into this admin site.')
    category = models.ForeignKey(
        UserCategory,
        null=True,
        blank=True,
        related_name='users',
        on_delete=models.SET_NULL
    )
    clubcorp = models.ForeignKey(
        ClubCorp,
        null=True,
        blank=True,
        related_name='users',
        on_delete=models.SET_NULL
    )
    clubcorp_number = models.CharField(max_length=5, null=True, blank=True)
    customer_id = models.CharField(max_length=15, null=True, blank=True)
    home_club = models.ForeignKey(
        Club,
        null=True,
        blank=True,
        related_name='users',
        on_delete=models.SET_NULL
    )
    option_club = models.ForeignKey(
        Club,
        null=True,
        blank=True,
        related_name='option_users',
        on_delete=models.SET_NULL
    )
    home_club_alternate_1 = models.ForeignKey(
        Club,
        null=True,
        blank=True,
        related_name='alt1_users',
        on_delete=models.SET_NULL
    )
    home_club_alternate_2 = models.ForeignKey(
        Club,
        null=True,
        blank=True,
        related_name='alt2_users',
        on_delete=models.SET_NULL
    )
    preferred_language = models.CharField(max_length=2, default=settings.LANGUAGE_CODE,
                                          choices=settings.LANGUAGES)
    status = models.CharField(max_length=1, default='A', choices=STATUSES)
    type = models.ForeignKey(
        UserType,
        null=True,
        blank=True,
        related_name='users',
        on_delete=models.SET_NULL
    )
    invited = models.BooleanField(default=False)

    objects = UserManager()

    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = (
        'email',
    )

    class InvalidToken(Exception):
        """Raised when a password-reset token cannot be decrypted or matched."""
        pass

    class ExpiredToken(InvalidToken):
        """Raised when a password-reset token is older than 14 days."""
        pass

    class Meta:
        permissions = (
            ('manage_gift_certificates', 'Can manage Gift Certificates',),
        )

    def __str__(self):
        return self.username

    def __repr__(self):
        return '<User {}>'.format(self.username)

    @property
    def department_list(self):
        """Comma-separated names of the user's departments."""
        return ', '.join(self.departments.values_list('name', flat=True))

    @property
    def club_list(self):
        """Comma-separated names of the user's clubs."""
        return ', '.join(self.clubs.values_list('name', flat=True))

    @property
    def can_access_cms(self):
        # ``permissions`` is a reverse OneToOne; it may not exist yet.
        return self.permissions.can_access_cms if hasattr(self, 'permissions') else False

    @property
    def can_impersonate_user(self):
        return self.permissions.can_impersonate_user if hasattr(self, 'permissions') else False

    @property
    def __csv_row__(self):
        """Row used by CSV exports; keep the column order stable."""
        return [
            self.id,
            self.membership_number,
            self.username,
            self.first_name,
            self.last_name,
            self.email,
            self.is_superuser,
            self.status,
            self.preferred_language,
            self.department_list,
            self.club_list,
            self.can_access_cms,
            self.can_impersonate_user
        ]

    def generate_email(self):
        """Return the hard-coded (English) onboarding email body, embedding a
        fresh password-reset link for this user."""
        template = '''
        Good morning {first_name},

        We are pleased to announce the launch of the new {home_club} and ClubLink website.

        The new sites have been built on a fully responsive platform that is easy to navigate. All the familiar tools for managing your account, booking tee times with LinkLine OnLine, accessing the club roster, or signing up for events are very accessible and most importantly, mobile friendly.

        As this is a completely new system, you will need to create a new password to access the member portal. To do so, please click the link below:

        {reset_base}?token={token}

        As a reminder, should you ever forget your password again in the future, you can reset your password at https://clublink.com/login/forgot/.

        Once you have logged in successfully, please familiarize yourself with the new website. We've organized things to be more user friendly based upon feedback over the years with our previous site.

        Here are a few quick tips to navigating your new site:

        Booking a tee time is now easier than ever! On the homepage, click the “Book a Tee Time” button to book tee times with LinkLine OnLine
        To view the Club Calendar, from the homepage click “My Club”
        To view your Member Account Statement, from the homepage click “My Account”
        To opt-in to online statements, under “My Account”, click “My Profile”, and then “Communications”. Check the box next to “Receive annual dues notice via email” and “Receive statement via email”

        If you encounter any issues, please email Member Services at [email protected]. If you need to speak to a Member Services representative, please call 1-800-273-5113.

        Member Services Call Center Hours of Operation
        Weekdays 8 a.m. – 5:30 p.m.
        Weekends 8 a.m. – 2 p.m.

        Regards,

        ClubLink Member Services
        15675 Dufferin Street
        King City, ON, L7B 1K5
        1-800-273-5113
        [email protected]
        www.clublink.com
        '''.format(
            **{
                'first_name': self.first_name,
                'home_club': self.home_club.name if self.home_club else None,
                'reset_base': 'https://clublink.com/login/reset/',
                'token': quote(self.generate_reset_token())
            }
        )
        return template

    @property
    def option_club_name(self):
        return self.option_club.name if self.option_club else None

    def get_roster_phone(self):
        # ``show_phone`` points at the Address whose phone the member chose to share.
        return self.profile.show_phone.phone if self.profile.show_phone else None

    def get_roster_cell(self):
        return self.profile.show_cell.cell_phone if self.profile.show_cell else None

    def get_roster_email(self):
        return self.profile.show_email.email if self.profile.show_email else None

    @property
    def my_cell_phone(self):
        # Implicitly returns None when the user has no addresses.
        if self.addresses.exists():
            return self.addresses.first().cell_phone

    @property
    def my_phone(self):
        if self.addresses.exists():
            return self.addresses.first().phone

    @property
    def is_active(self):
        """Overrides the stock boolean: only resigned ('R') users are inactive."""
        return self.status != 'R'

    @property
    def legacy_renewal_link(self):
        return 'https://clublinkplayersclub.ca/?member={}'.format(
            quote_plus(self.encrypted_membership_number)
        )

    @property
    def renewal_link(self):
        """Membership-renewal URL carrying an AES-ECB, urlsafe-base64 payload.

        NOTE(review): ECB mode leaks plaintext patterns and the payload is
        space-padded via ``rjust``; kept for compatibility with the existing
        renewal endpoint. PyCryptodome's ``encrypt`` expects bytes — this str
        payload suggests the legacy PyCrypto/py2 behaviour; confirm library.
        """
        data = {'firstName': self.first_name, 'lastName': self.last_name,
                'membershipNumber': self.membership_number, 'email': self.email}
        # Pad the serialized payload up to a multiple of the AES block size (16).
        roundto = math.ceil(len(str(data)) / 16)
        msg_text = str(data).rjust(roundto * 16)
        secret_key = settings.MEMBERSHIP_ENCODE_KEY
        cipher = AES.new(secret_key, AES.MODE_ECB)
        encoded = base64.urlsafe_b64encode(cipher.encrypt(msg_text))
        # Debug print/decrypt round-trip removed: a property must be side-effect free.
        return '{}?{}'.format(
            settings.MEMBERSHIP_RENEWAL_URL_BASE,
            urlencode({'ztd': encoded})
        )

    def get_full_name(self):
        return '{} {}'.format(self.first_name, self.last_name).strip()

    def get_short_name(self):
        return self.first_name

    def save(self, *args, **kwargs):
        # Normalize language codes so lookups against settings.LANGUAGES match.
        if self.preferred_language:
            self.preferred_language = self.preferred_language.lower()
        super().save(*args, **kwargs)

    def generate_reset_token(self):
        """Encrypted, self-expiring password-reset token.

        The current password hash is embedded as a nonce, so changing the
        password invalidates all outstanding tokens.
        """
        cipher = AESCipher()
        details = {
            'timestamp': calendar.timegm(datetime.utcnow().utctimetuple()),
            'nonce': self.password,
            'pk': self.pk,
        }
        return cipher.encrypt(json.dumps(details))

    @classmethod
    def parse_reset_token(cls, token):
        """Decrypt ``token`` and return the matching user.

        Raises ``InvalidToken`` for undecryptable/mismatched tokens and
        ``ExpiredToken`` for tokens older than 14 days.
        """
        cipher = AESCipher()

        try:
            token_json = cipher.decrypt(token)
        except Exception:  # narrowed from bare except: don't trap SystemExit etc.
            raise cls.InvalidToken()

        try:
            details = json.loads(token_json)
        except json.JSONDecodeError:
            raise cls.InvalidToken()

        # Tokens are valid for 14 days.
        now = calendar.timegm(datetime.utcnow().utctimetuple())
        if now - details['timestamp'] > 14 * 24 * 60 * 60:
            raise cls.ExpiredToken()

        try:
            nonce = details['nonce']
        except KeyError:
            raise cls.InvalidToken()

        try:
            user = cls.objects.get(pk=details['pk'], password=nonce)
        except cls.DoesNotExist:
            raise cls.InvalidToken()

        return user

    @property
    def encrypted_membership_number(self):
        if not self.membership_number:
            return None
        cipher = AESCipher()
        return cipher.encrypt(self.membership_number).decode()

    @staticmethod
    def decrypt_membership_number(encrypted):
        cipher = AESCipher()
        return cipher.decrypt(encrypted)

    def permits(self, name, default=False):
        """True when the user has admin flag ``name``; superusers always pass."""
        if self.is_superuser:
            return True
        permissions, _ = UserPermissions.objects.get_or_create(user=self)
        return getattr(permissions, name, default)
@receiver(signals.post_save, sender=User)
def user_post_save(sender, instance, **kwargs):
    # Guarantee every user has Profile and UserPermissions rows (idempotent).
    Profile.objects.get_or_create(user=instance)
    UserPermissions.objects.get_or_create(user=instance)
class Address(models.Model):
    """Contact/postal address attached to a user; one row per address type."""
    type = models.CharField(max_length=10)
    '''
    We are moving the charfield type to a choicefield.
    Currently, there is no way to properly link things without either a FK or
    a proper choicefield.
    '''
    HOME = 'H'
    BUSINESS = 'B'
    COTTAGE = 'C'
    OTHER = 'O'

    ADDRESS_TYPE_CHOICES = (
        (HOME, _('Home')),
        (BUSINESS, _('Business')),
        (COTTAGE, _('Cottage')),
        (OTHER, _('Other')),
    )

    # Transitional replacement for the free-text ``type`` column above.
    _type = models.CharField(
        choices = ADDRESS_TYPE_CHOICES,
        max_length = 1,
        blank=True, null=True
    )

    user = models.ForeignKey(
        User,
        related_name='addresses',
        on_delete=models.PROTECT
    )
    address1 = models.CharField(max_length=30, null=True, blank=True)
    address2 = models.CharField(max_length=30, null=True, blank=True)
    cell_phone = models.CharField(max_length=30, null=True, blank=True)
    city = models.CharField(max_length=30, null=True, blank=True)
    country = models.CharField(max_length=3, null=True, blank=True)
    email = models.EmailField(null=True, blank=True)
    phone = models.CharField(max_length=30, null=True, blank=True)
    state = models.CharField(max_length=3, null=True, blank=True)
    postal_code = models.CharField(max_length=10, null=True, blank=True)

    class Meta:
        # A user can hold at most one address per (legacy) type string.
        unique_together = (('type', 'user'),)
class Profile(models.Model):
    """Member preferences/visibility settings (auto-created by user_post_save)."""
    user = models.OneToOneField(
        User,
        related_name='profile',
        on_delete=models.PROTECT
    )
    joined = models.DateField(default=today)
    title = models.CharField(max_length=10, null=True, blank=True)
    dob = models.DateField(null=True)
    gender = models.CharField(max_length=1, null=True, blank=True)
    employer = models.CharField(max_length=80, null=True, blank=True)
    position = models.CharField(max_length=30, null=True, blank=True)
    statement_cycle_id = models.CharField(max_length=2, null=True, blank=True)
    show_in_roster = models.BooleanField(default=False)
    prepaid_cart = models.BooleanField(default=False)
    email_dues_notice = models.BooleanField(default=False)
    email_statement = models.BooleanField(default=False)
    subscribe_score = models.BooleanField(default=False)
    subscribe_clublink_info = models.BooleanField(default=False)
    subscribe_club_info = models.BooleanField(default=False)
    billing_address = models.ForeignKey(
        Address,
        null=True,
        related_name='billing_profile',
        on_delete=models.SET_NULL
    )
    mailing_address = models.ForeignKey(Address,
        null=True,
        related_name='mailing_profile',
        on_delete=models.SET_NULL
    )
    # The three FKs below select WHICH Address record's contact detail is
    # exposed on the club roster (see User.get_roster_*).
    show_email = models.ForeignKey(
        Address,
        on_delete=models.PROTECT,
        help_text='ForeignKey to know which email to show.',
        blank=True,
        null=True,
        related_name='email_profiles'
    )
    show_phone = models.ForeignKey(
        Address,
        on_delete=models.PROTECT,
        help_text='ForeignKey to know which phone to show.',
        blank=True,
        null=True,
        related_name='phone_profiles'
    )
    show_cell = models.ForeignKey(
        Address,
        on_delete=models.PROTECT,
        help_text='ForeignKey to know which cell to show.',
        blank=True,
        null=True,
        related_name='cell_profiles'
    )
class UserPermissions(models.Model):
    """Per-user admin capability flags (auto-created by user_post_save)."""
    user = models.OneToOneField(
        User,
        related_name='permissions',
        on_delete=models.PROTECT
    )
    can_access_cms = models.BooleanField(default=False)
    can_impersonate_user = models.BooleanField(default=False)
# Order in which models must be synchronized from the upstream membership
# system: FK targets first, then the models that reference them.
sync_order = [
    ClubCorp, UserCategory, UserType,
    User, Profile, UserPermissions
]
"[email protected]"
] | |
7a5e196b02ba31337cbddd33024bc481f016989c | c2ed4a7cb50296d11a8d16c2c68165737d2a940f | /qdmr/data/utils.py | c3ae2fee7ec73f5169b18c6b5436e3ad067d04e5 | [] | no_license | nitishgupta/qdmr | 24cb99a5a21b9203e60a5a52f62e58d34f958c46 | 812ad76104a01a9692256ddbbab6435cf425b5a0 | refs/heads/master | 2021-02-03T22:50:18.864016 | 2020-06-01T17:08:39 | 2020-06-01T17:08:39 | 243,564,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,136 | py | import json
from typing import List, Tuple, Set, Dict, Union, Any
from qdmr.domain_languages.qdmr_language import QDMRLanguage
# Singleton grammar instance used to enumerate the QDMR predicate inventory.
# NOTE: "qdmr_langugage" is a typo, but it is a module-level name; kept as-is.
qdmr_langugage = QDMRLanguage()
# All predicate names defined by the grammar; anything not in this list is
# treated as a raw question-span argument (see string_arg_to_quesspan_pred).
QDMR_predicates = list(qdmr_langugage._functions.keys())
class Node(object):
    """A node in a QDMR program tree.

    ``predicate`` is a grammar function name (e.g. ``SELECT``); leaves that
    ground a question span additionally carry ``string_arg``. ``parent`` is
    None for the root; an empty ``children`` list marks a leaf.
    """

    def __init__(self, predicate, string_arg=None):
        self.predicate = predicate
        self.string_arg = string_arg
        # Empty list indicates leaf node
        self.children: List[Node] = []
        # parent==None indicates root
        self.parent: Node = None

    def add_child(self, obj):
        """Append ``obj`` as the right-most child and set its parent pointer."""
        assert isinstance(obj, Node)
        obj.parent = self
        self.children.append(obj)

    def is_leaf(self):
        """True iff this node has no children."""
        return not self.children

    def get_nested_expression(self):
        """Nested-list program with string-args masked by their predicates."""
        if self.is_leaf():
            return self.predicate
        return [self.predicate] + [child.get_nested_expression() for child in self.children]

    def get_nested_expression_with_strings(self):
        """Nested expression where string-arg nodes are written PREDICATE(string_arg).

        This form is written to json data and parsed back by splitting on
        '(' and ')' (see nested_expression_to_tree).
        """
        node_name = self.predicate
        if self.string_arg is not None:
            # e.g. GET_QUESTION_NUMBER(25)
            node_name = node_name + "(" + self.string_arg + ")"
        if self.is_leaf():
            return node_name
        return [node_name] + [child.get_nested_expression_with_strings()
                              for child in self.children]
class QDMRExample(object):
    """One QDMR question/program record deserialized from the processed json.

    Progressively enriched by the parse_dataset pipeline: later stages add
    ``typed_nested_expression`` / ``drop_nested_expression``, from which the
    program tree and the string-arg-masked expression are derived.
    """
    def __init__(self, q_decomp):
        self.query_id = q_decomp["question_id"]
        self.question = q_decomp["question_text"]
        self.split = q_decomp["split"]
        self.decomposition = q_decomp["decomposition"]
        self.program: List[str] = q_decomp["program"]
        self.nested_expression: List = q_decomp["nested_expression"]
        self.operators = q_decomp["operators"]
        # Filled by parse_dataset/qdmr_grammar_program.py if transformation to QDMR-language is successful
        # This contains string-args as it is
        self.typed_nested_expression: List = []
        if "typed_nested_expression" in q_decomp:
            self.typed_nested_expression = q_decomp["typed_nested_expression"]

        # This was added after completing parse_dataset/drop_grammar_program. This class is a moving target.
        # This nested_expression should be s.t. string-arg grounding predicates occur as PREDICATE(string-arg).
        # e.g. ['FILTER_NUM_EQ', ['SELECT', 'GET_QUESTION_SPAN(field goals of Mason)'], 'GET_QUESTION_NUMBER(37)']
        self.drop_nested_expression: List = []
        if "drop_nested_expression" in q_decomp:
            self.drop_nested_expression = q_decomp["drop_nested_expression"]

        self.program_tree: Node = None
        self.typed_masked_nested_expr = []
        # Prefer the DROP-style expression when available; fall back to the
        # older typed expression. Both parse with predicates_with_strings=True.
        if self.drop_nested_expression:
            self.program_tree: Node = nested_expression_to_tree(self.drop_nested_expression,
                                                                predicates_with_strings=True)
            # This contains string-args masked as GET_QUESTION_SPAN predicate
            self.typed_masked_nested_expr = self.program_tree.get_nested_expression()
        elif self.typed_nested_expression:
            self.program_tree: Node = nested_expression_to_tree(self.typed_nested_expression,
                                                                predicates_with_strings=True)
            # This contains string-args masked as GET_QUESTION_SPAN predicate
            self.typed_masked_nested_expr = self.program_tree.get_nested_expression()
        self.extras: Dict = q_decomp.get("extras", {})

    def to_json(self):
        """Serialize back to the processed-json schema (inverse of __init__)."""
        json_dict = {
            "question_id": self.query_id,
            "question_text": self.question,
            "split": self.split,
            "decomposition": self.decomposition,
            "program": self.program,
            "nested_expression": self.nested_expression,
            "typed_nested_expression": self.typed_nested_expression,
            "drop_nested_expression": self.drop_nested_expression,
            "operators": self.operators,
            "extras": self.extras,
        }
        return json_dict
def read_qdmr_json_to_examples(qdmr_json: str) -> List[QDMRExample]:
    """Load a processed QDMR json file and wrap each record as a QDMRExample."""
    with open(qdmr_json, 'r') as f:
        dataset = json.load(f)
    return [QDMRExample(q_decomp) for q_decomp in dataset]
def write_qdmr_examples_to_json(qdmr_examples: List[QDMRExample], qdmr_json: str):
    """Serialize examples (via their ``to_json``) into ``qdmr_json`` (indent=4)."""
    serialized = [example.to_json() for example in qdmr_examples]
    with open(qdmr_json, 'w') as outf:
        json.dump(serialized, outf, indent=4)
def nested_expression_to_lisp(nested_expression) -> str:
    """Serialize a nested-list program into a LISP-style string.

    A string is returned as-is; a list becomes "(elem1 elem2 ...)" with each
    element serialized recursively.
    """
    if isinstance(nested_expression, str):
        return nested_expression
    if isinstance(nested_expression, list):
        inner = " ".join(nested_expression_to_lisp(arg) for arg in nested_expression)
        return "(" + inner + ")"
    raise NotImplementedError
def convert_nestedexpr_to_tuple(nested_expression):
    """Convert a nested expression list into a nested tuple (hashable).

    NOTE: this definition is shadowed by a later ``convert_nestedexpr_to_tuple``
    in this module that additionally returns the set of function names.
    """
    converted = list(nested_expression[:1])
    for argument in nested_expression[1:]:
        if isinstance(argument, list):
            converted.append(convert_nestedexpr_to_tuple(argument))
        else:
            converted.append(argument)
    return tuple(converted)
def linearize_nested_expression(nested_expression, open_bracket: str = "(",
                                close_bracket: str = ")") -> List[str]:
    """Flatten a program (nested expression) into a bracketed token sequence.

    Natural-language arguments are kept intact as single tokens; downstream
    processing is responsible for tokenizing them further.
    """
    if isinstance(nested_expression, str):
        return [nested_expression]
    if isinstance(nested_expression, list):
        tokens = [open_bracket]
        for element in nested_expression:
            tokens.extend(linearize_nested_expression(element, open_bracket, close_bracket))
        tokens.append(close_bracket)
        return tokens
    raise NotImplementedError
def nested_expression_to_tree(nested_expression, predicates_with_strings) -> Node:
    """Build a program ``Node`` tree from a nested expression.

    Two leaf encodings exist: the plain style where a string arg appears bare
    (``predicates_with_strings=False``), and the newer DROP style where leaves
    are written ``PREDICATE(string-arg)`` (``predicates_with_strings=True``).
    """
    if isinstance(nested_expression, str):
        if not predicates_with_strings:
            return Node(predicate=nested_expression)
        predicate, separator, remainder = nested_expression.partition("(")
        if not separator:
            # Plain predicate with no string argument, e.g. `SELECT`.
            return Node(predicate=predicate)
        # `PREDICATE(string-arg)` -- drop the trailing ")" from the remainder.
        return Node(predicate=predicate, string_arg=remainder[:-1])
    if isinstance(nested_expression, list):
        root = Node(nested_expression[0])
        for sub_expression in nested_expression[1:]:
            root.add_child(nested_expression_to_tree(sub_expression, predicates_with_strings))
        return root
    raise NotImplementedError
def get_inorder_function_list(node: Node) -> List[str]:
    """Pre-order (root-first) list of predicates in the tree rooted at ``node``."""
    functions = [node.predicate]
    for sub_tree in node.children:
        functions.extend(get_inorder_function_list(sub_tree))
    return functions
def get_inorder_function_list_from_template(tempalte: Union[Tuple, str]) -> List[str]:
    """Pre-order list of function names from a tupled program template.

    (Parameter name typo ``tempalte`` kept for API compatibility.)
    """
    if not isinstance(tempalte, tuple):
        return [tempalte]
    functions = [tempalte[0]]
    for child in tempalte[1:]:
        functions.extend(get_inorder_function_list_from_template(child))
    return functions
def string_arg_to_quesspan_pred(node: Node):
    """Mask question-string leaves with the generic GET_QUESTION_SPAN predicate.

    Mutates the tree in place: any node whose predicate is not a known QDMR
    function is treated as a raw question span. Returns the (same) root node.
    """
    if node.predicate not in QDMR_predicates:
        # Not a grammar function -- it must be a question string argument.
        node.string_arg = node.predicate
        node.predicate = "GET_QUESTION_SPAN"
    for sub_node in node.children:
        string_arg_to_quesspan_pred(sub_node)
    return node
def convert_nestedexpr_to_tuple(nested_expression) -> Tuple[Set[str], Tuple]:
    """Tuple-ize a nested expression and collect the function names it uses.

    Returns ``(function_names, tupled_expression)``. This definition shadows
    the earlier two-argument-free version in this module.
    """
    function_names = set()
    converted = []
    if nested_expression:
        function_names.add(nested_expression[0])
        converted.append(nested_expression[0])
    for argument in nested_expression[1:]:
        if isinstance(argument, list):
            child_funcs, child_tuple = convert_nestedexpr_to_tuple(argument)
            function_names |= child_funcs
            converted.append(child_tuple)
        else:
            converted.append(argument)
    return function_names, tuple(converted)
def read_drop_dataset(input_json: str):
    """Load a DROP-format dataset (passage-id -> annotation dict) from json."""
    with open(input_json, "r") as f:
        return json.load(f)
def convert_answer(answer_annotation: Dict[str, Union[str, Dict, List]]) -> Tuple[str, List]:
    """Normalize a DROP answer annotation into ``(answer_type, answer_texts)``.

    ``answer_type`` is one of "spans" / "number" / "date" (checked in that
    priority order). Returns None when the annotation holds no answer.
    """
    if answer_annotation["spans"]:
        answer_type = "spans"
    elif answer_annotation["number"]:
        answer_type = "number"
    elif any(answer_annotation["date"].values()):
        answer_type = "date"
    else:
        return None

    answer_content = answer_annotation[answer_type]
    if answer_type == "spans":
        # answer_content is already a list of strings
        answer_texts = answer_content
    elif answer_type == "number":
        # answer_content is a single number string
        answer_texts = [answer_content]
    else:
        # date: dict with "month", "day", "year" keys; keep non-empty parts
        answer_texts = [answer_content[key] for key in ("month", "day", "year")
                        if key in answer_content and answer_content[key]]
    return answer_type, answer_texts
if __name__ == "__main__":
    p = ['FILTER_NUM_GT', ['FILTER', ['SELECT', 'GET_QUESTION_SPAN(yards of TD passes)'], 'GET_QUESTION_SPAN(in the first half)'], 'GET_QUESTION_NUMBER(70)']
    # Fix: the demo expression encodes string args as PREDICATE(string-arg),
    # so the required `predicates_with_strings` flag must be True (it was
    # missing, raising TypeError). The previous first print also called a
    # nonexistent method (`_get_nested_expression_with_predicate_and_strings`).
    node: Node = nested_expression_to_tree(p, predicates_with_strings=True)
    print(node.get_nested_expression_with_strings())
    print(node.get_nested_expression())
"[email protected]"
] | |
ce7ffbdec0eb226aa156c9473f3ba0daa63fab4c | 63191be7f688591af69263972d68423d76fb5f74 | /geekshop/adminapp/controllers/user.py | 05aa0358e05154eb814c7758f5dea946a87f8647 | [] | no_license | IliaNiyazof/Django | 5eee4c226a1f06178fdbb5626444fff406886de7 | 052cb4f3f142c4224454ebac9fb27f63de9cbc47 | refs/heads/master | 2021-07-19T05:52:56.620026 | 2020-06-05T16:17:47 | 2020-06-05T16:17:47 | 166,776,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,765 | py | from django.views.generic.list import ListView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import user_passes_test
from django.urls import reverse_lazy
from django.shortcuts import HttpResponseRedirect
from authapp.models import ShopUser
class UserListView(ListView):
    """Admin list of shop users; access restricted to superusers."""
    model = ShopUser
    template_name = 'adminapp/users/users_index.html'

    # Gate every HTTP method behind a superuser check.
    # NOTE(review): the Create/Update/Delete views below lack this guard -- confirm.
    @method_decorator(user_passes_test(lambda u: u.is_superuser))
    def dispatch(self, request, *args, **kwargs):
        return super(UserListView, self).dispatch(request, *args, **kwargs)
class UserCreateView(CreateView):
    """Admin form to create a shop user.

    NOTE(review): 'password' is exposed as a plain model form field, which
    would store it unhashed -- verify against a proper user-creation form.
    """
    model = ShopUser
    template_name = 'adminapp/users/update.html'
    fields = ('username', 'age', 'password', 'email', 'first_name', 'avatar')
    success_url = reverse_lazy('admin:users_index')
class UserUpdateView(UpdateView):
    """Admin form to edit an existing shop user (password not editable here)."""
    model = ShopUser
    template_name = 'adminapp/users/update.html'
    fields = ('username', 'age', 'email', 'first_name', 'avatar')
    success_url = reverse_lazy('admin:users_index')

    def get_context_data(self, **kwargs):
        parent_context = super(UserUpdateView, self).get_context_data(**kwargs)
        # Page title shown in the template.
        # NOTE(review): the Russian title says "users/creation" on an *update*
        # view -- likely copy-pasted; confirm intended wording.
        parent_context['title'] = 'пользователи/создание'
        return parent_context
class UserDeleteView(DeleteView):
    """Soft-delete: marks the user inactive instead of removing the row."""
    model = ShopUser
    template_name = 'adminapp/users/delete.html'
    success_url = reverse_lazy('admin:users_index')

    def delete(self, request, *args, **kwargs):
        self.object = self.get_object()  # fetch the user being "deleted"
        # Deactivate rather than delete, preserving order history/FKs.
        self.object.is_active = False
        self.object.save()
        return HttpResponseRedirect(self.get_success_url())
| [
"[email protected]"
] | |
3242ea4001760905374ab93d15598db51c8318dc | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/audio/tdnn/speechbrain/lobes/models/transformer/TransformerASR.py | 631c6e8b82bf406d9a02a9aa6393ec730669b547 | [
"Apache-2.0",
"GPL-1.0-or-later",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 13,392 | py | """Transformer for ASR in the SpeechBrain style.
Authors
* Jianyuan Zhong 2020
"""
import torch # noqa 42
from torch import nn
from typing import Optional
from speechbrain.nnet.linear import Linear
from speechbrain.nnet.containers import ModuleList
from speechbrain.lobes.models.transformer.Transformer import (
TransformerInterface,
get_lookahead_mask,
get_key_padding_mask,
NormalizedEmbedding,
)
from speechbrain.nnet.activations import Swish
from speechbrain.dataio.dataio import length_to_mask
class TransformerASR(TransformerInterface):
"""This is an implementation of transformer model for ASR.
The architecture is based on the paper "Attention Is All You Need":
https://arxiv.org/pdf/1706.03762.pdf
Arguments
----------
tgt_vocab: int
Size of vocabulary.
input_size: int
Input feature size.
d_model : int, optional
Embedding dimension size.
(default=512).
nhead : int, optional
The number of heads in the multi-head attention models (default=8).
num_encoder_layers : int, optional
The number of sub-encoder-layers in the encoder (default=6).
num_decoder_layers : int, optional
The number of sub-decoder-layers in the decoder (default=6).
dim_ffn : int, optional
The dimension of the feedforward network model (default=2048).
dropout : int, optional
The dropout value (default=0.1).
activation : torch.nn.Module, optional
The activation function of FFN layers.
Recommended: relu or gelu (default=relu).
positional_encoding: str, optional
Type of positional encoding used. e.g. 'fixed_abs_sine' for fixed absolute positional encodings.
normalize_before: bool, optional
Whether normalization should be applied before or after MHA or FFN in Transformer layers.
Defaults to True as this was shown to lead to better performance and training stability.
kernel_size: int, optional
Kernel size in convolutional layers when Conformer is used.
bias: bool, optional
Whether to use bias in Conformer convolutional layers.
encoder_module: str, optional
Choose between Conformer and Transformer for the encoder. The decoder is fixed to be a Transformer.
conformer_activation: torch.nn.Module, optional
Activation module used after Conformer convolutional layers. E.g. Swish, ReLU etc. it has to be a torch Module.
branchformer_activation: torch.nn.Module, optional
Activation module used within the Branchformer Encoder. E.g. Swish, ReLU etc. it has to be a torch Module.
attention_type: str, optional
Type of attention layer used in all Transformer or Conformer layers.
e.g. regularMHA or RelPosMHA.
max_length: int, optional
Max length for the target and source sequence in input.
Used for positional encodings.
causal: bool, optional
Whether the encoder should be causal or not (the decoder is always causal).
If causal the Conformer convolutional layer is causal.
csgu_linear_units: int, optional
Number of neurons in the hidden linear units of the CSGU Module.
-> Branchformer
gate_activation: torch.nn.Module, optional
Activation function used at the gate of the CSGU module.
-> Branchformer
use_linear_after_conv: bool, optional
If True, will apply a linear transformation of size input_size//2.
-> Branchformer
Example
-------
>>> src = torch.rand([8, 120, 512])
>>> tgt = torch.randint(0, 720, [8, 120])
>>> net = TransformerASR(
... 720, 512, 512, 8, 1, 1, 1024, activation=torch.nn.GELU
... )
>>> enc_out, dec_out = net.forward(src, tgt)
>>> enc_out.shape
torch.Size([8, 120, 512])
>>> dec_out.shape
torch.Size([8, 120, 512])
"""
def __init__(
    self,
    tgt_vocab,
    input_size,
    d_model=512,
    nhead=8,
    num_encoder_layers=6,
    num_decoder_layers=6,
    d_ffn=2048,
    dropout=0.1,
    activation=nn.ReLU,
    positional_encoding="fixed_abs_sine",
    normalize_before=False,
    kernel_size: Optional[int] = 31,
    bias: Optional[bool] = True,
    encoder_module: Optional[str] = "transformer",
    conformer_activation: Optional[nn.Module] = Swish,
    branchformer_activation: Optional[nn.Module] = nn.GELU,
    attention_type: Optional[str] = "regularMHA",
    max_length: Optional[int] = 2500,
    causal: Optional[bool] = True,
    csgu_linear_units: Optional[int] = 3072,
    gate_activation: Optional[nn.Module] = nn.Identity,
    use_linear_after_conv: Optional[bool] = False,
):
    """Configure the shared encoder/decoder backbone, then attach the
    ASR-specific source and target front-end modules."""
    backbone_kwargs = dict(
        d_model=d_model,
        nhead=nhead,
        num_encoder_layers=num_encoder_layers,
        num_decoder_layers=num_decoder_layers,
        d_ffn=d_ffn,
        dropout=dropout,
        activation=activation,
        positional_encoding=positional_encoding,
        normalize_before=normalize_before,
        kernel_size=kernel_size,
        bias=bias,
        encoder_module=encoder_module,
        conformer_activation=conformer_activation,
        branchformer_activation=branchformer_activation,
        attention_type=attention_type,
        max_length=max_length,
        causal=causal,
        csgu_linear_units=csgu_linear_units,
        gate_activation=gate_activation,
        use_linear_after_conv=use_linear_after_conv,
    )
    super().__init__(**backbone_kwargs)

    # Project input features (input_size) to the model dimension, with
    # dropout, before they enter the encoder.
    self.custom_src_module = ModuleList(
        Linear(
            input_size=input_size,
            n_neurons=d_model,
            bias=True,
            combine_dims=False,
        ),
        torch.nn.Dropout(dropout),
    )
    # Token-embedding front-end for the decoder inputs.
    self.custom_tgt_module = ModuleList(
        NormalizedEmbedding(d_model, tgt_vocab)
    )

    # reset parameters using xavier_normal_
    self._init_params()
def forward(self, src, tgt, wav_len=None, pad_idx=0):
    """Run the full model: encode ``src``, then decode ``tgt`` against it.

    Arguments
    ----------
    src : torch.Tensor
        The sequence to the encoder.
    tgt : torch.Tensor
        The sequence to the decoder.
    wav_len: torch.Tensor, optional
        Torch Tensor of shape (batch, ) containing the relative length to padded length for each example.
    pad_idx : int, optional
        The index for <pad> token (default=0).

    Returns
    -------
    (encoder_out, decoder_out) : the encoder hidden states and the
    decoder output.
    """
    # reshape the src vector to [Batch, Time, Fea] if a 4d vector is given
    if src.ndim == 4:
        bz, t, ch1, ch2 = src.shape
        src = src.reshape(bz, t, ch1 * ch2)

    (
        src_key_padding_mask,
        tgt_key_padding_mask,
        src_mask,
        tgt_mask,
    ) = self.make_masks(src, tgt, wav_len, pad_idx=pad_idx)

    src = self.custom_src_module(src)
    # RelPosMHAXL consumes separate relative positional embeddings; for
    # fixed absolute sine encodings the positions are added to src itself.
    if self.attention_type == "RelPosMHAXL":
        pos_embs_encoder = self.positional_encoding(src)
    elif self.positional_encoding_type == "fixed_abs_sine":
        src = src + self.positional_encoding(src)  # add the encodings here
        pos_embs_encoder = None

    encoder_out, _ = self.encoder(
        src=src,
        src_mask=src_mask,
        src_key_padding_mask=src_key_padding_mask,
        pos_embs=pos_embs_encoder,
    )

    tgt = self.custom_tgt_module(tgt)

    # Add positional encoding to the target before feeding the decoder.
    if self.attention_type == "RelPosMHAXL":
        # use standard sinusoidal pos encoding in decoder
        tgt = tgt + self.positional_encoding_decoder(tgt)
        pos_embs_encoder = None  # self.positional_encoding(src)
        pos_embs_target = None
    elif self.positional_encoding_type == "fixed_abs_sine":
        tgt = tgt + self.positional_encoding(tgt)
        pos_embs_target = None
        pos_embs_encoder = None

    decoder_out, _, _ = self.decoder(
        tgt=tgt,
        memory=encoder_out,
        memory_mask=src_mask,
        tgt_mask=tgt_mask,
        tgt_key_padding_mask=tgt_key_padding_mask,
        memory_key_padding_mask=src_key_padding_mask,
        pos_embs_tgt=pos_embs_target,
        pos_embs_src=pos_embs_encoder,
    )

    return encoder_out, decoder_out
def make_masks(self, src, tgt, wav_len=None, pad_idx=0):
    """This method generates the masks for training the transformer model.

    Arguments
    ---------
    src : tensor
        The sequence to the encoder (required).
    tgt : tensor
        The sequence to the decoder (required).
    wav_len : tensor, optional
        Relative lengths (fraction of the padded length) per example;
        when given, a source key-padding mask is derived from it.
    pad_idx : int
        The index for <pad> token (default=0).
    """
    if wav_len is None:
        src_key_padding_mask = None
    else:
        abs_len = torch.round(wav_len * src.shape[1])
        # True marks the padded positions past each example's true length.
        src_key_padding_mask = ~length_to_mask(abs_len).bool()

    tgt_key_padding_mask = get_key_padding_mask(tgt, pad_idx=pad_idx)

    src_mask = None
    tgt_mask = get_lookahead_mask(tgt)
    return src_key_padding_mask, tgt_key_padding_mask, src_mask, tgt_mask
@torch.no_grad()
def decode(self, tgt, encoder_out, enc_len=None):
    """This method implements a decoding step for the transformer model.

    Runs the decoder only (gradients disabled); pair with ``encode()``.

    Arguments
    ---------
    tgt : torch.Tensor
        The sequence to the decoder.
    encoder_out : torch.Tensor
        Hidden output of the encoder.
    enc_len : torch.LongTensor
        The actual length of encoder states.

    Returns
    -------
    The decoder prediction and the multi-head attention of the last
    decoder layer.
    """
    tgt_mask = get_lookahead_mask(tgt)
    src_key_padding_mask = None
    if enc_len is not None:
        src_key_padding_mask = (1 - length_to_mask(enc_len)).bool()
    tgt = self.custom_tgt_module(tgt)
    if self.attention_type == "RelPosMHAXL":
        # use standard sinusoidal pos encoding in decoder
        tgt = tgt + self.positional_encoding_decoder(tgt)
        pos_embs_encoder = None  # self.positional_encoding(src)
        pos_embs_target = None
    elif self.positional_encoding_type == "fixed_abs_sine":
        tgt = tgt + self.positional_encoding(tgt)
        pos_embs_target = None
        pos_embs_encoder = None
    prediction, self_attns, multihead_attns = self.decoder(
        tgt,
        encoder_out,
        tgt_mask=tgt_mask,
        memory_key_padding_mask=src_key_padding_mask,
        pos_embs_tgt=pos_embs_target,
        pos_embs_src=pos_embs_encoder,
    )
    return prediction, multihead_attns[-1]
def encode(self, src, wav_len=None):
    """
    Encoder forward pass.

    Arguments
    ----------
    src : torch.Tensor
        The sequence to the encoder.
    wav_len: torch.Tensor, optional
        Torch Tensor of shape (batch, ) containing the relative length to padded length for each example.

    Returns
    -------
    encoder_out : torch.Tensor
        The encoder hidden representation.
    """
    # reshape the src vector to [Batch, Time, Fea] if a 4d vector is given
    if src.dim() == 4:
        bz, t, ch1, ch2 = src.shape
        src = src.reshape(bz, t, ch1 * ch2)

    src_key_padding_mask = None
    if wav_len is not None:
        # Consistency fix: build the padding mask exactly as make_masks()
        # does for the training path (torch.round + ~length_to_mask).
        # The previous torch.floor + `arange > abs_len` variant rounded
        # differently AND left the first padded frame unmasked, so
        # inference masking could disagree with training by one frame.
        abs_len = torch.round(wav_len * src.shape[1])
        src_key_padding_mask = ~length_to_mask(abs_len).bool()

    src = self.custom_src_module(src)
    if self.attention_type == "RelPosMHAXL":
        pos_embs_source = self.positional_encoding(src)
    elif self.positional_encoding_type == "fixed_abs_sine":
        src = src + self.positional_encoding(src)
        pos_embs_source = None

    encoder_out, _ = self.encoder(
        src=src,
        src_key_padding_mask=src_key_padding_mask,
        pos_embs=pos_embs_source,
    )
    return encoder_out
def _init_params(self):
for p in self.parameters():
if p.dim() > 1:
torch.nn.init.xavier_normal_(p)
class EncoderWrapper(nn.Module):
    """Wrap an ASR transformer so that calling the wrapper runs only the
    encoder.

    By default TransformerASR.forward() both encodes and decodes; this
    wrapper's forward() delegates to the wrapped object's .encode()
    instead.  The wrapped object must therefore provide an .encode()
    method.

    Arguments
    ----------
    transformer : sb.lobes.models.TransformerInterface
        A Transformer instance that contains a .encode() function.
    """

    def __init__(self, transformer, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.transformer = transformer

    def forward(self, x, wav_lens=None):
        """Encode ``x`` with the wrapped transformer and return the result."""
        return self.transformer.encode(x, wav_lens)
| [
"[email protected]"
] | |
1a6de88e8985f02d7a949c863b4d01cb2ac488a7 | c3322306696a5e60c4de7e845ed68f58df2b6a0a | /Py/TushareTest/venv/Lib/site-packages/asynq/debug.py | 48b45010aa5044c516125100dbb4696558dd7625 | [] | no_license | lifewwy/future | b19ff997efe5a2d721ff0fd338509dee49b01bac | 916fc0d7c502c98bcbc625f7ccd46483e627576c | refs/heads/master | 2020-03-23T15:51:20.810688 | 2019-03-05T02:37:09 | 2019-03-05T02:37:09 | 141,778,894 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,538 | py | # Copyright 2016 Quora, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import contextlib
import sys
import qcore
import inspect
import linecache
import traceback
import logging
from sys import stderr, stdout
from . import _debug
options = _debug.options  # Must be the same object

# On/off switches for the various categories of debug output.  Everything
# defaults to off except DUMP_PRE_ERROR_STATE; flip individual flags (or
# call DUMP_ALL) to trace scheduler internals.
options.DUMP_PRE_ERROR_STATE = True
options.DUMP_EXCEPTIONS = False
options.DUMP_AWAIT_RECURSION = False
options.DUMP_SCHEDULE_TASK = False
options.DUMP_CONTINUE_TASK = False
options.DUMP_SCHEDULE_BATCH = False
options.DUMP_FLUSH_BATCH = False
options.DUMP_DEPENDENCIES = False
options.DUMP_COMPUTED = False
options.DUMP_NEW_TASKS = False
options.DUMP_YIELD_RESULTS = False
options.DUMP_QUEUED_RESULTS = False
options.DUMP_CONTEXTS = False
options.DUMP_SCHEDULER_CHANGE = False
options.DUMP_SYNC = False
options.DUMP_PRIMER = False
options.DUMP_STACK = False  # When it's meaningful, e.g. on batch flush
options.DUMP_SCHEDULER_STATE = False

# Tunables (values, not switches):
options.SCHEDULER_STATE_DUMP_INTERVAL = 1  # In seconds
options.DEBUG_STR_REPR_MAX_LENGTH = 240  # In characters, 0 means infinity
options.STACK_DUMP_LIMIT = 10  # In frames, None means infinity
options.ENABLE_COMPLEX_ASSERTIONS = True
def DUMP_ALL(value=None):
    # Delegates to the shared options object; presumably queries/toggles all
    # DUMP_* flags at once -- see asynq._debug for the actual semantics.
    return options.DUMP_ALL(value)
# DUMP_ALL(True)

# State for attach/detach_exception_hook below.
original_hook = None  # previous sys.excepthook, restored on detach
is_attached = False  # guards against double attachment

# Keep the builtins around before this module shadows them with the safe
# str()/repr() helpers defined below.
_std_str = str
_std_repr = repr
def dump_error(error, tb=None):
    """Dumps errors w/async stack traces."""
    try:
        message = format_error(error, tb=tb) or 'No error'
        stderr.write('\n' + message)
    finally:
        stdout.flush()
        stderr.flush()
def format_error(error, tb=None):
    """Formats errors w/async stack traces."""
    if error is None:
        return None
    result = ''
    # Prefer the explicit tb argument; fall back to the async traceback
    # stashed on the error object by the scheduler.
    if tb is not None or hasattr(error, '_traceback'):
        frames = tb or error._traceback
        result += '\n\nTraceback:\n%s' % ''.join(format_tb(frames))
    if isinstance(error, BaseException):
        exc_lines = traceback.format_exception_only(error.__class__, error)
        result += '\n' + ''.join(exc_lines)
    return result
class AsynqStackTracebackFormatter(logging.Formatter):
    """Prints traceback skipping asynq frames during logger.exception/error usages."""

    def formatException(self, exc_info):
        exc_type, exc_value, exc_tb = exc_info
        return format_error(exc_value, tb=exc_tb)
def _should_skip_frame(frame):
if frame:
traceback_hide_directive_name = '__traceback_hide__'
return frame.f_locals.get(
traceback_hide_directive_name,
frame.f_globals.get(
traceback_hide_directive_name,
False)) is True
return False
def extract_tb(tb, limit=None):
    """Stolen from the traceback module, but honors __traceback_hide__.

    Returns a list of (filename, lineno, name, line) tuples; frames that
    request hiding are omitted, but still count toward *limit*.
    """
    if limit is None:
        limit = getattr(sys, 'tracebacklimit', None)
    entries = []
    seen = 0
    while tb is not None and (limit is None or seen < limit):
        frame = tb.tb_frame
        if not _should_skip_frame(frame):
            lineno = tb.tb_lineno
            code = frame.f_code
            filename = code.co_filename
            linecache.checkcache(filename)
            line = linecache.getline(filename, lineno, frame.f_globals)
            line = line.strip() if line else None
            entries.append((filename, lineno, code.co_name, line))
        tb = tb.tb_next
        seen += 1
    return entries
def format_tb(tb):
    """Format a traceback as a list of strings, skipping frames that are
    marked with ``__traceback_hide__`` (see extract_tb)."""
    return traceback.format_list(extract_tb(tb))
def dump_stack(skip=0, limit=None):
    """Dumps current stack trace."""
    skip += 2  # To skip dump_stack and traceback.extract_stack
    if limit is None:
        limit = options.STACK_DUMP_LIMIT
    print('--- Stack trace: -----------------------------------------------------')
    try:
        frame_budget = None if limit is None else limit + skip
        stack = traceback.extract_stack(limit=frame_budget)
        print(''.join(traceback.format_list(stack[:-skip])), end='')
    finally:
        print('----------------------------------------------------------------------')
        stdout.flush()
def dump_asynq_stack():
    """Dumps the current asynq stack to stdout."""
    # Imported lazily: a module-level import would create a circular
    # dependency between debug and scheduler.
    from .scheduler import get_scheduler
    task = get_scheduler().active_task
    if task is None:
        print('dump_asynq_stack: no asynq task currently active')
    else:
        print('\n'.join(task.traceback()))
def dump(state):
    """Write ``state.dump()`` between banner lines on stdout, if the
    DUMP_PRE_ERROR_STATE option is enabled."""
    if not options.DUMP_PRE_ERROR_STATE:
        return
    stdout.flush()
    stderr.flush()
    stdout.write('\n--- Pre-error state dump: --------------------------------------------\n')
    try:
        state.dump()
    finally:
        stdout.write('----------------------------------------------------------------------\n')
        stderr.write('\n')
        stdout.flush()
        stderr.flush()
def write(text, indent=0):
    """Print *text* to stdout with a trailing newline, indenting every
    line by *indent* spaces."""
    if indent > 0:
        pad = ' ' * indent
        text = text.replace('\n', '\n' + pad)
        if not text.startswith('\n'):
            text = pad + text
    stdout.write(text + '\n')
def str(source, truncate=True):
    """Safe str() truncated to DEBUG_STR_REPR_MAX_LENGTH (0 = unlimited).

    Intentionally shadows the builtin within this module; the original
    builtin is preserved above as _std_str.
    """
    return qcore.safe_str(source, options.DEBUG_STR_REPR_MAX_LENGTH if truncate else 0)
def repr(source, truncate=True):
    """Safe repr() truncated to DEBUG_STR_REPR_MAX_LENGTH (0 = unlimited).

    Intentionally shadows the builtin within this module; the original
    builtin is preserved above as _std_repr.
    """
    return qcore.safe_repr(source, options.DEBUG_STR_REPR_MAX_LENGTH if truncate else 0)
def async_exception_hook(type, error, tb):
    """Exception hook capable of printing async stack traces."""
    stdout.flush()
    stderr.flush()
    # Chain to whatever hook was installed before us, then append the
    # async-aware traceback.
    if original_hook is not None:
        original_hook(type, error, tb)
    dump_error(error, tb=tb)
def ipython_custom_exception_handler(self, etype, value, tb, tb_offset=None):
    """Override ipython's exception handler to print async traceback."""
    # Print the async-aware traceback first...
    async_exception_hook(etype, value, tb)
    # ...then fall back to IPython's default traceback rendering.
    self.showtraceback()
def attach_exception_hook():
    """Injects async exception hook into the sys.excepthook."""
    try:
        # Only defined when running under IPython.
        __IPYTHON__
    except NameError:
        shell = None
    else:
        # IPython overrides sys.excepthook itself, so we must register
        # through its custom-exception API instead of replacing the hook.
        from IPython.core.getipython import get_ipython
        # May be None if __IPYTHON__ is somehow defined without a live shell.
        shell = get_ipython()

    if shell is None:
        global is_attached, original_hook
        if is_attached:
            sys.stderr.write("Warning: async exception hook was already attached.\n")
            return
        original_hook = sys.excepthook
        sys.excepthook = async_exception_hook
        is_attached = True
    else:
        shell.set_custom_exc((BaseException,), ipython_custom_exception_handler)
def detach_exception_hook():
    """Removes the async exception hook from sys.excepthook, restoring
    the previously installed hook."""
    global is_attached, original_hook
    assert is_attached, "Async exception hook wasn't attached."
    sys.excepthook = original_hook
    is_attached = False
@contextlib.contextmanager
def enable_complex_assertions(enable=True):
    """Context manager temporarily setting options.ENABLE_COMPLEX_ASSERTIONS."""
    previous = options.ENABLE_COMPLEX_ASSERTIONS
    options.ENABLE_COMPLEX_ASSERTIONS = enable
    try:
        yield None
    finally:
        options.ENABLE_COMPLEX_ASSERTIONS = previous
def disable_complex_assertions():
    """Shortcut for enable_complex_assertions(False)."""
    return enable_complex_assertions(False)
def sync():
    # Placeholder: this symbol is replaced with async.batching.sync during
    # package import.  Reaching this body means the import wiring is broken.
    assert False, "'import asynq' seems broken: this function must be replaced with async.batching.sync."
def get_frame_info(generator):
    """Given a generator, returns its current frame info, or None when the
    generator has no live frame (finished, or not a generator at all)."""
    frame = getattr(generator, 'gi_frame', None)
    if frame is None:
        return None
    return inspect.getframeinfo(frame)
| [
"[email protected]"
] | |
452d3cb8763ef5630fe140977e5d2231bfa8a948 | f0316e656767cf505b32c83eef4df13bb9f6b60c | /LeetCode/Python/Medium/113_path_sum_2.py | 06fd48759de839c50e4e5968b112c39a587e6b81 | [] | no_license | AkshdeepSharma/Classroom | 70ec46b35fab5fc4a9d2eac430659d7dafba93da | 4e55799466c101c736de6c7e07d716ff147deb83 | refs/heads/master | 2022-06-13T18:14:03.236503 | 2022-05-17T20:16:28 | 2022-05-17T20:16:28 | 94,828,359 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def pathSum(self, root, wanted_sum):
        """Collect every root-to-leaf path whose values sum to wanted_sum.

        :type root: TreeNode
        :type wanted_sum: int
        :rtype: List[List[int]]
        """
        paths = []
        self.dfs(root, wanted_sum, [], paths)
        return paths

    def dfs(self, root, wanted_sum, temp, ans):
        """Depth-first walk; temp holds the values on the current path."""
        if not root:
            return
        path_so_far = temp + [root.val]
        if not root.left and not root.right:
            # Leaf: record the path when the remaining target matches.
            if wanted_sum == root.val:
                ans.append(path_so_far)
            return
        remaining = wanted_sum - root.val
        self.dfs(root.left, remaining, path_so_far, ans)
        self.dfs(root.right, remaining, path_so_far, ans)
| [
"[email protected]"
] | |
1c9722a8fd2c2a2eba6c6d1e69415f7e5b34d031 | 6e1549257568a0ca81b3fc5864e2e1fa65171b06 | /salarydk/models/inline_response200105_data.py | 849f613e0b7abadbe1d4fa8130f82fe59c080f22 | [] | no_license | tdwizard/salarydk | 19d3453de8fbdd886a0189dbf232f98de971e18a | dcf5040101b3e576f1068ea104148651e5c66511 | refs/heads/master | 2023-08-05T05:40:09.561288 | 2021-09-24T09:41:43 | 2021-09-24T09:41:43 | 409,910,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,038 | py | # coding: utf-8
"""
Salary.dk API
This is the public API for Salary.dk. # General Our API is a JSON-based, REST-like API. Our webapp uses the exact same API, so everything you can do in our webapp, you can do through our API. However, we are slowly opening up the API, so not all endpoints are documented here yet. Only the endpoints documented here are stable. If there is some functionality you would like to access through our API, please contact us. The API is located at https://api.salary.dk. All requests must use TLS. In order to use the API on behalf of other users than yourself, you need to register as an API client. You do this by sending an e-mail to [email protected] with the name and purpose of your client. API-keys for each account can be obtained once logged in to Salary, under the settings for the Company. All endpoints are documented to be able to return the 500 error code. We strive to not return this error code, so if you do encounter this error code, it might mean there is an error on our side. In this case, do not hesitate to contact us. # Versioning, upgrade and deprecation policy Our API might change over time. In order to ensure a stable API, we follow these rules when changing the API. New fields might be added at any time to any response or as non-required parameters to any input. When adding input fields, we ensure the default behaviour when not supplying the field is the same as the previous version. In these cases, the version of an endpoint is not increased, since it is backwards compatible. Since we might add new fields to responses, be sure to use a JSON parser in your implementation. This ensures that any extra fields added are ignored by your implementation. We might add entirely new endpoints at any time. If we need to change an existing endpoint without being able to make it backwards compatible, we will add a new version of the endpoint, and mark the old as deprecated but still functional. 
We will then contact any users of the deprecated endpoint and ensure an upgrade is performed. Once all consumers have moved to the new endpoint version, the old one will be removed. We will not at any point change the meaning of any existing field, nor will we remove any field or endpoint without following the above deprecated procedure. However, we might add new types to existing enums at any time. # Cross-Origin Resource Sharing This API features Cross-Origin Resource Sharing (CORS) implemented in compliance with [W3C spec](https://www.w3.org/TR/cors/) - and that allows cross-domain communication from the browser. All responses have a wildcard same-origin which makes them completely public and accessible to everyone, including any code on any site, as long as the proper access token is passed. # Authentication All request require an access token. There are two ways to obtain an access token: * Logging in as a user. (this endpoint is not yet publicly available). * Using an API-key: [endpoint](#operation/APIClientLogin) Using one of these methods, you will obtain an access token. In all subsequest requests, this access token should be passed in the Authorization header. The access token is valid for around one hour, after which a new token should be obtained. You do not need to dispose of access tokens once created. They have a limited lifetime, and Salary.dk will automatically expire old ones. For some endpoints, the authorizedUserQuery security definition is used. This allows for passing the access token as a query parameter where it is not possible to pass it as a header. In particular, this is used for downloading files. <!-- ReDoc-Inject: <security-definitions> --> # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from salarydk.configuration import Configuration
class InlineResponse200105Data(object):
    """Auto-generated OpenAPI model (do not edit the generator output by hand).

    Holds the data returned when starting a two-factor enrollment: the
    challenge id plus, for TOTP, the secret key and a QR-code image.
    """

    # Maps python attribute name -> declared OpenAPI type.
    openapi_types = {
        'challenge_id': 'str',
        'key': 'str',
        'qr_image': 'str'
    }

    # Maps python attribute name -> JSON field name.
    attribute_map = {
        'challenge_id': 'challengeID',
        'key': 'key',
        'qr_image': 'qrImage'
    }

    def __init__(self, challenge_id=None, key=None, qr_image=None, local_vars_configuration=None):  # noqa: E501
        """InlineResponse200105Data - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._challenge_id = None
        self._key = None
        self._qr_image = None
        self.discriminator = None

        # challenge_id is required; its setter validates non-None.
        self.challenge_id = challenge_id
        if key is not None:
            self.key = key
        if qr_image is not None:
            self.qr_image = qr_image

    @property
    def challenge_id(self):
        """The ID of the challenge to complete to finish the enrollment.

        :rtype: str
        """
        return self._challenge_id

    @challenge_id.setter
    def challenge_id(self, challenge_id):
        """Set challenge_id; rejects None when client-side validation is on."""
        if self.local_vars_configuration.client_side_validation and challenge_id is None:  # noqa: E501
            raise ValueError("Invalid value for `challenge_id`, must not be `None`")  # noqa: E501
        self._challenge_id = challenge_id

    @property
    def key(self):
        """For TOTP, the secret key to present to the user.

        :rtype: str
        """
        return self._key

    @key.setter
    def key(self, key):
        """Set the TOTP secret key."""
        self._key = key

    @property
    def qr_image(self):
        """For TOTP, a base64-encoded image of the QR code to show the user.

        :rtype: str
        """
        return self._qr_image

    @qr_image.setter
    def qr_image(self, qr_image):
        """Set the base64-encoded QR-code image."""
        self._qr_image = qr_image

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                # One level deep, like the generator's original output.
                result[name] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[name] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, InlineResponse200105Data):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, InlineResponse200105Data):
            return True
        return self.to_dict() != other.to_dict()
| [
"[email protected]"
] | |
7bbaf9dcf2634b177bd8a31f8aab3393fe4990b6 | c086a38a366b0724d7339ae94d6bfb489413d2f4 | /PythonEnv/Lib/site-packages/win32/Demos/win32netdemo.py | eb7e3478f3e4a966e759bd55a82605411e1dd78e | [] | no_license | FlowkoHinti/Dionysos | 2dc06651a4fc9b4c8c90d264b2f820f34d736650 | d9f8fbf3bb0713527dc33383a7f3e135b2041638 | refs/heads/master | 2021-03-02T01:14:18.622703 | 2020-06-09T08:28:44 | 2020-06-09T08:28:44 | 245,826,041 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,640 | py | import sys
import win32api
import win32net
import win32netcon
import win32security
import getopt
import traceback
verbose_level = 0  # bumped by each -v flag in main(); gates verbose() output
server = None  # Run on local machine.
def verbose(msg):
    """Print msg only when verbose output was requested via -v."""
    if not verbose_level:
        return
    print(msg)
def CreateUser():
    "Creates a new test user, then deletes the user"
    testName = "PyNetTestUser"
    # Remove any leftover account from a previous run so NetUserAdd succeeds.
    try:
        win32net.NetUserDel(server, testName)
        print("Warning - deleted user before creating it!")
    except win32net.error:
        pass
    # Level-1 USER_INFO dict describing the throwaway account.
    d = {}
    d['name'] = testName
    d['password'] = 'deleteme'
    d['priv'] = win32netcon.USER_PRIV_USER
    d['comment'] = "Delete me - created by Python test code"
    d['flags'] = win32netcon.UF_NORMAL_ACCOUNT | win32netcon.UF_SCRIPT
    win32net.NetUserAdd(server, 1, d)
    try:
        try:
            # A wrong old password must be rejected by the API.
            win32net.NetUserChangePassword(server, testName, "wrong", "new")
            print("ERROR: NetUserChangePassword worked with a wrong password!")
        except win32net.error:
            pass
        win32net.NetUserChangePassword(server, testName, "deleteme", "new")
    finally:
        # Always clean up the test account, even if the password tests failed.
        win32net.NetUserDel(server, testName)
    print("Created a user, changed their password, and deleted them!")
def UserEnum():
    "Enumerates all the local users"
    resume = 0  # paging handle for NetUserEnum; 0 starts a fresh scan
    nuser = 0
    while 1:
        data, total, resume = win32net.NetUserEnum(server, 3, win32netcon.FILTER_NORMAL_ACCOUNT, resume)
        verbose("Call to NetUserEnum obtained %d entries of %d total" % (len(data), total))
        for user in data:
            verbose("Found user %s" % user['name'])
            nuser = nuser + 1
        # A zero resume handle means the enumeration is complete.
        if not resume:
            break
    assert nuser, "Could not find any users!"
    print("Enumerated all the local users")
def GroupEnum():
    "Enumerates all the domain groups"
    nmembers = 0
    resume = 0  # paging handle for the group enumeration
    while 1:
        data, total, resume = win32net.NetGroupEnum(server, 1, resume)
        # print "Call to NetGroupEnum obtained %d entries of %d total" % (len(data), total)
        for group in data:
            verbose("Found group %(name)s:%(comment)s " % group)
            memberresume = 0
            while 1:
                # Bug fix: the member enumeration must pass its OWN resume
                # handle (memberresume), not the outer group handle; the
                # original passed `resume`, so a multi-page member list
                # would loop forever over the first page.
                memberdata, total, memberresume = win32net.NetGroupGetUsers(server, group['name'], 0, memberresume)
                for member in memberdata:
                    verbose(" Member %(name)s" % member)
                    nmembers = nmembers + 1
                if memberresume == 0:
                    break
        if not resume:
            break
    assert nmembers, "Couldnt find a single member in a single group!"
    print("Enumerated all the groups")
def LocalGroupEnum():
    "Enumerates all the local groups"
    resume = 0  # paging handle for the group enumeration
    nmembers = 0
    while 1:
        data, total, resume = win32net.NetLocalGroupEnum(server, 1, resume)
        for group in data:
            verbose("Found group %(name)s:%(comment)s " % group)
            memberresume = 0
            while 1:
                # Bug fix: pass the member enumeration's OWN resume handle
                # (memberresume) instead of the outer `resume`; otherwise a
                # multi-page member list repeats its first page forever.
                memberdata, total, memberresume = win32net.NetLocalGroupGetMembers(server, group['name'], 2, memberresume)
                for member in memberdata:
                    # Just for the sake of it, we convert the SID to a username
                    username, domain, type = win32security.LookupAccountSid(server, member['sid'])
                    nmembers = nmembers + 1
                    verbose(" Member %s (%s)" % (username, member['domainandname']))
                if memberresume == 0:
                    break
        if not resume:
            break
    assert nmembers, "Couldnt find a single member in a single group!"
    print("Enumerated all the local groups")
def ServerEnum():
    "Enumerates all servers on the network"
    resume = 0  # paging handle for NetServerEnum
    while 1:
        data, total, resume = win32net.NetServerEnum(server, 100, win32netcon.SV_TYPE_ALL, None, resume)
        for s in data:
            verbose("Found server %s" % s['name'])
            # Now loop over the shares.
            # NOTE(review): shares are enumerated on the global `server`,
            # not on the server `s` just found -- verify this is intended.
            shareresume = 0
            while 1:
                sharedata, total, shareresume = win32net.NetShareEnum(server, 2, shareresume)
                for share in sharedata:
                    verbose(" %(netname)s (%(path)s):%(remark)s - in use by %(current_uses)d users" % share)
                if not shareresume:
                    break
        if not resume:
            break
    print("Enumerated all the servers on the network")
def LocalGroup(uname=None):
    "Creates a local group, adds some members, deletes them, then removes the group"
    level = 3
    if uname is None:
        uname = win32api.GetUserName()
    if uname.find("\\") < 0:
        # NetLocalGroupAddMembers expects the DOMAIN\user form.
        uname = win32api.GetDomainName() + "\\" + uname
    group = 'python_test_group'
    # delete the group if it already exists
    try:
        win32net.NetLocalGroupDel(server, group)
        # Bug fix: the original format string had no % argument, so it
        # printed a literal '%s' instead of the group name.
        print("WARNING: existing local group '%s' has been deleted." % group)
    except win32net.error:
        pass
    group_data = {'name': group}
    win32net.NetLocalGroupAdd(server, 1, group_data)
    try:
        u = {'domainandname': uname}
        win32net.NetLocalGroupAddMembers(server, group, level, [u])
        mem, tot, res = win32net.NetLocalGroupGetMembers(server, group, level)
        print("members are", mem)
        if mem[0]['domainandname'] != uname:
            print("ERROR: LocalGroup just added %s, but members are %r" % (uname, mem))
        # Convert the list of dicts to a list of strings.
        win32net.NetLocalGroupDelMembers(server, group, [m['domainandname'] for m in mem])
    finally:
        # Always remove the test group, even on failure above.
        win32net.NetLocalGroupDel(server, group)
    print("Created a local group, added and removed members, then deleted the group")
def GetInfo(userName=None):
    "Dumps level 3 information about the current user"
    if userName is None: userName = win32api.GetUserName()
    print("Dumping level 3 information about user")
    # Level 3 returns the full USER_INFO_3 record.
    info = win32net.NetUserGetInfo(server, userName, 3)
    for key, val in list(info.items()):
        verbose("%s=%s" % (key, val))
def SetInfo(userName=None):
    "Attempts to change the current users comment, then set it back"
    if userName is None: userName = win32api.GetUserName()
    oldData = win32net.NetUserGetInfo(server, userName, 3)
    try:
        d = oldData.copy()
        d["usr_comment"] = "Test comment"
        win32net.NetUserSetInfo(server, userName, 3, d)
        # Read the record back to verify the write took effect.
        new = win32net.NetUserGetInfo(server, userName, 3)['usr_comment']
        if str(new) != "Test comment":
            raise RuntimeError("Could not read the same comment back - got %s" % new)
        print("Changed the data for the user")
    finally:
        # Restore the original record even if verification failed.
        win32net.NetUserSetInfo(server, userName, 3, oldData)
def SetComputerInfo():
    "Doesnt actually change anything, just make sure we could ;-)"
    # Only demonstrates that the level-502 record can be fetched; the
    # write-back below is intentionally left commented out.
    info = win32net.NetWkstaGetInfo(None, 502)
    # *sob* - but we can't! Why not!!!
    # win32net.NetWkstaSetInfo(None, 502, info)
def usage(tests):
    """Print the command-line help for this demo script and exit(1)."""
    import os
    script = os.path.basename(sys.argv[0])
    print("Usage: %s [-s server ] [-v] [Test ...]" % script)
    print(" -v : Verbose - print more information")
    print(" -s : server - execute the tests against the named server")
    print(" -c : include the CreateUser test by default")
    print("where Test is one of:")
    for test in tests:
        print(test.__name__, ":", test.__doc__)
    print()
    print("If not tests are specified, all tests are run")
    sys.exit(1)
def main():
    # Discover the test functions: every documented function in this module.
    tests = []
    for ob in list(globals().values()):
        if type(ob) == type(main) and ob.__doc__:
            tests.append(ob)
    opts, args = getopt.getopt(sys.argv[1:], "s:hvc")
    create_user = False
    for opt, val in opts:
        if opt == "-s":
            global server
            server = val
        if opt == "-h":
            usage(tests)
        if opt == "-v":
            global verbose_level
            verbose_level = verbose_level + 1
        if opt == "-c":
            create_user = True
    if len(args) == 0:
        print("Running all tests - use '-h' to see command-line options...")
        dotests = tests
        # CreateUser mutates the machine's accounts, so it only runs
        # when explicitly requested with -c.
        if not create_user:
            dotests.remove(CreateUser)
    else:
        # Run only the tests named on the command line.
        dotests = []
        for arg in args:
            for t in tests:
                if t.__name__ == arg:
                    dotests.append(t)
                    break
            else:
                print("Test '%s' unknown - skipping" % arg)
    if not len(dotests):
        print("Nothing to do!")
        usage(tests)
    for test in dotests:
        try:
            test()
        except:
            # Demo runner: a failing test shouldn't abort the remaining tests.
            print("Test %s failed" % test.__name__)
            traceback.print_exc()
main()
| [
"="
] | = |
c0913253a2a5b39283be769ece6b4ad5d083e695 | 2c635d6b558a65e62a9d37c12abf9e4ecbe8938c | /Interleaving String/Interleaving String.py | 078587fe65f8837338f6aae0a16ec2d3583abaf1 | [] | no_license | GreatStephen/MyLeetcodeSolutions | c698e13b7088fc9236250b6ec10331b88fe99ed1 | 73a8f79f2cd5c769b195c503f0346893b102acdc | refs/heads/master | 2023-03-01T04:53:19.698040 | 2021-02-05T22:28:18 | 2021-02-05T22:28:18 | 284,350,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | class Solution:
def isInterleave(self, s1: str, s2: str, s3: str) -> bool:
# 比较简单的dp,dp[i][j]检查s1[:i]和s2[:j]能否组成s3[:i+j]
if len(s1)+len(s2)!=len(s3): return False
R,C = len(s1), len(s2)
dp = [[False]*(C+1) for i in range(R+1)]
dp[0][0] = True
for i in range(R):
if s1[i]==s3[i] and dp[i][0]:
dp[i+1][0] = True
for j in range(C):
if s2[j]==s3[j] and dp[0][j]:
dp[0][j+1] = True
for i in range(1, R+1):
for j in range(1, C+1):
if not dp[i-1][j] and not dp[i][j-1]:
continue
if dp[i][j-1] and s2[j-1]==s3[i+j-1]: # 当前字符与s2相等,检查左边==True
dp[i][j] = True
elif dp[i-1][j] and s1[i-1]==s3[i+j-1]: # 当前字符与s1相等,检查上边==True
dp[i][j] = True
return dp[-1][-1] | [
"[email protected]"
] | |
2eda2055c3ab058a4a5366e1564e1d8455fe7dd3 | e8ae11e5017507da59e2e92d423b6a1994490de4 | /env/lib/python2.7/site-packages/azure/mgmt/compute/models/ssh_public_key.py | 0eb39d154705f352f3944718f3a54a63168a0c13 | [] | no_license | teopeurt/ansible-ubuntu-server | 613d00cea28bc6531acf4a39aeeb9cd0baa2a391 | b5b6127d2ee9723c5088443efe2ffb8ae30cfea7 | refs/heads/master | 2021-06-28T12:49:50.935753 | 2017-07-31T17:34:33 | 2017-07-31T17:34:33 | 98,912,808 | 0 | 1 | null | 2020-07-24T00:05:31 | 2017-07-31T17:32:56 | Makefile | UTF-8 | Python | false | false | 1,813 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SshPublicKey(Model):
    """
    Contains information about SSH certificate public key and the path on the
    Linux VM where the public key is placed.

    NOTE: auto-generated by AutoRest (see the file header); manual edits may
    be lost when the code is regenerated.

    :param path: Gets or sets the full path on the created VM where SSH
     public key is stored. If the file already exists, the specified key is
     appended to the file.
    :type path: str
    :param key_data: Gets or sets Certificate public key used to authenticate
     with VM through SSH. The certificate must be in PEM format with or
     without headers.
    :type key_data: str
    """

    # msrest (de)serialization map: attribute name -> wire key and type.
    # Note 'key_data' serializes under the camelCase wire key 'keyData'.
    _attribute_map = {
        'path': {'key': 'path', 'type': 'str'},
        'key_data': {'key': 'keyData', 'type': 'str'},
    }

    def __init__(self, path=None, key_data=None):
        self.path = path
        self.key_data = key_data
| [
"[email protected]"
] | |
31f3a8fd24cca93817cf22a6198a2aeb399d5091 | 099b4f825cf6ccf7a9795154f2d7a7daa64d4691 | /Python_Programming_For_The_Absolute_Begginer_Scripts/HeroInventoryThree.py | a4f02b299117ad2f7f80a9e270f4c94c8a4214da | [] | no_license | espiercy/py_junkyard | 22637f4b1056cd7571d99dfc14e27a0590695733 | 48204ddd00a366e67e98e2d6a01921b659677d57 | refs/heads/master | 2020-03-23T11:27:46.779467 | 2018-09-22T12:55:08 | 2018-09-22T12:55:08 | 141,504,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,895 | py | #Hero's Inventory 3.0
#Demonstrates lists
#Evan Piercy
#3.21.15
#creates a list with some items and displays with a for loop
inventory = ["sword" , "armor" , "shield" , "healing potion"]
print("Your items: ")
for item in inventory:
print(item)
input("\nPress the enter key to continue.")
#get length of list
print("You have " , len(inventory), "items in your possession.")
input("\nPress the enter key to continue.")
#test for membership with in
if "healing potion" in inventory:
print("You will live to fight another day.")
#display one item through an index
index = int(input("\nEnter the index number for an item in inventory: "))
print("At index " , index , " is " , inventory[index])
#display a slice
start = int(input("Enter the index number to begin a slice: "))
finish = int(input("Enter the index number to end the slice: "))
print("Inventory[" , start , ":" , finish , "] is", end=" ")
print(inventory[start:finish])
input("\nPress the enter key to continue.")
#concatenate two lists
chest = ["gold","gems"]
print("You find a chest wich contains: ")
print(chest)
inventory += chest
print("Your inventory is now: ")
print(inventory)
#assign by index
print("You trade your sword for a crossbow.")
inventory[0] = "crossbow"
print("Your inventory is now: ")
print(inventory)
input("\nPress the enter key to continue.")
#assign by slice
print("You use your gold and gems to buy an orb of future telling.")
inventory[4:6] = ["orb of future telling"]
print("Your inventory is now: ")
print(inventory)
#delete an element
print("In a great battle, your shield is destroyed.")
del inventory[2]
print("Your inventory is now: ")
print(inventory)
input("\nPress the enter key to continue.")
#delete a silce
print("Your crossbow and armor are stolen by thieves.")
del inventory[:2]
print("Your inventory is now: ")
print(inventory)
input("\nPress the enter key to exit.")
| [
"[email protected]"
] | |
3b930786060a864888d8a3a196cfb8da1e0515ef | e9881daa8b4b711ae0c07f7420b3a78193e6c590 | /dataManager/mixed_aishell_tfrecord_io.py | 398d1ab71fdc4daa5f334f1d42b5d820e16edec2 | [] | no_license | IMLHF/Maskbased_Speech_Enhancement | bc81b8e929c49a09cb8bf9e5e891efda4fe2bca9 | f95f630499587d308ff316d34a34c04821f9ea12 | refs/heads/master | 2020-04-17T03:37:36.655021 | 2019-03-10T13:21:22 | 2019-03-10T13:21:22 | 166,192,504 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 17,047 | py | import tensorflow as tf
import numpy as np
import librosa
import os
import shutil
import time
import multiprocessing
import copy
import scipy.io
import datetime
import wave
import utils
from utils import audio_tool
from utils import spectrum_tool
from numpy import linalg
from FLAGS import PARAM
FILE_NAME = __file__[max(__file__.rfind('/')+1, 0):__file__.rfind('.')]
def _ini_data(wave_dir, noise_dir, out_dir):
  """Build the clean/noise mixing lists for train, validation and test_cc.

  Scans speaker sub-directories of `wave_dir`, splits each speaker's
  utterances into the three sets according to PARAM.UTT_SEG_FOR_MIX, then
  pairs random clean utterances with random noise files from `noise_dir`.
  The resulting pair lists are written to `out_dir` both as text (.list)
  and MATLAB (.mat) files.  WARNING: `out_dir` is deleted and recreated.
  """
  data_dict_dir = out_dir
  if os.path.exists(data_dict_dir):
    shutil.rmtree(data_dict_dir)
  os.makedirs(data_dict_dir)
  clean_wav_speaker_set_dir = wave_dir
  os.makedirs(data_dict_dir+'/train')
  os.makedirs(data_dict_dir+'/validation')
  os.makedirs(data_dict_dir+'/test_cc')
  cwl_train_file = open(data_dict_dir+'/train/clean_wav_dir.list', 'a+')
  cwl_validation_file = open(
      data_dict_dir+'/validation/clean_wav_dir.list', 'a+')
  cwl_test_cc_file = open(data_dict_dir+'/test_cc/clean_wav_dir.list', 'a+')
  clean_wav_list_train = []
  clean_wav_list_validation = []
  clean_wav_list_test_cc = []
  speaker_list = os.listdir(clean_wav_speaker_set_dir)
  speaker_list.sort()
  for speaker_name in speaker_list:
    speaker_dir = clean_wav_speaker_set_dir+'/'+speaker_name
    if os.path.isdir(speaker_dir):
      speaker_wav_list = os.listdir(speaker_dir)
      speaker_wav_list.sort()
      # Per-speaker split: [:SEG[0]] -> train, [SEG[0]:SEG[1]] -> validation,
      # [SEG[1]:] -> test_cc.
      for wav in speaker_wav_list[:PARAM.UTT_SEG_FOR_MIX[0]]:
        # Filter out zero-length / corrupt data (size check > 2048 bytes).
        if wav[-4:] == ".wav" and os.path.getsize(speaker_dir+'/'+wav) > 2048:
          cwl_train_file.write(speaker_dir+'/'+wav+'\n')
          clean_wav_list_train.append(speaker_dir+'/'+wav)
      for wav in speaker_wav_list[PARAM.UTT_SEG_FOR_MIX[0]:PARAM.UTT_SEG_FOR_MIX[1]]:
        if wav[-4:] == ".wav" and os.path.getsize(speaker_dir+'/'+wav) > 2048:
          cwl_validation_file.write(speaker_dir+'/'+wav+'\n')
          clean_wav_list_validation.append(speaker_dir+'/'+wav)
      for wav in speaker_wav_list[PARAM.UTT_SEG_FOR_MIX[1]:]:
        if wav[-4:] == ".wav" and os.path.getsize(speaker_dir+'/'+wav) > 2048:
          cwl_test_cc_file.write(speaker_dir+'/'+wav+'\n')
          clean_wav_list_test_cc.append(speaker_dir+'/'+wav)
  cwl_train_file.close()
  cwl_validation_file.close()
  cwl_test_cc_file.close()
  print('train clean: '+str(len(clean_wav_list_train)))
  print('validation clean: '+str(len(clean_wav_list_validation)))
  print('test_cc clean: '+str(len(clean_wav_list_test_cc)))

  # NOISE LIST
  noise_wav_list = os.listdir(noise_dir)
  noise_wav_list = [os.path.join(noise_dir, noise) for noise in noise_wav_list]

  dataset_names = PARAM.DATASET_NAMES
  dataset_mixedutt_num = PARAM.DATASET_SIZES
  all_mixed = 0
  all_stime = time.time()
  for (clean_wav_list, j) in zip((clean_wav_list_train, clean_wav_list_validation, clean_wav_list_test_cc), range(3)):
    print('\n'+dataset_names[j]+" data preparing...")
    s_time = time.time()
    mixed_wav_list_file = open(
        data_dict_dir+'/'+dataset_names[j]+'/mixed_wav_dir.list', 'a+')
    mixed_wave_list = []
    len_wav_list = len(clean_wav_list)
    len_noise_wave_list = len(noise_wav_list)
    # print(len_wav_list,len_noise_wave_list)
    generated_num = 0
    # Draw (clean, noise) pairs uniformly at random until the configured
    # dataset size is reached; pairs may repeat.
    while generated_num < dataset_mixedutt_num[j]:
      uttid = np.random.randint(len_wav_list)
      noiseid = np.random.randint(len_noise_wave_list)
      utt1_dir = clean_wav_list[uttid]
      utt2_dir = noise_wav_list[noiseid]
      generated_num += 1
      mixed_wav_list_file.write(utt1_dir+' '+utt2_dir+'\n')
      mixed_wave_list.append([utt1_dir, utt2_dir])
    # for i_utt in range(len_wav_list): # n^2 mixing: data volume is huge
    #   for j_utt in range(i_utt,len_wav_list):
    #     utt1_dir=clean_wav_list[i_utt]
    #     utt2_dir=clean_wav_list[j_utt]
    #     speaker1 = utt1_dir.split('/')[-2]
    #     speaker2 = utt2_dir.split('/')[-2]
    #     if speaker1 == speaker2:
    #       continue
    #     mixed_wav_list_file.write(utt1_dir+' '+utt2_dir+'\n')
    #     mixed_wave_list.append([utt1_dir, utt2_dir])
    mixed_wav_list_file.close()
    scipy.io.savemat(
        data_dict_dir+'/'+dataset_names[j]+'/mixed_wav_dir.mat', {"mixed_wav_dir": mixed_wave_list})
    all_mixed += len(mixed_wave_list)
    print(dataset_names[j]+' data preparation over, Mixed num: ' +
          str(len(mixed_wave_list))+(', Cost time %dS.') % (time.time()-s_time))
  print('\nData preparation over, all mixed num: %d,cost time: %dS' %
        (all_mixed, time.time()-all_stime))
def _get_padad_waveData(file):
  """Read a wav file and return a random segment of PARAM.LEN_WAWE_PAD_TO samples.

  The waveform is scaled to int16 range (x 32767).  If shorter than the
  target length it is repeatedly doubled by tiling, then a random window of
  exactly LEN_WAWE_PAD_TO samples is cut out.  Exits the process on a
  samplerate mismatch.
  """
  waveData, sr = audio_tool.read_audio(file)
  waveData *= 32767.0
  if(sr != PARAM.FS):
    print("Audio samplerate error.")
    exit(-1)
  # Tile until at least the target length (doubles each pass).
  while len(waveData) < PARAM.LEN_WAWE_PAD_TO:
    waveData = np.tile(waveData, 2)
  len_wave = len(waveData)
  # Random start so repeated reads of the same file yield different windows.
  wave_begin = np.random.randint(len_wave-PARAM.LEN_WAWE_PAD_TO+1)
  waveData = waveData[wave_begin:wave_begin+PARAM.LEN_WAWE_PAD_TO]
  return waveData
def _mix_wav_by_SNR(waveData, noise):
  # Mix speech and noise at a random SNR drawn from [MIN_SNR, MAX_SNR]:
  #   S = (speech + alpha*noise) / (1 + alpha)
  # where alpha scales the noise so the pair hits the target SNR (in dB).
  target_snr = np.random.randint(PARAM.MIN_SNR, PARAM.MAX_SNR+1)
  speech_norm = linalg.norm(waveData)
  noise_norm = linalg.norm(noise)
  if noise_norm != 0:
    alpha = speech_norm / (noise_norm * (10 ** (target_snr / 20)))
  else:
    # Silent noise: alpha = 0 leaves the clean speech unchanged.
    alpha = 0
  return (waveData + alpha * noise) / (1.0 + alpha)
def _mix_wav_LINEAR(waveData, noise):
  # Mix with a random linear coefficient drawn uniformly from
  # [MIN_COEF, MAX_COEF): S = (speech + coef*noise) / (1 + coef).
  mix_coef = PARAM.MIN_COEF + np.random.random() * (PARAM.MAX_COEF - PARAM.MIN_COEF)
  return (waveData + mix_coef * noise) / (1.0 + mix_coef)
def _extract_mag_spec(data):
  # Magnitude spectrum via librosa STFT (NFFT / OVERLAP from PARAM).
  mag_spec = spectrum_tool.magnitude_spectrum_librosa_stft(
      data, PARAM.NFFT, PARAM.OVERLAP)
  return mag_spec
def _extract_phase(data):
  # Phase spectrum via librosa STFT, using the same NFFT/OVERLAP as the
  # magnitude extraction so the two stay frame-aligned.
  theta = spectrum_tool.phase_spectrum_librosa_stft(data,
                                                    PARAM.NFFT,
                                                    PARAM.OVERLAP)
  return theta
def _extract_feature_x_y_xtheta_ytheta(utt_dir1, utt_dir2):
  """Return [mixed mag-spec, clean mag-spec, mixed phase, clean phase].

  utt_dir1 is the clean speech, utt_dir2 serves as the noise signal.
  NOTE(review): if PARAM.MIX_METHOD is neither 'SNR' nor 'LINEAR',
  `mixedData` is never bound and a NameError follows -- confirm PARAM
  validates this setting.
  """
  waveData1 = _get_padad_waveData(utt_dir1)
  waveData2 = _get_padad_waveData(utt_dir2)
  # utt2 is used as the noise signal
  if PARAM.MIX_METHOD == 'SNR':
    mixedData = _mix_wav_by_SNR(waveData1, waveData2)
  if PARAM.MIX_METHOD == 'LINEAR':
    mixedData = _mix_wav_LINEAR(waveData1, waveData2)
  # write mixed wav
  # name1 = utt_dir1[utt_dir1.rfind('/')+1:utt_dir1.rfind('.')]
  # name2 = utt_dir2[utt_dir2.rfind('/')+1:]
  # utils.audio_tool.write_audio('mixwave/mixed_'+name1+"_"+name2,
  #                              mixedData,16000,16,'wav')
  X = _extract_mag_spec(mixedData)
  Y = _extract_mag_spec(waveData1)
  x_theta = _extract_phase(mixedData)
  y_theta = _extract_phase(waveData1)
  return [X, Y, x_theta, y_theta]
def parse_func(example_proto):
  """Parse one SequenceExample into (inputs, labels, 0, 0, length).

  The xtheta/ytheta features are declared in the schema (they exist in the
  records) but are NOT returned -- two constant 0 placeholders keep the
  tuple shape identical to parse_func_with_theta.
  """
  sequence_features = {
      'inputs': tf.FixedLenSequenceFeature(shape=[PARAM.INPUT_SIZE],
                                           dtype=tf.float32),
      'labels': tf.FixedLenSequenceFeature(shape=[PARAM.OUTPUT_SIZE],
                                           dtype=tf.float32),
      'xtheta': tf.FixedLenSequenceFeature(shape=[PARAM.INPUT_SIZE],
                                           dtype=tf.float32),
      'ytheta': tf.FixedLenSequenceFeature(shape=[PARAM.OUTPUT_SIZE],
                                           dtype=tf.float32),
  }
  _, sequence = tf.parse_single_sequence_example(
      example_proto, sequence_features=sequence_features)
  # Number of frames in this utterance.
  length = tf.shape(sequence['inputs'])[0]
  return sequence['inputs'], sequence['labels'], 0, 0, length
def parse_func_with_theta(example_proto):
  """Parse one SequenceExample into (inputs, labels, xtheta, ytheta, length).

  Same schema as parse_func, but the phase features are actually returned.
  """
  sequence_features = {
      'inputs': tf.FixedLenSequenceFeature(shape=[PARAM.INPUT_SIZE],
                                           dtype=tf.float32),
      'labels': tf.FixedLenSequenceFeature(shape=[PARAM.OUTPUT_SIZE],
                                           dtype=tf.float32),
      'xtheta': tf.FixedLenSequenceFeature(shape=[PARAM.INPUT_SIZE],
                                           dtype=tf.float32),
      'ytheta': tf.FixedLenSequenceFeature(shape=[PARAM.OUTPUT_SIZE],
                                           dtype=tf.float32),
  }
  _, sequence = tf.parse_single_sequence_example(
      example_proto, sequence_features=sequence_features)
  # Number of frames in this utterance.
  length = tf.shape(sequence['inputs'])[0]
  return sequence['inputs'], sequence['labels'], sequence['xtheta'], sequence['ytheta'], length
def _gen_tfrecord_minprocess(
    dataset_index_list, s_site, e_site, dataset_dir, i_process):
  """Worker: write utterances [s_site, e_site) of the index list to one
  TFRecord file named '<i_process>.tfrecords' inside dataset_dir.

  Each record is a SequenceExample with per-frame float feature lists
  'inputs', 'labels', 'xtheta', 'ytheta'.
  """
  tfrecord_savedir = os.path.join(dataset_dir, ('%08d.tfrecords' % i_process))
  with tf.python_io.TFRecordWriter(tfrecord_savedir) as writer:
    for i in range(s_site, e_site):
      index_ = dataset_index_list[i]
      # index_ = [clean_wav_path, noise_wav_path]
      X_Y_Xtheta_Ytheta = _extract_feature_x_y_xtheta_ytheta(index_[0],
                                                             index_[1])
      X = np.reshape(np.array(X_Y_Xtheta_Ytheta[0], dtype=np.float32),
                     newshape=[-1, PARAM.INPUT_SIZE])
      Y = np.reshape(np.array(X_Y_Xtheta_Ytheta[1], dtype=np.float32),
                     newshape=[-1, PARAM.OUTPUT_SIZE])
      Xtheta = np.reshape(np.array(X_Y_Xtheta_Ytheta[2], dtype=np.float32),
                          newshape=[-1, PARAM.INPUT_SIZE])
      Ytheta = np.reshape(np.array(X_Y_Xtheta_Ytheta[3], dtype=np.float32),
                          newshape=[-1, PARAM.OUTPUT_SIZE])
      # print(np.mean(X),np.sqrt(np.var(X)),np.median(X),np.max(X),np.min(X))
      # print(np.mean(X),np.sqrt(np.var(X)),np.median(X),np.max(Y),np.min(Y))
      # One Feature per frame (row); FloatList holds the per-bin values.
      input_features = [
          tf.train.Feature(float_list=tf.train.FloatList(value=input_))
          for input_ in X]
      label_features = [
          tf.train.Feature(float_list=tf.train.FloatList(value=label))
          for label in Y]
      xtheta_features = [
          tf.train.Feature(float_list=tf.train.FloatList(value=xtheta))
          for xtheta in Xtheta]
      ytheta_features = [
          tf.train.Feature(float_list=tf.train.FloatList(value=ytheta))
          for ytheta in Ytheta]
      feature_list = {
          'inputs': tf.train.FeatureList(feature=input_features),
          'labels': tf.train.FeatureList(feature=label_features),
          'xtheta': tf.train.FeatureList(feature=xtheta_features),
          'ytheta': tf.train.FeatureList(feature=ytheta_features),
      }
      feature_lists = tf.train.FeatureLists(feature_list=feature_list)
      record = tf.train.SequenceExample(feature_lists=feature_lists)
      writer.write(record.SerializeToString())
    writer.flush()
    # print(dataset_dir + ('/%08d.tfrecords' % i), 'write done')
def generate_tfrecord(gen=True):
  """Generate (or just locate) the train/validation/test_cc TFRecord sets.

  If `gen` is True, rebuilds the mixing lists via _ini_data, wipes any
  existing TFRecord directories and regenerates them with a process pool.
  Returns three glob patterns ('<dir>/*.tfrecords') for the three sets.
  """
  tfrecords_dir = PARAM.TFRECORDS_DIR
  train_tfrecords_dir = os.path.join(tfrecords_dir, 'train')
  val_tfrecords_dir = os.path.join(tfrecords_dir, 'validation')
  testcc_tfrecords_dir = os.path.join(tfrecords_dir, 'test_cc')
  dataset_dir_list = [train_tfrecords_dir,
                      val_tfrecords_dir,
                      testcc_tfrecords_dir]
  if gen:
    _ini_data(PARAM.RAW_DATA, PARAM.NOISE_DIR, PARAM.DATA_DICT_DIR)
    if os.path.exists(train_tfrecords_dir):
      shutil.rmtree(train_tfrecords_dir)
    if os.path.exists(val_tfrecords_dir):
      shutil.rmtree(val_tfrecords_dir)
    if os.path.exists(testcc_tfrecords_dir):
      shutil.rmtree(testcc_tfrecords_dir)
    os.makedirs(train_tfrecords_dir)
    os.makedirs(val_tfrecords_dir)
    os.makedirs(testcc_tfrecords_dir)
    gen_start_time = time.time()
    pool = multiprocessing.Pool(PARAM.PROCESS_NUM_GENERATE_TFERCORD)
    for dataset_dir in dataset_dir_list:
      # start_time = time.time()
      dataset_index_list = None
      # Dispatch on the directory-name suffix: 'in'->train, 'on'->validation,
      # 'cc'->test_cc.
      if dataset_dir[-2:] == 'in':
        # continue
        dataset_index_list = scipy.io.loadmat(
            '_data/mixed_aishell/train/mixed_wav_dir.mat')["mixed_wav_dir"]
      elif dataset_dir[-2:] == 'on':
        dataset_index_list = scipy.io.loadmat(
            '_data/mixed_aishell/validation/mixed_wav_dir.mat')["mixed_wav_dir"]
      elif dataset_dir[-2:] == 'cc':
        dataset_index_list = scipy.io.loadmat(
            '_data/mixed_aishell/test_cc/mixed_wav_dir.mat')["mixed_wav_dir"]
      # Strings loaded from .mat are padded to equal length, so the padding
      # spaces are stripped here.
      dataset_index_list = [[index_[0].replace(' ', ''),
                             index_[1].replace(' ', '')] for index_ in dataset_index_list]
      len_dataset = len(dataset_index_list)
      minprocess_utt_num = int(
          len_dataset/PARAM.TFRECORDS_NUM)
      # Split the index list into TFRECORDS_NUM slices; the last slice
      # absorbs the remainder.
      for i_process in range(PARAM.TFRECORDS_NUM):
        s_site = i_process*minprocess_utt_num
        e_site = s_site+minprocess_utt_num
        if i_process == (PARAM.TFRECORDS_NUM-1):
          e_site = len_dataset
        # print(s_site,e_site)
        pool.apply_async(_gen_tfrecord_minprocess,
                         (dataset_index_list,
                          s_site,
                          e_site,
                          dataset_dir,
                          i_process))
        # _gen_tfrecord_minprocess(dataset_index_list,
        #                          s_site,
        #                          e_site,
        #                          dataset_dir,
        #                          i_process)
      # print(dataset_dir+' set extraction over. cost time %06dS' %
      #       (time.time()-start_time))
    pool.close()
    pool.join()
    print('Generate TFRecord over. cost time %06dS' %
          (time.time()-gen_start_time))
  train_set = os.path.join(train_tfrecords_dir, '*.tfrecords')
  val_set = os.path.join(val_tfrecords_dir, '*.tfrecords')
  testcc_set = os.path.join(testcc_tfrecords_dir, '*.tfrecords')
  return train_set, val_set, testcc_set
def get_batch_use_tfdata(tfrecords_list, get_theta=False):
  """Build a padded-batch tf.data pipeline over a TFRecord glob pattern.

  Returns (x, y, xtheta, ytheta, lengths, iterator); xtheta/ytheta are the
  constant 0 placeholders unless get_theta is True (see parse_func).
  The iterator is initializable and must be initialized by the caller.
  """
  files = tf.data.Dataset.list_files(tfrecords_list)
  files = files.take(PARAM.MAX_TFRECORD_FILES_USED)
  if PARAM.SHUFFLE:
    files = files.shuffle(PARAM.PROCESS_NUM_GENERATE_TFERCORD)
  if not PARAM.SHUFFLE:
    # Deterministic order: read files one at a time, batch_size records each.
    dataset = files.interleave(tf.data.TFRecordDataset,
                               cycle_length=1,
                               block_length=PARAM.batch_size,
                               # num_parallel_calls=1,
                               )
  else: # shuffle
    # Interleave many files in parallel to decorrelate neighboring records.
    dataset = files.interleave(tf.data.TFRecordDataset,
                               cycle_length=PARAM.batch_size*3,
                               # block_length=1,
                               num_parallel_calls=PARAM.num_threads_processing_data,
                               )
  if PARAM.SHUFFLE:
    dataset = dataset.shuffle(PARAM.batch_size*3)
  # region
  # !tf.data with tf.device(cpu) OOM???
  # dataset = dataset.map(
  #     map_func=parse_func,
  #     num_parallel_calls=NNET_PARAM.num_threads_processing_data)
  # dataset = dataset.padded_batch(
  #     NNET_PARAM.batch_size,
  #     padded_shapes=([None, NNET_PARAM.INPUT_SIZE],
  #                    [None, NNET_PARAM.OUTPUT_SIZE],
  #                    [None, NNET_PARAM.OUTPUT_SIZE],
  #                    []))
  # endregion
  # !map_and_batch efficient is better than map+paded_batch
  dataset = dataset.apply(tf.data.experimental.map_and_batch(
      map_func=parse_func_with_theta if get_theta else parse_func,
      batch_size=PARAM.batch_size,
      num_parallel_calls=PARAM.num_threads_processing_data,
      # num_parallel_batches=2,
  ))
  # dataset = dataset.prefetch(buffer_size=NNET_PARAM.batch_size) # prefetch costs too much memory and gives no clear speed-up
  dataset_iter = dataset.make_initializable_iterator()
  x_batch, y_batch, xtheta, ytheta, lengths_batch = dataset_iter.get_next()
  return x_batch, y_batch, xtheta, ytheta, lengths_batch, dataset_iter
def _get_batch_use_tfdata(tfrecords_list, get_theta=False):
  """Alternative pipeline: sample batches across per-file datasets.

  Treats each TFRecord file as a 'class' and uses choose_from_datasets with
  a random selector so every batch draws from batch_size different files.
  tfrecords_list is a glob pattern; [:-11] strips the '*.tfrecords' suffix
  to recover the directory path.
  """
  files = os.listdir(tfrecords_list[:-11])
  files = files[:min(PARAM.MAX_TFRECORD_FILES_USED, len(files))]
  files = [os.path.join(tfrecords_list[:-11], file) for file in files]
  dataset_list = [tf.data.TFRecordDataset(file).map(parse_func_with_theta if get_theta else parse_func,
                                                    num_parallel_calls=PARAM.num_threads_processing_data) for file in files]

  num_classes = PARAM.MAX_TFRECORD_FILES_USED
  num_classes_per_batch = PARAM.batch_size
  # With num_classes_per_batch == batch_size this is always 1 record per file.
  num_utt_per_class = PARAM.batch_size//num_classes_per_batch

  def generator(_):
    # Sample `num_classes_per_batch` classes for the batch
    sampled = tf.random_shuffle(tf.range(num_classes))[:num_classes_per_batch]
    # Repeat each element `num_images_per_class` times
    batch_labels = tf.tile(tf.expand_dims(sampled, -1), [1, num_utt_per_class])
    return tf.to_int64(tf.reshape(batch_labels, [-1]))

  selector = tf.contrib.data.Counter().map(generator)
  selector = selector.apply(tf.contrib.data.unbatch())
  dataset = tf.data.experimental.choose_from_datasets(dataset_list, selector)
  dataset = dataset.batch(num_classes_per_batch * num_utt_per_class)
  # dataset = dataset.prefetch(buffer_size=NNET_PARAM.batch_size) # prefetch costs too much memory and gives no clear speed-up
  dataset_iter = dataset.make_initializable_iterator()
  x_batch, y_batch, xtheta, ytheta, lengths_batch = dataset_iter.get_next()
  return x_batch, y_batch, xtheta, ytheta, lengths_batch, dataset_iter
| [
"[email protected]"
] | |
d253cc3acea300934927e626015ca9d87ed976b8 | 5215ee22217a0bddc6a6eae3b0e49c1216a77bbc | /snippets/genomescan/extractHmmerResults.py | 3b6b53509ce7664ff31e629035a853ba702a58e0 | [
"Artistic-2.0"
] | permissive | PapenfussLab/Mungo | 5cda4d19a5ef4cb51da495f7abf259b4cd4d1697 | 02c5b0e48ecd28596cb9481b282753859f47fed6 | refs/heads/master | 2021-01-17T07:40:08.832760 | 2015-08-20T01:21:19 | 2015-08-20T01:21:19 | 41,067,597 | 1 | 3 | null | 2016-06-12T23:31:20 | 2015-08-20T01:02:28 | Python | UTF-8 | Python | false | false | 2,136 | py | #!/usr/bin/env python
"""
extractHmmerResults.py <ioDir>
Input: Hmmer output 'DEFB.txt'
Outputs:
- the genomic version (DEFB_genomic.txt)
- a summary (DEFB_summary.txt)
- extracted sequence (DEFB_extracted.fa)
- the translation of this (DEFB_extracted_pep.fa)
Author: Tony Papenfuss
Date: Tue Aug 15 10:18:46 EST 2006
"""
import os, sys
import hmmer, fasta, sequence
# NOTE: Python 2 script ('print >>' statements).
# Reads Hmmer domain hits (DEFB.txt) from the directory given as argv[1],
# maps them to genomic coordinates, flags low-scoring hits/pseudogenes,
# counts cysteines, and writes genomic/summary/extracted-sequence outputs.
homeDir = os.environ['HOME']
blastdb = os.path.join(homeDir, 'databases/opossum/assembly/blastdb/assembly')
ioDir = sys.argv[1]
os.chdir(ioDir)
genomicFile = open('DEFB_genomic.txt', 'w')
summaryFile = open('DEFB_summary.txt', 'w')
dnaWriter = fasta.MfaWriter('DEFB_extracted.fa')
pepWriter = fasta.MfaWriter('DEFB_extracted_pep.fa')
domains = hmmer.loadDomains('DEFB.txt', seqType='BlockSixFrame')
# Header row: original domain fields plus the four columns added below.
print >> genomicFile, '\t'.join(domains[0].fields
    + ['strand', 'lowScoring', 'pseudogene', 'nCysteines'])
for i,domain in enumerate(domains):
    # Only the top 100 hits are considered at all.
    if i>99: break
    domain.domain = 'DEFB_%0.2i' % (i+1)
    domain.toGenomic(relative=True)
    domain.addField('lowScoring', 'N')
    domain.addField('pseudogene', 'N')
    domain.addField('nCysteines', 0)
    summary = []
    h,s = fasta.getSequence(blastdb, domain.accession,
        start=domain.sStart, end=domain.sEnd, strand=domain.strand)
    pep = sequence.translate(s)
    # Hits ranked beyond 60 are marked low scoring.
    if i>59: domain.lowScoring = 'Y'
    if '*' in pep:
        # In-frame stop codon => likely pseudogene.
        domain.pseudogene = 'Y'
        summary.append('Contains stops')
    for aa in pep:
        if aa=='C':
            domain.nCysteines += 1
    # Beta-defensins are expected to carry 6 cysteines; note any deviation.
    if domain.nCysteines!=6:
        summary.append('Has %i cysteines' % domain.nCysteines)
    print >> summaryFile, '%s\t%s' % (domain.domain, '; '.join(summary))
    # Skip pseudogenes and hits with an implausible cysteine count (<5 or >7).
    if domain.pseudogene=='Y' or domain.nCysteines<5 or domain.nCysteines>7:
        print 'Skipped', i
    else:
        h2 = '%s %s:%s-%s(%s)' % (domain.domain, domain.accession, domain.sStart, domain.sEnd, domain.strand)
        dnaWriter.write(h2, s + '\n')
        pepWriter.write(h2, pep + '\n')
        print >> genomicFile, domain
genomicFile.close()
summaryFile.close()
dnaWriter.close()
pepWriter.close()
| [
"[email protected]"
] | |
c0251a93285a9c216b2e657cd6f5ee75e88fd87c | e2f507e0b434120e7f5d4f717540e5df2b1816da | /363-prime.py | eec06779fac42ecf5ef0cc3980108a841e115439 | [] | no_license | ash/amazing_python3 | 70984bd32ae325380382b1fe692c4b359ef23395 | 64c98940f8a8da18a8bf56f65cc8c8e09bd00e0c | refs/heads/master | 2021-06-23T14:59:37.005280 | 2021-01-21T06:56:33 | 2021-01-21T06:56:33 | 182,626,874 | 76 | 25 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | def is_prime(n):
if n < 2: # 1 is not prime
return False
for m in range(2, n):
if n % m == 0:
return False
return True
for x in range(50):
if is_prime(x):
print(x)
| [
"[email protected]"
] | |
f41d9ea36dbee55843bb4d1ff6dba6377fb63e81 | 9d278285f2bc899ac93ec887b1c31880ed39bf56 | /ondoc/common/migrations/0016_auto_20190206_2246.py | e67cae7f190201b42f384a4f3f519d771ff61775 | [] | no_license | ronit29/docprime | 945c21f8787387b99e4916cb3ba1618bc2a85034 | 60d4caf6c52a8b70174a1f654bc792d825ba1054 | refs/heads/master | 2023-04-01T14:54:10.811765 | 2020-04-07T18:57:34 | 2020-04-07T18:57:34 | 353,953,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | # Generated by Django 2.0.5 on 2019-02-06 17:16
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations (see header comment); alters
    # Remark.content to a plain TextField (drops prior field options).

    dependencies = [
        ('common', '0015_remark_status'),
    ]

    operations = [
        migrations.AlterField(
            model_name='remark',
            name='content',
            field=models.TextField(),
        ),
    ]
| [
"[email protected]"
] | |
bc4fb90c32c6b9ad7010a25b5c8a0a524ed26ae7 | 277d4ee56616bb5930c57a57c68a202bf5085501 | /stubs/torch/nn/parallel/replicate.pyi | f337d4b8bd699302940ca17613883b9bed788aa2 | [
"MIT"
] | permissive | miskolc/spacy-pytorch-transformers | fc502523644eb25cb293e0796b46535ba581a169 | ab132b674c5a91510eb8cc472cdbdf5877d24145 | refs/heads/master | 2020-07-22T09:47:17.905850 | 2019-09-04T15:12:09 | 2019-09-04T15:12:09 | 207,156,566 | 1 | 0 | MIT | 2019-09-08T18:37:55 | 2019-09-08T18:37:55 | null | UTF-8 | Python | false | false | 221 | pyi | # Stubs for torch.nn.parallel.replicate (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from typing import Any
# Stub signature only (auto-generated .pyi for torch.nn.parallel.replicate);
# the body is intentionally elided.
def replicate(network: Any, devices: Any, detach: bool = ...): ...
| [
"[email protected]"
] | |
653f6d79473f69595094f08d93101160a95787c1 | b4c6200590a093b805036a822b7889c058494b9f | /NAIP/filtering.py | 49c68190426cd82bef3e77a82e6cd004d85c5efc | [
"MIT"
] | permissive | spoddar-JNPR/earthengine-py-notebooks | 2109a52a49357c19f803b76ed635e022ee486ac6 | ff1b5754785d5e25cb11acdbd52b0f31711d061f | refs/heads/master | 2022-12-25T10:34:44.895717 | 2020-10-01T05:38:16 | 2020-10-01T05:38:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,455 | py | # %%
"""
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/NAIP/filtering.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/NAIP/filtering.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/NAIP/filtering.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
"""
# %%
"""
## Install Earth Engine API and geemap
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.
The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.
**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
"""
# %%
# Installs geemap package
import subprocess
try:
import geemap
except ImportError:
print('geemap package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
# Checks whether this notebook is running on Google Colab
try:
import google.colab
import geemap.eefolium as geemap
except:
import geemap
# Authenticates and initializes Earth Engine
import ee
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
# %%
"""
## Create an interactive map
The default basemap is `Google Maps`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/basemaps.py) can be added using the `Map.add_basemap()` function.
"""
# %%
# Interactive map centered on the continental US; the bare `Map` expression
# on the last line of the cell renders the widget in the notebook.
Map = geemap.Map(center=[40,-100], zoom=4)
Map
# %%
"""
## Add Earth Engine Python script
"""
# %%
# Add Earth Engine dataset
# NAIP aerial imagery, clipped to polygons loaded from a Fusion Table.
collection = ee.ImageCollection('USDA/NAIP/DOQQ')
fromFT = ee.FeatureCollection('ft:1CLldB-ULPyULBT2mxoRNv7enckVF0gCQoD2oH7XP')
polys = fromFT.geometry()
centroid = polys.centroid()
# getInfo() pulls the centroid coordinates client-side as [lng, lat].
lng, lat = centroid.getInfo()['coordinates']
print("lng = {}, lat = {}".format(lng, lat))

# lat = 46.80514
# lng = -99.22023
lng_lat = ee.Geometry.Point(lng, lat)
# naip = collection.filterBounds(lng_lat)
naip = collection.filterBounds(polys)
# Keep only 2015 acquisitions, then mosaic the tiles and clip to the polygons.
naip_2015 = naip.filterDate('2015-01-01', '2015-12-31')
ppr = naip_2015.mosaic().clip(polys)

# print(naip_2015.size().getInfo())

# False-color display: NIR / Red / Green bands.
vis = {'bands': ['N', 'R', 'G']}
Map.setCenter(lng, lat, 10)
# Map.addLayer(naip_2015,vis)
Map.addLayer(ppr,vis)
# Map.addLayer(fromFT)


# image = ee.Image('USDA/NAIP/DOQQ/m_4609915_sw_14_1_20100629')
# Map.setCenter(lng, lat, 12)
# Map.addLayer(image,vis)
# %%
"""
## Display Earth Engine data layers
"""
# %%
# Final cell: add the layer control and render the map widget.
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map
"[email protected]"
] | |
9650f1d30391a7d26762cbdb090a2f7a374bd8d4 | b731d1b35a5416cdd73d421ea3b88a3a18e4c6d3 | /ecliptic/support/sequtils.py | 8f51fb3e102d63eb8f64f97e3d6b21a2e5d694ec | [] | no_license | xflicsu/ecliptic | ad772d3563cff1875dddc7d29d156093e03afd07 | e9d2e671bcabc5df30ada0cf42953769099ad5d7 | refs/heads/master | 2020-12-28T21:06:37.212834 | 2013-06-18T14:13:23 | 2013-06-18T14:13:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,702 | py | # Copyright (c) 2011-2012 Hyeshik Chang
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# - Hyeshik Chang <[email protected]>
#
__all__ = [
'iterseq_sequential',
'GiantFASTAFile',
'reverse_complement',
'iter_windowed',
'iter_windowed_str',
'get_first_sequence_length',
]
from Bio import SeqIO
import string
import gzip
from collections import deque
import os
import re
def iterseq_sequential(fastapath):
    """Coroutine: yield FASTA records matching ids sent in file order.

    Prime with next()/send(None), then send() a sequence id; the record is
    yielded when the requested id matches the current record.  Ids must be
    requested in the same order they appear in the file -- records are
    scanned forward only and non-matching ones are skipped permanently.
    """
    nextid = yield
    for seq in SeqIO.parse(open(fastapath, 'r'), format='fasta'):
        # The inner while lets the same record be served repeatedly if the
        # caller sends the same id again.
        while seq.name == nextid:
            nextid = yield seq
import pysam
# provides an interface like 'samtools faidx'.
# Strip blanks/tabs/newlines from raw file slices before returning sequence.
whitespace = re.compile('[ \t\r\n]')

class GiantFASTAFile(object):
    """Random access to sequences in a large FASTA file via a .fai index."""

    def __init__(self, filename):
        # Build the .fai index with pysam if it does not exist yet.
        if not os.path.exists(filename + '.fai'):
            pysam.faidx(filename)
        self.fasta = open(filename)
        self.index = self.load_index(filename + '.fai')

    def load_index(self, filename):
        """Parse the .fai file: seqid -> (length, offset, colwidth, linesize)."""
        index = {}
        for line in open(filename):
            fields = line[:-1].split('\t')
            index[fields[0]] = tuple(map(int, fields[1:]))
        return index

    def get(self, seqid, start=None, stop=None, strand='+'): # zero-based, half-open
        """Return seq[start:stop] (whole sequence if both None); '-' strand
        returns the reverse complement."""
        length, filepos, colwidth, linesize = self.index[seqid]

        if start is None and stop is None:
            # Whole sequence: end offset accounts for one line-terminator
            # block (linesize - colwidth bytes) per full line of bases.
            offset_st = filepos
            linenum_en = length // colwidth
            offset_en = filepos + length + linenum_en * (linesize - colwidth)
        else:
            # Clamp the requested window to the sequence bounds, then map
            # base coordinates to byte offsets: each preceding full line
            # contributes (linesize - colwidth) extra terminator bytes.
            start = max(0, start)
            stop = min(length, stop)
            linenum_st = start // colwidth
            offset_st = filepos + start + linenum_st * (linesize - colwidth)
            linenum_en = stop // colwidth
            offset_en = filepos + stop + linenum_en * (linesize - colwidth)

        self.fasta.seek(offset_st, 0)
        seq = whitespace.sub('', self.fasta.read(offset_en - offset_st))
        return seq if strand == '+' else reverse_complement(seq)
revcmptrans = string.maketrans('ATUGCatugc', 'TAACGtaacg')
def reverse_complement(seq):
return seq.translate(revcmptrans)[::-1]
def iter_windowed(it, width):
i = iter(it)
queue = deque()
for _ in range(width):
queue.append(i.next())
yield tuple(queue)
for next in i:
queue.popleft()
queue.append(next)
yield tuple(queue)
def iter_windowed_str(it, width):
for r in iter_windowed(it, width):
yield ''.join(r)
def get_first_sequence_length(path, format='fastq-illumina', gzipped=True):
if gzipped:
opener = gzip.open
else:
opener = open
return len(SeqIO.parse(opener(path), format=format).next().seq)
| [
"[email protected]"
] | |
2cb7966f9848c20ac6a089497ae7bb6742d0382b | 7d2ee33675a0b0bd3c25ee22766eca3b658efbab | /tests/validate_json_schemas.py | 879f38c7c26e8804615b48b00ca61940d6ebae9e | [
"MIT"
] | permissive | AD-SDL/polybot-schema | 8caef7d6ae01f5723b01e620a6d364b4fcb2ebc9 | 8bc3dbf39e8d71ac6279baa40f08679a3bdbb80a | refs/heads/main | 2023-06-27T03:50:20.856468 | 2021-07-29T17:30:45 | 2021-07-29T17:30:45 | 389,676,088 | 0 | 0 | MIT | 2021-07-26T17:38:07 | 2021-07-26T15:12:15 | Python | UTF-8 | Python | false | false | 663 | py | from jsonschema import Draft7Validator, RefResolver
from pathlib import Path
import json
# Find all the schemas
schema_path = Path(__file__).parent / '..' / 'json'
schemas = schema_path.rglob('*.json')
# Loop through to make sure they are all valid
for schema in schemas:
print(f'Checking {schema.relative_to(schema_path)}...', end="")
# Load in the schema
with open(schema) as fp:
schema = json.load(fp)
# Pull in the references
validator = Draft7Validator(Draft7Validator.META_SCHEMA,
resolver=RefResolver('file:///{}/'.format(schema_path), schema))
validator.validate(schema)
print('OK')
| [
"[email protected]"
] | |
c30c48fa7dd725d285fdae6c1a5dd6d450f5beef | ce498e1b0fd9c2ae9ebd04c5834f4052a9a46219 | /tuples/sorting-with-value.py | 01e568039bac15ac185b8e92f489cf4c611c3c0e | [] | no_license | wechuli/python | b32e84bb44646810f3f03f9fcfb67e7d8f54ebb6 | 40cc1782724179567128f6da202f166a0c9a0ea1 | refs/heads/master | 2022-02-20T19:18:03.578752 | 2019-09-24T17:19:27 | 2019-09-24T17:19:27 | 117,241,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | my_nary={'The':'Heart','is':"exceedingly",'deceitful':'and','wicked':'above','all':'things'}
my_nary2={'The':5,'is':85,'deceitful':2,'wicked':-8,'all':22,'things':85}
my_list=list(my_nary2.items())
my_lis2=list()
for key,vals in my_list:
my_lis2.append((vals,key))
my_lis2.sort(reverse=True)
print(my_lis2)
| [
"[email protected]"
] | |
327ff89e94562230518071cd35204a19b43fe8bd | b3aafe94bc88f8a9b4cb14422d179752b717173a | /models/master_v2.py | 40d4ddaa75f328830d812252e7ddb381837f397b | [] | no_license | hongweizeng/paragraph-level-QG | 47a5502a8555749ed5b45d00ded9dcb20578c2d8 | 5061d9464fb6c728fd1a943e364b127249f10077 | refs/heads/main | 2023-04-10T20:20:18.831684 | 2021-04-15T08:24:41 | 2021-04-15T08:24:41 | 307,886,614 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,735 | py | from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from torch_scatter import scatter_max
from search.searcher import Hypothesis, sort_hypotheses
from models.modules.stacked_rnn import StackedGRU
from models.modules.concat_attention import ConcatAttention
from models.modules.maxout import MaxOut
INF = 1e12
class Embeddings(nn.Module):
"""Construct the embeddings from word, pos_tag, ner_tag, dep_tag, cas_tag and answer_tag embeddings.
"""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
self.feature_tag_embeddings = nn.Embedding(config.feature_tag_vocab_size, config.feature_tag_embedding_size)
self.answer_tag_embeddings = nn.Embedding(config.answer_tag_vocab_size, config.answer_tag_embedding_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
# concatenated_embedding_size = config.embedding_size + config.feature_tag_embedding_size * config.feature_num\
# + config.answer_tag_embedding_size
# self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids=None, feature_tag_ids_dict=None, answer_tag_ids=None):
embeddings = self.word_embeddings(input_ids)
# embeddings = self.LayerNorm(embeddings)
if feature_tag_ids_dict is not None:
for feature_tag_ids in feature_tag_ids_dict.values():
feature_tag_embeddings = self.feature_tag_embeddings(feature_tag_ids)
embeddings = torch.cat([embeddings, feature_tag_embeddings], dim=2)
if answer_tag_ids is not None:
answer_tag_embeddings = self.answer_tag_embeddings(answer_tag_ids)
embeddings = torch.cat([embeddings, answer_tag_embeddings], dim=2)
# embeddings = inputs_embeds + feature_tag_embeddings + answer_tag_embeddings
# embeddings = self.dropout(embeddings)
return embeddings
class BilinearSeqAttn(nn.Module):
"""A bilinear attention layer over a sequence X w.r.t y:
* o_i = softmax(x_i'Wy) for x_i in X.
Optionally don't normalize output weights.
"""
def __init__(self, x_size, y_size, identity=False, normalize=True):
super(BilinearSeqAttn, self).__init__()
self.normalize = normalize
self.ysize = y_size
self.xsize = x_size
# If identity is true, we just use a dot product without transformation.
if not identity:
self.linear = nn.Linear(y_size, x_size)
else:
self.linear = None
def forward(self, x, y, x_mask=None):
"""
Args:
x: batch * len * hdim1
y: batch * len_sc * hdim2
x_mask: batch * len (1 for padding, 0 for true)
Output:
alpha = batch * len
xWy = batch * len_sc * len
"""
batch_size = y.size(0)
ly = y.size(1)
y = y.view(-1, self.ysize)
Wy = self.linear(y) if self.linear is not None else y
Wy = Wy.view(batch_size, ly, self.ysize)
Wy = Wy.permute(0, 2, 1)
xWy = x.bmm(Wy)
xWy = xWy.permute(0, 2, 1)
if x_mask is not None:
# xWy.data.masked_fill_(x_mask.data.unsqueeze(1).repeat(1, ly, 1), -float('inf'))
x_mask = x_mask.unsqueeze(1).repeat(1, ly, 1)
xWy = xWy * (1 - x_mask) + x_mask * (-100000)
alpha = F.softmax(xWy, dim=-1) # batch * len_sc * len
return alpha
class Encoder(nn.Module):
def __init__(self, config):
super(Encoder, self).__init__()
rnn_input_size = config.embedding_size + config.feature_tag_embedding_size * config.feature_num + \
config.answer_tag_embedding_size
self.num_directions = 2 if config.brnn else 1
assert config.enc_rnn_size % self.num_directions == 0
self.hidden_size = config.enc_rnn_size
rnn_hidden_size = self.hidden_size // self.num_directions
# self.hidden_size = config.enc_rnn_size // self.num_directions
# rnn_hidden_size = self.hidden_size
self.rnn = nn.GRU(rnn_input_size, rnn_hidden_size,
num_layers=config.enc_num_layers,
dropout=config.dropout,
bidirectional=config.brnn, batch_first=True)
self.wf = nn.Linear(2 * self.hidden_size, self.hidden_size, bias=False)
self.wg = nn.Linear(2 * self.hidden_size, self.hidden_size, bias=False)
self.attn = BilinearSeqAttn(self.hidden_size, self.hidden_size)
def gated_self_attn(self, queries, memories, mask):
# queries: [b,t,d]
# memories: [b,t,d]
# mask: [b,t]
energies = torch.matmul(queries, memories.transpose(1, 2)) # [b, t, t]
mask = mask.unsqueeze(1)
energies = energies.masked_fill(mask == 0, value=-1e12)
scores = F.softmax(energies, dim=2)
context = torch.matmul(scores, queries)
inputs = torch.cat([queries, context], dim=2)
f_t = torch.tanh(self.update_layer(inputs))
g_t = torch.sigmoid(self.gate(inputs))
updated_output = g_t * f_t + (1 - g_t) * queries
return updated_output
def forward(self, paragraph_embedded, paragraph_mask, evidences_embedded=None, evidences_mask=None):
paragraph_lengths = paragraph_mask.sum(1).tolist()
paragraph_packed = pack_padded_sequence(paragraph_embedded, paragraph_lengths,
batch_first=True, enforce_sorted=False)
paragraph_outputs, paragraph_state = self.rnn(paragraph_packed)
paragraph_outputs, _ = pad_packed_sequence(paragraph_outputs, batch_first=True,
total_length=paragraph_mask.size(1))
evidences_lengths = evidences_mask.sum(1).tolist()
evidences_packed = pack_padded_sequence(evidences_embedded, evidences_lengths,
batch_first=True, enforce_sorted=False)
evidences_outputs, hidden_t = self.rnn(evidences_packed)
evidences_outputs, _ = pad_packed_sequence(evidences_outputs, batch_first=True,
total_length=evidences_mask.size(1))
# paragraph_outputs = paragraph_outputs.permute(1, 0, 2).contiguous()
# evidences_outputs = evidences_outputs.permute(1, 0, 2).contiguous()
batch_size = paragraph_outputs.size(0)
# T = outputs2.size(1) # context sentence length (word level)
J = paragraph_outputs.size(1) # source sentence length (word level)
# para_pad_mask = Variable(parainput[0].eq(s2s.Constants.PAD).float(), requires_grad=False,
# volatile=False).transpose(0, 1)
# para_pad_mask = Variable(input[0].eq(s2s.Constants.PAD).float(), requires_grad=False,
# volatile=False).transpose(0, 1)
# this_paragraph_mask = 1.0 - paragraph_mask.long()
# scores = self.attn(paragraph_outputs, evidences_outputs, this_paragraph_mask) # batch * len_sc * len_para
this_evidences_mask = 1.0 - evidences_mask.long()
scores = self.attn(evidences_outputs, paragraph_outputs, this_evidences_mask) # batch * len_sc * len_para
# context = scores.unsqueeze(1).bmm(source_hiddens).squeeze(1)
# shape = (batch_size, T, J, self.hidden_size) # (N, T, J, 2d)
# embd_context = outputs2.unsqueeze(2) # (N, T, 1, 2d)
# embd_context = embd_context.expand(shape) # (N, T, J, 2d)
# embd_source = outputs.unsqueeze(1) # (N, 1, J, 2d)
# embd_source = embd_source.expand(shape) # (N, T, J, 2d)
# a_elmwise_mul_b = torch.mul(embd_context, embd_source) # (N, T, J, 2d)
# # cat_data = torch.cat((embd_context_ex, embd_source_ex, a_elmwise_mul_b), 3) # (N, T, J, 6d), [h;u;h◦u]
# S = self.W(torch.cat((embd_context, embd_source, a_elmwise_mul_b), 3)).view(batch_size, T, J) # (N, T, J)
#
# para_pad_mask = para_pad_mask.unsqueeze(2).repeat(1, 1, J)
# S = S*(1-para_pad_mask) + para_pad_mask*(-1000000)
# self_att = F.softmax(S, dim=-2).permute(0, 2, 1)
#
q2c = torch.bmm(scores, evidences_outputs) # (N, J, 2d) = bmm( (N, J, T), (N, T, 2d) )
# emb2 = pack(torch.cat((q2c.permute(1, 0, 2), outputs.permute(1, 0, 2)), dim=-1), lengths)
# outputs_f, hidden_t_f = self.rnn2(emb2, hidden)
# if isinstance(input, tuple):
# outputs_f = unpack(outputs_f)[0]
f_sc = torch.tanh(self.wf(torch.cat((paragraph_outputs, q2c), dim=-1).view(-1, self.hidden_size * 2)))
g_sc = torch.sigmoid(self.wg(torch.cat((paragraph_outputs, q2c), dim=-1).view(-1, self.hidden_size * 2)))
x = g_sc * f_sc + (1 - g_sc) * (paragraph_outputs.view(-1, self.hidden_size))
# x = x.view(batch_size, J, 2 * self.hidden_size)
x = x.view(batch_size, J, self.hidden_size)
_, b, d = paragraph_state.size()
h = paragraph_state.view(2, 2, b, d) # [n_layers, bi, b, d]
h = torch.cat((h[:, 0, :, :], h[:, 1, :, :]), dim=-1)
return x, h
class Decoder(nn.Module):
def __init__(self, config):
super().__init__()
self.vocab_size = config.vocab_size
hidden_size = config.dec_rnn_size
self.encoder_trans = nn.Linear(hidden_size, hidden_size)
self.reduce_layer = nn.Linear(
config.embedding_size + hidden_size, config.embedding_size)
self.lstm = nn.LSTM(config.embedding_size, hidden_size, batch_first=True,
num_layers=config.dec_num_layers, bidirectional=False, dropout=config.dropout)
self.concat_layer = nn.Linear(2 * hidden_size, hidden_size)
self.logit_layer = nn.Linear(hidden_size, config.vocab_size)
self.use_pointer = config.use_pointer
self.UNK_ID = config.unk_token_id
@staticmethod
def attention(query, memories, mask):
# query : [b, 1, d]
energy = torch.matmul(query, memories.transpose(1, 2)) # [b, 1, t]
energy = energy.squeeze(1).masked_fill(mask == 0, value=-1e12)
attn_dist = F.softmax(energy, dim=1).unsqueeze(dim=1) # [b, 1, t]
context_vector = torch.matmul(attn_dist, memories) # [b, 1, d]
return context_vector, energy
def get_encoder_features(self, encoder_outputs):
return self.encoder_trans(encoder_outputs)
def forward(self, trg_seq_embedded, ext_src_seq, init_states, encoder_outputs, encoder_mask):
# trg_seq : [b,t]
# init_states : [2,b,d]
# encoder_outputs : [b,t,d]
# init_states : a tuple of [2, b, d]
# device = trg_seq.device
# batch_size, max_len = trg_seq.size()
device = trg_seq_embedded.device
batch_size, max_len, _ = trg_seq_embedded.size()
hidden_size = encoder_outputs.size(-1)
memories = self.get_encoder_features(encoder_outputs)
logits = []
# init decoder hidden states and context vector
# prev_states = init_states
prev_states = (init_states, init_states)
prev_context = torch.zeros((batch_size, 1, hidden_size))
prev_context = prev_context.to(device)
coverage_sum = None
coverage_output = []
attention_output = []
energies = []
for i in range(max_len):
# y_i = trg_seq[:, i].unsqueeze(1) # [b, 1]
# embedded = self.embedding(y_i) # [b, 1, d]
embedded = trg_seq_embedded[:, i, :].unsqueeze(1) # [b, 1, d]
lstm_inputs = self.reduce_layer(
torch.cat([embedded, prev_context], 2))
output, states = self.lstm(lstm_inputs, prev_states)
# encoder-decoder attention
context, energy = self.attention(output, memories, encoder_mask)
concat_input = torch.cat((output, context), dim=2).squeeze(dim=1)
logit_input = torch.tanh(self.concat_layer(concat_input))
logit = self.logit_layer(logit_input) # [b, |V|]
# maxout pointer network
if self.use_pointer:
num_oov = max(torch.max(ext_src_seq - self.vocab_size + 1), 0)
# zeros = torch.zeros((batch_size, num_oov)).type(dtype=logit.dtype) #TODO:
zeros = logit.data.new_zeros(size=(batch_size, num_oov))
extended_logit = torch.cat([logit, zeros], dim=1)
out = torch.zeros_like(extended_logit) - INF
out, _ = scatter_max(energy, ext_src_seq, out=out) #TODO: scatter_sum.
out = out.masked_fill(out == -INF, 0)
logit = extended_logit + out
logit = logit.masked_fill(logit == 0, -INF)
logits.append(logit)
energies.append(energy)
# update prev state and context
prev_states = states
prev_context = context
if coverage_sum is None:
# coverage_sum = context.data.new(encoder_outputs.size(0), encoder_outputs.size(1)).zero_().clone().detach()
coverage_sum = context.data.new_zeros(size=(encoder_outputs.size(0), encoder_outputs.size(1)))
# avg_tmp_coverage = coverage_sum / (i + 1)
# coverage_loss = torch.sum(torch.min(energy, avg_tmp_coverage), dim=1)
coverage_output.append(coverage_sum.data.clone())
attn_dist = F.softmax(energy, dim=1) # [b, t]
attention_output.append(attn_dist)
coverage_sum += attn_dist
# logits = torch.stack(logits, dim=1) # [b, t, |V|]
return logits, attention_output, coverage_output, energies
def decode(self, embedded_y, ext_x, prev_states, prev_context, encoder_features, encoder_mask):
# forward one step lstm
# y : [b]
# embedded = self.embedding(y.unsqueeze(1))
embedded = embedded_y.unsqueeze(1)
lstm_inputs = self.reduce_layer(torch.cat([embedded, prev_context], 2))
output, states = self.lstm(lstm_inputs, prev_states)
context, energy = self.attention(output,
encoder_features,
encoder_mask)
concat_input = torch.cat((output, context), 2).squeeze(1)
logit_input = torch.tanh(self.concat_layer(concat_input))
logit = self.logit_layer(logit_input) # [b, |V|]
if self.use_pointer:
# batch_size = y.size(0)
batch_size = embedded_y.size(0)
num_oov = max(torch.max(ext_x - self.vocab_size + 1), 0)
# zeros = torch.zeros((batch_size, num_oov)).type(dtype=logit.dtype)
zeros = logit.data.new_zeros(size=(batch_size, num_oov))
extended_logit = torch.cat([logit, zeros], dim=1)
out = torch.zeros_like(extended_logit) - INF
out, _ = scatter_max(energy, ext_x, out=out)
out = out.masked_fill(out == -INF, 0)
logit = extended_logit + out
logit = logit.masked_fill(logit == -INF, 0)
# forcing UNK prob 0
logit[:, self.UNK_ID] = -INF
return logit, states, context
class DecoderV2(nn.Module):
def __init__(self, config):
super(DecoderV2, self).__init__()
self.vocab_size = config.vocab_size
self.layers = config.dec_num_layers
self.input_feed = config.input_feed
input_size = config.embedding_size
if self.input_feed:
input_size += config.enc_rnn_size
# num_directions = 2 if config.brnn else 1
# self.enc_dec_transformer = nn.Linear(config.enc_rnn_size // num_directions, config.dec_rnn_size)
self.enc_dec_transformer = nn.Linear(config.enc_rnn_size, config.dec_rnn_size)
self.rnn = nn.LSTM(config.dec_num_layers, input_size, config.dec_rnn_size, config.dropout)
self.attn = ConcatAttention(config.enc_rnn_size, config.dec_rnn_size, config.ctx_attn_size)
self.dropout = nn.Dropout(config.dropout)
self.readout = nn.Linear((config.enc_rnn_size + config.dec_rnn_size + config.embedding_size), config.dec_rnn_size)
self.maxout = MaxOut(config.maxout_pool_size)
self.maxout_pool_size = config.maxout_pool_size
self.copySwitch_l1 = nn.Linear(config.enc_rnn_size + config.dec_rnn_size, 1)
# self.copySwitch2 = nn.Linear(opt.ctx_rnn_size + opt.dec_rnn_size, 1)
self.hidden_size = config.dec_rnn_size
# self.cover = []
self.logit_layer = nn.Linear(config.dec_rnn_size // config.maxout_pool_size, config.vocab_size)
self.use_pointer = config.use_pointer
self.UNK_ID = config.unk_token_id
def init_rnn_hidden(self, enc_hidden):
# enc_hidden = torch.cat([enc_states[0], enc_states[-1]], dim=-1)
dec_hidden = self.enc_dec_transformer(enc_hidden)
return dec_hidden
def forward(self, trg_seq_embedded, ext_src_seq, enc_states, encoder_outputs, encoder_mask):
device = trg_seq_embedded.device
batch_size, max_len, _ = trg_seq_embedded.size()
hidden_size = encoder_outputs.size(-1)
# memories = self.get_encoder_features(encoder_outputs)
memories = encoder_outputs
logits = []
energies = []
# init decoder hidden states and context vector
pre_hidden = self.init_rnn_hidden(enc_states)
pre_context = torch.zeros((batch_size, hidden_size))
pre_context = pre_context.to(device)
pre_compute = None
self.attn.applyMask(encoder_mask)
coverage_output = []
attention_output = []
coverage = memories.data.new_zeros(size=(encoder_outputs.size(0), encoder_outputs.size(1)))
for i in range(max_len):
# Embedding
embedded = trg_seq_embedded[:, i, :] # [b, d]
input_emb = embedded
if self.input_feed:
input_emb = torch.cat([embedded, pre_context], 1)
# Decoder
output, hidden = self.rnn(input_emb, pre_hidden)
# Encoder-Decoder Attention
context, attn_dist, pre_compute, energy = self.attn(output, memories, coverage, pre_compute, encoder_mask)
# Maxout
readout = self.readout(torch.cat((embedded, output, context), dim=1))
maxout = self.maxout(readout)
output = self.dropout(maxout)
logit = self.logit_layer(output) # [b, |V|]
# # maxout pointer network
# if self.use_pointer:
# num_oov = max(torch.max(ext_src_seq - self.vocab_size + 1), 0)
# # zeros = torch.zeros((batch_size, num_oov)).type(dtype=logit.dtype) #TODO:
# zeros = logit.data.new_zeros(size=(batch_size, num_oov))
# extended_logit = torch.cat([logit, zeros], dim=1)
# out = torch.zeros_like(extended_logit) - INF
# out, _ = scatter_max(energy, ext_src_seq, out=out) #TODO: scatter_sum.
# # out = scatter_mean(energy, ext_src_seq, out=out) #TODO: scatter_sum.
# out = out.masked_fill(out == -INF, 0)
# logit = extended_logit + out
# logit = logit.masked_fill(logit == 0, -INF)
logits.append(logit)
energies.append(energy)
pre_context = context
pre_hidden = hidden
coverage_output.append(coverage.data.clone())
attention_output.append(attn_dist)
coverage = coverage + attn_dist
# logits = torch.stack(logits, dim=1) # [b, t, |V|]
return logits, attention_output, coverage_output, energies
def decode(self, embedded_y, ext_x, pre_hidden, prev_context, memories, encoder_mask, coverage, pre_compute=None):
# Embedding
input_emb = embedded_y
if self.input_feed:
input_emb = torch.cat([embedded_y, prev_context], 1)
# Decoder
output, hidden = self.rnn(input_emb, pre_hidden)
# Encoder-Decoder Attention
context, attn_dist, pre_compute, energy = self.attn(output, memories, coverage, pre_compute, encoder_mask)
# Maxout
readout = self.readout(torch.cat((embedded_y, output, context), dim=1))
maxout = self.maxout(readout)
output = self.dropout(maxout)
logit = self.logit_layer(output) # [b, |V|]
coverage = coverage + attn_dist
if self.use_pointer:
# batch_size = y.size(0)
batch_size = embedded_y.size(0)
num_oov = max(torch.max(ext_x - self.vocab_size + 1), 0)
# zeros = torch.zeros((batch_size, num_oov)).type(dtype=logit.dtype)
zeros = logit.data.new_zeros(size=(batch_size, num_oov))
extended_logit = torch.cat([logit, zeros], dim=1)
out = torch.zeros_like(extended_logit) - INF
out, _ = scatter_max(energy, ext_x, out=out)
# out = scatter_mean(energy, ext_x, out=out)
out = out.masked_fill(out == -INF, 0)
logit = extended_logit + out
logit = logit.masked_fill(logit == -INF, 0)
# forcing UNK prob 0
logit[:, self.UNK_ID] = -INF
return logit, hidden, context, coverage, pre_compute
class Model(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.embeddings = Embeddings(config)
self.encoder = Encoder(config)
self.decoder = Decoder(config)
# self.decoder = DecoderV2(config)
def run_enc_embeddings(self, batch_data):
paragraph_feature_tag_ids_dict = {
'ner': batch_data.paragraph_ner_tag_ids,
'pos': batch_data.paragraph_pos_tag_ids,
# 'dep': batch_data.paragraph_dep_tag_ids,
# 'cas': batch_data.paragraph_cas_tag_ids,
}
paragraph_mask = (batch_data.paragraph_ids != batch_data.pad_token_id)
paragraph_embeddings = self.embeddings(input_ids=batch_data.paragraph_ids,
feature_tag_ids_dict=paragraph_feature_tag_ids_dict,
answer_tag_ids=batch_data.paragraph_ans_tag_ids)
evidences_feature_tag_ids_dict = {
'ner': batch_data.evidences_ner_tag_ids,
'pos': batch_data.evidences_pos_tag_ids,
# 'dep': batch_data.paragraph_dep_tag_ids,
# 'cas': batch_data.paragraph_cas_tag_ids,
}
evidences_mask = (batch_data.evidences_ids != batch_data.pad_token_id)
evidences_embeddings = self.embeddings(input_ids=batch_data.evidences_ids,
feature_tag_ids_dict=evidences_feature_tag_ids_dict,
answer_tag_ids=batch_data.evidences_ans_tag_ids)
return paragraph_embeddings, paragraph_mask, evidences_embeddings, evidences_mask
def forward(self, batch_data):
"""
Arguments:
attention_mask: torch.Tensor with 1 indicating tokens to ATTEND to
"""
paragraph_embeddings, paragraph_mask, evidences_embeddings, evidences_mask = self.run_enc_embeddings(batch_data)
enc_outputs, enc_states = self.encoder(paragraph_embeddings, paragraph_mask,
evidences_embeddings, evidences_mask)
sos_trg = batch_data.question_ids[:, :-1].contiguous()
embedding_output_for_decoder = self.embeddings(input_ids=sos_trg)
logits, attention_output, coverage_output, energies = self.decoder(
embedding_output_for_decoder, batch_data.paragraph_extended_ids, enc_states, enc_outputs, paragraph_mask)
model_output = {
'logits': logits,
'attentions': attention_output,
'coverages': coverage_output,
'energies': energies
}
return model_output
def beam_search(self, batch_data, beam_size,
tok2idx, TRG_SOS_INDEX, TRG_UNK_INDEX, TRG_EOS_INDEX,
min_decode_step, max_decode_step, device):
paragraph_embeddings, paragraph_mask, evidences_embeddings, evidences_mask = self.run_enc_embeddings(batch_data)
attention_mask = paragraph_mask
enc_outputs, enc_states = self.encoder(paragraph_embeddings, paragraph_mask,
evidences_embeddings, evidences_mask)
prev_context = torch.zeros(1, enc_outputs.size(-1)).cuda(device=device)
coverage = enc_outputs.data.new_zeros(size=(1, enc_outputs.size(1)))
# h, c = enc_states # [2, b, d] but b = 1
hypotheses = [Hypothesis(tokens=[TRG_SOS_INDEX],
log_probs=[0.0],
# state=(h[:, 0, :], c[:, 0, :]),
state=self.decoder.init_rnn_hidden(enc_states)[:, 0, :],
context=prev_context[0],
coverage=coverage[0]) for _ in range(beam_size)]
# tile enc_outputs, enc_mask for beam search
ext_src_seq = batch_data.paragraph_extended_ids.repeat(beam_size, 1)
enc_outputs = enc_outputs.repeat(beam_size, 1, 1)
# enc_features = self.decoder.get_encoder_features(enc_outputs)
memories = enc_outputs
enc_mask = attention_mask.repeat(beam_size, 1)
num_steps = 0
results = []
while num_steps < max_decode_step and len(results) < beam_size:
latest_tokens = [h.latest_token for h in hypotheses]
latest_tokens = [idx if idx < len(
tok2idx) else TRG_UNK_INDEX for idx in latest_tokens]
prev_y = torch.tensor(latest_tokens, dtype=torch.long, device=device).view(-1)
# if config.use_gpu:
# prev_y = prev_y.to(self.device)
# make batch of which size is beam size
# all_state_h = []
# all_state_c = []
all_state = []
all_context = []
all_coverage = []
for h in hypotheses:
# state_h, state_c = h.state # [num_layers, d]
# all_state_h.append(state_h)
# all_state_c.append(state_c)
all_state.append(h.state)
all_context.append(h.context)
all_coverage.append(h.coverage)
# prev_h = torch.stack(all_state_h, dim=1) # [num_layers, beam, d]
# prev_c = torch.stack(all_state_c, dim=1) # [num_layers, beam, d]
# prev_states = (prev_h, prev_c)
prev_states = torch.stack(all_state, dim=1) # [num_layers, beam, d]
prev_context = torch.stack(all_context, dim=0)
prev_coverage = torch.stack(all_coverage, dim=0)
# [beam_size, |V|]
embedded_prev_y = self.embeddings(prev_y)
logits, states, context_vector, coverage_vector, pre_compute = self.decoder.decode(embedded_prev_y, ext_src_seq,
prev_states, prev_context,
# enc_features, enc_mask)
memories, enc_mask, prev_coverage, pre_compute=None)
# h_state, c_state = states
log_probs = F.log_softmax(logits, dim=1)
top_k_log_probs, top_k_ids \
= torch.topk(log_probs, beam_size * 2, dim=-1)
all_hypotheses = []
num_orig_hypotheses = 1 if num_steps == 0 else len(hypotheses)
for i in range(num_orig_hypotheses):
h = hypotheses[i]
# state_i = (h_state[:, i, :], c_state[:, i, :])
state_i = states[:, i, :]
context_i = context_vector[i]
coverage_i = coverage_vector[i]
for j in range(beam_size * 2):
token = top_k_ids[i][j].item()
score = top_k_log_probs[i][j].item()
# vocab_entropy = torch.sum(-1 * log_probs[i] * torch.log(log_probs[i]), 1) / torch.log(len(tok2idx))
# copy_entropy = torch.sum(-1 * context_i * torch.log(context_i), 1) / torch.log(paragraph_embeddings.size(1))
new_h = h.extend(token=token,
log_prob=score,
state=state_i,
context=context_i,
coverage=coverage_i)
all_hypotheses.append(new_h)
hypotheses = []
for h in sort_hypotheses(all_hypotheses):
if h.latest_token == TRG_EOS_INDEX:
if num_steps >= min_decode_step:
results.append(h)
else:
hypotheses.append(h)
if len(hypotheses) == beam_size or len(results) == beam_size:
break
num_steps += 1
if len(results) == 0:
results = hypotheses
h_sorted = sort_hypotheses(results)
return h_sorted[0]
| [
"[email protected]"
] | |
92d598793743eeca4071fbfe7bad4091607dfc23 | a67a987ed078da0a1de2908c8c0e08070dee65b1 | /genice/lattices/Struct45.py | 11dfe1c4b51d9a767557cc9b8e42befaf7c351cf | [] | no_license | Python3pkg/GenIce | ef1ce7ee2997c10e08dde75ac36050a653cd4fc5 | 1e9458b7bf8e0fd2ad5d0c4f8987cea0ae7ca0b0 | refs/heads/master | 2021-01-21T17:31:51.595858 | 2017-05-21T14:09:32 | 2017-05-21T14:09:32 | 91,962,047 | 0 | 0 | null | 2017-05-21T14:09:28 | 2017-05-21T14:09:28 | null | UTF-8 | Python | false | false | 8,385 | py | """
Data source: Dutour Sikirić, Mathieu, Olaf Delgado-Friedrichs, and Michel Deza. “Space Fullerenes: a Computer Search for New Frank-Kasper Structures” Acta Crystallographica Section A Foundations of Crystallography 66.Pt 5 (2010): 602–615.
Cage composition:
(12,14,15,16) = (14,14,8,0,)
"""
pairs="""
31 128
36 90
140 120
197 56
133 68
132 50
76 103
105 30
161 192
135 118
25 102
147 51
34 77
47 43
145 139
13 160
50 111
159 117
187 125
48 7
25 176
170 202
18 120
24 165
180 4
148 53
141 155
136 6
93 3
112 102
185 66
67 154
173 111
185 30
17 180
172 8
130 203
80 92
114 169
205 122
205 121
70 189
28 61
134 181
26 3
199 92
4 129
35 134
172 19
107 37
79 119
70 191
140 89
202 60
125 179
103 61
180 139
148 84
191 188
180 98
64 89
20 194
106 150
12 71
80 176
158 16
148 27
148 173
123 97
29 126
65 51
9 165
130 187
2 69
118 201
170 50
2 38
94 5
174 83
34 121
11 127
199 158
32 189
171 166
51 43
59 164
128 127
77 78
190 107
82 140
176 91
24 105
46 179
112 197
138 173
22 201
183 165
66 116
164 43
18 86
14 166
17 63
130 170
52 178
157 57
23 36
135 175
29 88
97 85
175 54
115 81
193 84
20 145
47 198
143 202
86 3
91 99
144 67
178 111
1 41
50 155
149 146
59 65
168 200
47 0
198 38
97 79
145 126
31 75
39 85
85 38
57 177
4 137
63 16
199 107
6 72
94 121
19 0
29 71
144 86
117 171
115 55
197 137
54 90
123 72
156 177
9 90
130 153
45 129
15 26
183 201
95 4
68 159
138 41
42 79
133 83
182 110
151 39
78 61
171 129
70 184
195 95
195 96
182 84
62 186
33 160
44 194
71 167
47 196
18 33
70 90
177 168
21 13
109 11
100 124
169 102
103 125
198 3
138 155
77 152
93 120
23 62
192 167
177 181
190 169
44 102
190 176
17 106
19 192
30 127
21 74
75 9
72 42
5 189
119 147
163 37
66 75
1 153
106 145
10 162
192 91
191 205
114 126
31 118
112 126
88 8
53 73
172 71
136 69
82 76
54 146
174 162
69 42
141 185
168 137
149 7
110 40
52 170
113 24
173 89
127 153
17 157
103 203
129 56
1 141
161 163
155 187
57 98
2 151
22 128
83 87
134 158
120 154
152 116
67 55
32 183
142 76
82 73
28 46
136 65
64 101
113 204
109 31
27 203
162 98
134 169
58 7
20 99
36 188
204 5
204 184
28 41
117 95
45 87
60 193
150 25
157 194
63 181
187 30
14 133
115 110
142 179
14 54
49 0
79 26
160 110
28 152
92 8
23 117
72 43
11 78
159 96
150 92
114 99
75 184
200 48
44 56
114 167
175 48
121 201
132 104
20 25
174 139
15 33
122 186
188 118
151 74
57 87
58 87
80 19
186 188
113 152
97 33
13 119
142 89
113 100
160 154
58 171
125 124
85 86
143 153
195 7
131 49
164 80
58 68
131 163
10 96
161 65
144 81
23 14
161 74
5 165
156 166
64 154
63 137
149 122
109 191
32 205
190 59
12 199
76 202
52 193
46 100
49 38
74 0
62 200
123 49
135 9
6 12
1 116
59 167
175 186
35 99
198 42
81 26
156 83
133 10
140 178
21 123
82 27
100 108
150 16
109 34
29 158
122 96
45 139
196 55
138 179
2 163
36 183
35 88
64 53
168 162
15 40
132 101
136 107
67 39
195 98
182 111
46 104
200 95
106 88
52 27
194 45
128 116
131 6
101 60
24 66
77 22
21 51
178 40
112 16
60 73
144 53
15 193
35 157
55 93
44 181
174 197
18 73
78 124
203 41
37 91
93 40
105 22
156 56
10 62
48 166
146 68
143 104
34 204
12 164
108 11
143 61
131 172
32 159
132 142
37 8
141 104
146 189
105 124
115 119
69 147
182 101
149 94
184 108
185 108
13 39
81 84
196 151
196 147
135 94
"""
waters="""
0.80883 0.0 0.27714
0.19624 0.5 0.91946
0.875 0.5 0.26494
0.0 0.81173 0.17048
0.0 0.875 0.56455
0.80883 0.31617 0.76311
0.375 0.68383 0.30737
0.875 0.5 0.64549
0.81173 0.68383 0.37885
0.19117 0.31617 0.76311
0.5 0.875 0.62994
0.5 0.875 0.85451
0.31328 0.80883 0.34573
0.5 0.25 0.19172
0.31328 0.19117 0.65427
0.31617 0.81173 0.12116
0.18828 0.80376 0.45959
0.875 0.67876 0.5
0.68383 0.81173 0.12116
0.81173 0.0 0.32952
0.81173 0.19624 0.45959
0.5 0.125 0.23506
0.0 0.81173 0.82952
0.18828 0.0 0.67048
0.0 0.18828 0.82952
0.0 0.125 0.43545
0.19117 0.68673 0.15427
0.0 0.68673 0.02414
0.80376 0.5 0.91946
0.5 0.80376 0.41946
0.19117 0.0 0.88698
0.31617 0.625 0.80737
0.80883 0.0 0.72287
0.5 0.875 0.14549
0.68383 0.625 0.80737
0.625 0.5 0.45146
0.19117 0.0 0.72287
0.875 0.5 0.35451
0.80883 0.68383 0.23689
0.68383 0.375 0.19264
0.19117 0.0 0.11302
0.0 0.5 0.95173
0.19117 0.68383 0.23689
0.19117 0.0 0.27714
0.125 0.32124 0.5
0.81173 0.19624 0.54041
0.67876 0.31328 0.92732
0.0 0.0 0.25
0.125 0.5 0.64549
0.68383 0.80883 0.26311
0.32124 0.125 0.0
0.31617 0.19117 0.26311
0.19624 0.81173 0.04041
0.80376 0.5 0.08055
0.375 0.31617 0.69264
0.0 0.18828 0.17048
0.18828 0.19624 0.54041
0.625 0.5 0.54854
0.81173 0.31617 0.62116
0.31328 0.19117 0.34573
0.5 0.625 0.04854
0.67876 0.68673 0.92732
0.31328 0.80883 0.65427
0.125 0.67876 0.5
0.67876 0.31328 0.07268
0.375 0.31617 0.30737
0.19117 0.31328 0.84573
0.80883 0.31328 0.15427
0.68673 0.19117 0.65427
0.125 0.5 0.26494
0.5 0.125 0.76494
0.5 0.875 0.37007
0.31617 0.80883 0.26311
0.67876 0.68673 0.07268
0.68383 0.19117 0.26311
0.31617 0.375 0.80737
0.67876 0.875 0.0
0.80883 0.68673 0.84573
0.68383 0.81173 0.87885
0.31617 0.625 0.19264
0.0 0.0 0.35478
0.125 0.5 0.12994
0.80376 0.81173 0.04041
0.5 0.19624 0.58055
0.19624 0.5 0.08055
0.68383 0.625 0.19264
0.80883 0.68673 0.15427
0.68673 0.32124 0.57268
0.68673 0.67876 0.42732
0.80376 0.18828 0.04041
0.31617 0.19117 0.73689
0.81173 0.31617 0.37885
0.0 0.80883 0.38698
0.0 0.0 0.14523
0.875 0.5 0.73506
0.0 0.80883 0.61302
0.68673 0.80883 0.65427
0.5 0.75 0.19172
0.68673 0.67876 0.57268
0.68673 0.32124 0.42732
0.68383 0.18828 0.87885
0.5 0.375 0.04854
0.18828 0.19624 0.45959
0.80376 0.81173 0.95959
0.5 0.375 0.95146
0.0 0.0 0.85478
0.81173 0.80376 0.45959
0.125 0.5 0.35451
0.5 0.125 0.85451
0.5 0.75 0.80828
0.31617 0.18828 0.12116
0.19624 0.18828 0.04041
0.31328 0.0 0.47586
0.80883 0.31328 0.84573
0.5 0.19624 0.41946
0.19117 0.31328 0.15427
0.125 0.5 0.87007
0.0 0.0 0.64523
0.19117 0.68383 0.76311
0.31617 0.375 0.19264
0.80883 0.0 0.11302
0.80883 0.68383 0.76311
0.625 0.68383 0.69264
0.5 0.875 0.23506
0.80883 0.0 0.88698
0.875 0.0 0.93545
0.5 0.0 0.45173
0.31617 0.81173 0.87885
0.19117 0.68673 0.84573
0.0 0.125 0.56455
0.19624 0.81173 0.95959
0.625 0.68383 0.30737
0.5 0.25 0.0
0.5 0.125 0.62994
0.375 0.5 0.45146
0.125 0.5 0.73506
0.25 0.5 0.30828
0.18828 0.80376 0.54041
0.0 0.31328 0.97586
0.68673 0.0 0.52414
0.875 0.0 0.06455
0.32124 0.31328 0.92732
0.67876 0.125 0.0
0.5 0.625 0.95146
0.875 0.5 0.12994
0.68673 0.0 0.47586
0.625 0.31617 0.69264
0.19117 0.31617 0.23689
0.0 0.5 0.04828
0.75 0.5 0.69172
0.0 0.875 0.43545
0.80883 0.31617 0.23689
0.875 0.5 0.87007
0.32124 0.68673 0.92732
0.68383 0.18828 0.12116
0.19624 0.18828 0.95959
0.31328 0.32124 0.57268
0.75 0.5 0.5
0.31328 0.67876 0.42732
0.81173 0.0 0.67048
0.5 0.125 0.14549
0.625 0.31617 0.30737
0.5 0.80376 0.58055
0.75 0.5 0.30828
0.18828 0.0 0.32952
0.0 0.19117 0.77714
0.18828 0.31617 0.62116
0.5 0.125 0.37007
0.31328 0.67876 0.57268
0.31328 0.32124 0.42732
0.32124 0.875 0.0
0.0 0.19117 0.61302
0.68673 0.80883 0.34573
0.0 0.31328 0.02414
0.5 0.0 0.54828
0.25 0.5 0.69172
0.0 0.19117 0.38698
0.375 0.5 0.54854
0.125 0.0 0.06455
0.80376 0.18828 0.95959
0.81173 0.80376 0.54041
0.25 0.5 0.5
0.32124 0.31328 0.07268
0.0 0.0 0.75
0.5 0.25 0.80828
0.31617 0.18828 0.87885
0.375 0.68383 0.69264
0.125 0.0 0.93545
0.31617 0.80883 0.73689
0.68383 0.19117 0.73689
0.18828 0.31617 0.37885
0.5 0.875 0.76494
0.68673 0.19117 0.34573
0.32124 0.68673 0.07268
0.875 0.32124 0.5
0.81173 0.68383 0.62116
0.0 0.19117 0.22287
0.31328 0.0 0.52414
0.0 0.80883 0.22287
0.18828 0.68383 0.37885
0.18828 0.68383 0.62116
0.0 0.80883 0.77714
0.5 0.75 0.0
0.0 0.68673 0.97586
0.68383 0.375 0.80737
0.68383 0.80883 0.73689
"""
coord= "relative"
cages="""
12 0.0 -0.2531 -0.09655
12 0.0 0.0 0.5
15 -0.5 0.5 -0.12908
15 0.0 0.5 0.56509
12 -0.2531 0.0 0.59655
15 0.5 0.5 -0.62908
14 0.0 -0.26467 -0.6939
12 -0.2531 0.0 0.40345
15 0.5 0.5 0.12908
12 0.0 0.2531 0.09655
14 0.0 -0.26467 0.6939
14 -0.26467 0.0 0.1939
14 0.28496 0.5 0.0
14 -0.26467 0.0 -0.1939
15 -0.5 0.0 -0.06509
15 -0.5 0.5 0.62908
14 0.26467 0.0 -0.1939
14 -0.5 0.28496 0.5
14 0.26467 0.0 0.1939
12 0.0 0.2531 -0.09655
12 0.5 0.0 -0.69756
14 0.0 0.26467 -0.6939
14 0.5 0.5 0.25
15 0.0 0.5 -0.56509
12 -0.5 0.0 0.69756
14 0.5 -0.28496 0.5
14 -0.28496 -0.5 0.0
12 0.0 -0.2531 0.09655
14 0.0 0.26467 0.6939
12 0.0 0.5 -0.19756
15 0.5 0.0 0.06509
12 0.2531 0.0 -0.59655
14 -0.5 0.5 0.75
12 0.2531 0.0 0.59655
12 0.0 0.0 0.0
12 0.0 0.5 0.19756
"""
bondlen = 3
celltype = 'rect'
cell = """
13.050670933678957 13.050670933678957 57.77874231167655
"""
density = 0.625698640770261
| [
"[email protected]"
] | |
eac6d81a7087ff1b93907223fa052c932e2bd838 | b548e7c2940d6e041f15624cd66f2dfe98b64fca | /official/nlp/modeling/layers/transformer_scaffold.py | b0932f7e0b7ead6a213cb3a7a9ca67edf5d86bb4 | [
"Apache-2.0"
] | permissive | ishani-chakraborty/models | 60cfc1ebb7b73b03a95b9ff522bf6289eb81fd24 | a811a3b7e640722318ad868c99feddf3f3063e36 | refs/heads/master | 2022-11-26T18:03:28.501746 | 2020-08-04T03:10:25 | 2020-08-04T03:10:25 | 284,893,619 | 0 | 1 | Apache-2.0 | 2020-08-04T06:14:13 | 2020-08-04T06:14:12 | null | UTF-8 | Python | false | false | 12,636 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras-based transformer scaffold layer."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import gin
import tensorflow as tf
from official.nlp.modeling.layers import attention
@tf.keras.utils.register_keras_serializable(package="Text")
@gin.configurable
class TransformerScaffold(tf.keras.layers.Layer):
  """Transformer scaffold layer.

  This layer implements the Transformer from "Attention Is All You Need".
  (https://arxiv.org/abs/1706.03762), with a customizable attention layer and
  feedforward layer option. Users can pass a class to
  `attention_cls`/`feedforward_cls` and associated config to
  `attention_cfg`/`feedforward_cfg`, in which case the scaffold will
  instantiate the class with the config, or pass a class instance to
  `attention_cls`/`feedforward_cls`.

  Arguments:
    num_attention_heads: Number of attention heads.
    intermediate_size: Size of the intermediate layer.
    intermediate_activation: Activation for the intermediate layer.
    attention_cls: A class to instantiate attention layer, or a layer instance.
    attention_cfg: The config with which to instantiate `attention_cls`. Ignored
      if attention_cls is a layer instance or None. If `attention_cls` is a
      class, but `attention_cfg` is None, following kwargs will be used to
      instantiate the attention instance:
      {
        "num_heads": num_attention_heads,
        "key_size": int(hidden_size // num_attention_heads),
        "dropout": attention_dropout_rate,
        "name": "self_attention"
      }, where `hidden_size` is the input tensor's last dimension.
    feedforward_cls: A class to instantiate feedforward layer, or a layer
      instance. If None, will use the standard feedforward layer as described
      in "Attention Is All You Need" paper. If not None, the instantiated
      feedforward layer is expected to take the output of attention as input
      and its output is this transformer layer's output.
    feedforward_cfg: The config with which to instantiate `feedforward_cls`.
      Ignored if feedforward_cls is a layer instance or is None.
      If `feedforward_cls` is a class, but `feedforward_cfg` is None, following
      kwargs will be used to instantiate the feedforward instance:
      {
        "intermediate_size": intermediate_size,
        "intermediate_activation": intermediate_activation,
        "dropout": dropout_rate,
        "name": "feedforward"
      }.
    dropout_rate: Dropout probability for the post-attention and output dropout.
    attention_dropout_rate: Dropout probability for within the attention layer.
    kernel_initializer: Initializer for dense layer kernels.
    bias_initializer: Initializer for dense layer biases.
    kernel_regularizer: Regularizer for dense layer kernels.
    bias_regularizer: Regularizer for dense layer biases.
    activity_regularizer: Regularizer for dense layer activity.
    kernel_constraint: Constraint for dense layer kernels.
    bias_constraint: Constraint for dense layer kernels.
  """

  def __init__(self,
               num_attention_heads,
               intermediate_size,
               intermediate_activation,
               attention_cls=attention.MultiHeadAttention,
               attention_cfg=None,
               feedforward_cls=None,
               feedforward_cfg=None,
               dropout_rate=0.0,
               attention_dropout_rate=0.0,
               kernel_initializer="glorot_uniform",
               bias_initializer="zeros",
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    super(TransformerScaffold, self).__init__(**kwargs)

    self._attention_cfg = attention_cfg
    self._attention_cls = attention_cls
    self._feedforward_cls = feedforward_cls
    self._feedforward_cfg = feedforward_cfg
    self._num_heads = num_attention_heads
    self._intermediate_size = intermediate_size
    self._intermediate_activation = intermediate_activation
    self._attention_dropout_rate = attention_dropout_rate
    self._dropout_rate = dropout_rate
    self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
    self._bias_initializer = tf.keras.initializers.get(bias_initializer)
    self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
    self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
    # Bug fix: `activity_regularizer` was accepted but never stored, so a
    # caller-supplied value was silently dropped and `build()`/`get_config()`
    # always saw the base Layer's default. Store it like the other
    # regularizers so it actually reaches the dense sublayers.
    self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
    self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
    self._bias_constraint = tf.keras.constraints.get(bias_constraint)

  def build(self, input_shape):
    """Validates input shapes and instantiates all sublayers.

    `input_shape` is either a single [batch, sequence, width] shape or a
    2-tuple of (data shape, attention-mask shape).
    """
    input_tensor = input_shape[0] if len(input_shape) == 2 else input_shape
    input_tensor_shape = tf.TensorShape(input_tensor)
    if len(input_tensor_shape) != 3:
      raise ValueError(
          "TransformerScaffold expects a three-dimensional input of "
          "shape [batch, sequence, width].")
    batch_size, sequence_length, hidden_size = input_tensor_shape

    if len(input_shape) == 2:
      mask_tensor_shape = tf.TensorShape(input_shape[1])
      expected_mask_tensor_shape = tf.TensorShape(
          [batch_size, sequence_length, sequence_length])
      if not expected_mask_tensor_shape.is_compatible_with(mask_tensor_shape):
        raise ValueError("When passing a mask tensor to TransformerLayer, the "
                         "mask tensor must be of shape [batch, "
                         "sequence_length, sequence_length] (here %s). Got a "
                         "mask tensor of shape %s." %
                         (expected_mask_tensor_shape, mask_tensor_shape))
    if hidden_size % self._num_heads != 0:
      raise ValueError(
          "The input size (%d) is not a multiple of the number of attention "
          "heads (%d)" % (hidden_size, self._num_heads))
    self._attention_head_size = int(hidden_size // self._num_heads)

    common_kwargs = dict(
        kernel_initializer=self._kernel_initializer,
        bias_initializer=self._bias_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer,
        activity_regularizer=self._activity_regularizer,
        kernel_constraint=self._kernel_constraint,
        bias_constraint=self._bias_constraint)

    def get_layer_instance(instance_or_cls, config, default_config):
      # Accept either a ready layer instance or a class plus config.
      if isinstance(instance_or_cls, tf.keras.layers.Layer):
        return instance_or_cls
      else:
        if config is None:
          return instance_or_cls(**default_config)
        else:
          return instance_or_cls(**config)

    default_attention_cfg = {
        "num_heads": self._num_heads,
        "key_size": self._attention_head_size,
        "dropout": self._attention_dropout_rate,
        "name": "self_attention"
    }
    default_attention_cfg.update(common_kwargs)
    self._attention_layer = get_layer_instance(
        self._attention_cls,
        config=self._attention_cfg,
        default_config=default_attention_cfg)

    if self._feedforward_cls is not None:
      default_feedforward_cfg = {
          "intermediate_size": self._intermediate_size,
          "intermediate_activation": self._intermediate_activation,
          "dropout": self._dropout_rate,
          "name": "feedforward",
      }
      default_feedforward_cfg.update(common_kwargs)
      self._feedforward_block = get_layer_instance(
          self._feedforward_cls,
          config=self._feedforward_cfg,
          default_config=default_feedforward_cfg)
    else:
      self._feedforward_block = None

    self._attention_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
    # Use float32 in layernorm for numeric stability.
    # It is probably safe in mixed_float16, but we haven't validated this yet.
    self._attention_layer_norm = (
        tf.keras.layers.LayerNormalization(
            name="self_attention_layer_norm", axis=-1, epsilon=1e-12,
            dtype=tf.float32))

    if self._feedforward_block is None:
      self._intermediate_dense = tf.keras.layers.experimental.EinsumDense(
          "abc,cd->abd",
          output_shape=(None, self._intermediate_size),
          bias_axes="d",
          name="intermediate",
          **common_kwargs)
      policy = tf.keras.mixed_precision.experimental.global_policy()
      if policy.name == "mixed_bfloat16":
        # bfloat16 causes BERT with the LAMB optimizer to not converge
        # as well, so we use float32.
        # TODO(b/154538392): Investigate this.
        policy = tf.float32
      self._intermediate_activation_layer = tf.keras.layers.Activation(
          self._intermediate_activation, dtype=policy)
      self._output_dense = tf.keras.layers.experimental.EinsumDense(
          "abc,cd->abd",
          output_shape=(None, hidden_size),
          bias_axes="d",
          name="output",
          **common_kwargs)

    self._output_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
    # Use float32 in layernorm for numeric stability.
    self._output_layer_norm = tf.keras.layers.LayerNormalization(
        name="output_layer_norm", axis=-1, epsilon=1e-12, dtype=tf.float32)

    super(TransformerScaffold, self).build(input_shape)

  def get_config(self):
    """Returns the serializable config, merged with the base Layer config."""
    config = {
        "attention_cls":
            self._attention_layer,
        "feedforward_cls":
            self._feedforward_block,
        "num_attention_heads":
            self._num_heads,
        "intermediate_size":
            self._intermediate_size,
        "intermediate_activation":
            self._intermediate_activation,
        "dropout_rate":
            self._dropout_rate,
        "attention_dropout_rate":
            self._attention_dropout_rate,
        "kernel_initializer":
            tf.keras.initializers.serialize(self._kernel_initializer),
        "bias_initializer":
            tf.keras.initializers.serialize(self._bias_initializer),
        "kernel_regularizer":
            tf.keras.regularizers.serialize(self._kernel_regularizer),
        "bias_regularizer":
            tf.keras.regularizers.serialize(self._bias_regularizer),
        "activity_regularizer":
            tf.keras.regularizers.serialize(self._activity_regularizer),
        "kernel_constraint":
            tf.keras.constraints.serialize(self._kernel_constraint),
        "bias_constraint":
            tf.keras.constraints.serialize(self._bias_constraint)
    }
    base_config = super(TransformerScaffold, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def call(self, inputs):
    """Runs self-attention + residual + (optional) feedforward sublayers.

    Args:
      inputs: either a [batch, seq, width] tensor, or a (tensor, mask) pair
        where mask is [batch, seq, seq].

    Returns:
      A tensor of the same shape as the data input.
    """
    if isinstance(inputs, (list, tuple)) and len(inputs) == 2:
      input_tensor, attention_mask = inputs
    else:
      input_tensor, attention_mask = (inputs, None)

    attention_output = self._attention_layer(
        query=input_tensor, value=input_tensor, attention_mask=attention_mask)
    attention_output = self._attention_dropout(attention_output)
    attention_output = self._attention_layer_norm(input_tensor +
                                                  attention_output)
    if self._feedforward_block is None:
      intermediate_output = self._intermediate_dense(attention_output)
      intermediate_output = self._intermediate_activation_layer(
          intermediate_output)
      layer_output = self._output_dense(intermediate_output)
      layer_output = self._output_dropout(layer_output)
      # During mixed precision training, attention_output is from layer norm
      # and is always fp32 for now. Cast layer_output to fp32 for the subsequent
      # add.
      layer_output = tf.cast(layer_output, tf.float32)
      layer_output = self._output_layer_norm(layer_output + attention_output)
    else:
      layer_output = self._feedforward_block(attention_output)

    return layer_output
| [
"[email protected]"
] | |
9edcfd8e23912adb9f02ffa2158ad3e4a1c337a0 | 6cf0c21f14076979edafb401caf6f0be54377490 | /vedastr_cstr/vedastr/models/bodies/rectificators/sspin.py | 26c593457eb7e1d3db2b610c4456f3b659256f07 | [
"MIT",
"Apache-2.0"
] | permissive | shekarneo/ocr-teamcode | abb23a6cb1df597ee0f8a45d1caf4c6374ce1d46 | 86d5070e8f907571a47967d64facaee246d92a35 | refs/heads/main | 2023-06-02T19:29:20.640271 | 2021-06-23T15:03:31 | 2021-06-23T15:03:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | # We implement a new module which has same property like spin to some extent.
# We think this manner can replace the GA-SPIN by enlarging output features
# of se layer, but we didn't do further experiments.
import torch.nn as nn
from vedastr.models.bodies.feature_extractors import build_feature_extractor
from vedastr.models.utils import SE
from vedastr.models.weight_init import init_weights
from .registry import RECTIFICATORS
@RECTIFICATORS.register_module
class SSPIN(nn.Module):
    """Simplified SPIN rectificator: a feature extractor followed by an
    SE (squeeze-and-excitation) block."""

    def __init__(self, feature_cfg, se_cfgs):
        super(SSPIN, self).__init__()
        # Build both sub-modules first so init_weights can visit all of them.
        self.body = build_feature_extractor(feature_cfg)
        self.se = SE(**se_cfgs)
        init_weights(self.modules())

    def forward(self, x):
        # Extract features, then recalibrate them with the SE block.
        return self.se(self.body(x))
| [
"[email protected]"
] | |
990b6e7b21dac9e0e08d4c442e1c8ea4175bb38e | ce63cec7c28611bb0c43bd503996718716246538 | /reagent/test/ranking/test_seq2slate_trainer.py | 00ea6e7d9864f353b459580e5b3f0214057dfe08 | [
"BSD-3-Clause"
] | permissive | mcx/ReAgent | 70cbf5484656c8bdf722155e0eacac0385a3e276 | 57b58a8b3a6b74bb87a197b73a6cd108ddad895e | refs/heads/master | 2023-08-10T15:37:02.664394 | 2021-10-14T01:52:49 | 2021-10-14T01:53:55 | 329,295,166 | 0 | 0 | BSD-3-Clause | 2021-10-14T02:16:26 | 2021-01-13T12:04:53 | Python | UTF-8 | Python | false | false | 19,844 | py | import copy
import itertools
import logging
import random
import unittest
from itertools import permutations
import numpy as np
import numpy.testing as npt
import pytorch_lightning as pl
import reagent.core.types as rlt
import torch
from parameterized import parameterized
from reagent.core.parameters import Seq2SlateParameters
from reagent.core.parameters_seq2slate import IPSClamp, IPSClampMethod
from reagent.model_utils.seq2slate_utils import Seq2SlateOutputArch
from reagent.models.seq2slate import Seq2SlateMode, Seq2SlateTransformerNet
from reagent.optimizer.union import Optimizer__Union, classes
from reagent.samplers.frechet import FrechetSort
from reagent.training.ranking.helper import ips_clamp
from reagent.training.ranking.seq2slate_trainer import Seq2SlateTrainer
from torch.utils.data import DataLoader
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)

# Parameter grids swept by @parameterized.expand in the tests below.
output_arch_list = [
    Seq2SlateOutputArch.FRECHET_SORT,
    Seq2SlateOutputArch.AUTOREGRESSIVE,
]
policy_gradient_interval_list = [1, 5]
# NOTE(review): both entries are UNIVERSAL, so IPSClampMethod.AGGRESSIVE is
# never exercised by the parameterized tests — the first entry was presumably
# meant to be AGGRESSIVE. Confirm intent before changing.
clamp_method_list = [IPSClampMethod.UNIVERSAL, IPSClampMethod.UNIVERSAL]
clamp_max_list = [1.0, 10.0]
frechet_sort_shape_list = [0.1, 0.5, 1.0]
def create_trainer(
    seq2slate_net,
    learning_rate,
    seq2slate_params,
    policy_gradient_interval,
):
    """Build a Seq2SlateTrainer wired to a plain SGD policy optimizer."""
    sgd_optimizer = Optimizer__Union(SGD=classes["SGD"](lr=learning_rate))
    return Seq2SlateTrainer(
        seq2slate_net=seq2slate_net,
        params=seq2slate_params,
        policy_optimizer=sgd_optimizer,
        policy_gradient_interval=policy_gradient_interval,
        print_interval=1,
    )
def create_seq2slate_transformer(
    state_dim, candidate_num, candidate_dim, hidden_size, output_arch
):
    """Construct a small 2-layer, 2-head Seq2Slate transformer for tests.

    All model widths reuse `hidden_size`, and both source and target slate
    lengths equal `candidate_num` so the full candidate set is ranked.
    """
    config = dict(
        state_dim=state_dim,
        candidate_dim=candidate_dim,
        num_stacked_layers=2,
        num_heads=2,
        dim_model=hidden_size,
        dim_feedforward=hidden_size,
        max_src_seq_len=candidate_num,
        max_tgt_seq_len=candidate_num,
        output_arch=output_arch,
        temperature=0.5,
    )
    return Seq2SlateTransformerNet(**config)
def create_on_policy_batch(
    seq2slate, batch_size, state_dim, candidate_num, candidate_dim, rank_seed, device
):
    """Build a PreprocessedRankingInput whose actions and propensities come
    from sampling `seq2slate` itself (on-policy data).

    NOTE: statement order matters here — torch.randn/torch.rand consume the
    global RNG stream, and `torch.manual_seed(rank_seed)` is deliberately
    placed immediately before the ranking call so tests can re-seed and
    replay the exact same sample to check gradients.
    """
    state = torch.randn(batch_size, state_dim).to(device)
    candidates = torch.randn(batch_size, candidate_num, candidate_dim).to(device)
    reward = torch.rand(batch_size, 1).to(device)
    batch = rlt.PreprocessedRankingInput.from_input(
        state=state, candidates=candidates, device=device
    )
    # Reset seed here so that gradients can be replicated.
    torch.manual_seed(rank_seed)
    rank_output = seq2slate(
        batch, mode=Seq2SlateMode.RANK_MODE, tgt_seq_len=candidate_num, greedy=False
    )
    # ranked_tgt_out_idx presumably reserves indices 0/1 for padding/start
    # symbols, hence the -2 offset back to 0-based candidate positions —
    # TODO confirm against Seq2SlateTransformerNet.
    ranked_order = rank_output.ranked_tgt_out_idx - 2
    ranked_slate_prob = rank_output.ranked_per_seq_probs
    on_policy_batch = rlt.PreprocessedRankingInput.from_input(
        state=state,
        candidates=candidates,
        device=device,
        action=ranked_order,
        logged_propensities=ranked_slate_prob.detach(),
        slate_reward=reward,
    )
    return on_policy_batch
def create_off_policy_batch(
    seq2slate, batch_size, state_dim, candidate_num, candidate_dim, device
):
    """Build a PreprocessedRankingInput with random permutations as logged
    actions (off-policy data).

    The logged propensities are drawn uniformly from [0, 1e-12): vanishingly
    small values that make importance-sampling ratios huge, which the IPS
    clamp tests rely on.

    Note: `seq2slate` is unused here; it is kept for signature symmetry with
    create_on_policy_batch.
    """
    state = torch.randn(batch_size, state_dim)
    candidates = torch.randn(batch_size, candidate_num, candidate_dim)
    reward = torch.rand(batch_size, 1)
    action = torch.stack([torch.randperm(candidate_num) for _ in range(batch_size)])
    logged_slate_prob = torch.rand(batch_size, 1) / 1e12
    off_policy_batch = rlt.PreprocessedRankingInput.from_input(
        state=state,
        candidates=candidates,
        device=device,
        action=action,
        logged_propensities=logged_slate_prob,
        slate_reward=reward,
    )
    return off_policy_batch
class TestSeq2SlateTrainer(unittest.TestCase):
    """Tests Seq2SlateTrainer's policy-gradient updates against gradients
    computed by hand, plus IPS clamping and importance-sampling behavior."""

    def setUp(self):
        # Deterministic seeds so sampled batches/gradients are reproducible.
        np.random.seed(0)
        random.seed(0)
        torch.manual_seed(0)

    def assert_correct_gradient(
        self,
        net_with_gradient,
        net_after_gradient,
        policy_gradient_interval,
        learning_rate,
    ):
        """Checks one manual SGD step on `net_with_gradient` matches the
        trainer-updated weights in `net_after_gradient`."""
        for (n_c, w_c), (n, w) in zip(
            net_with_gradient.named_parameters(), net_after_gradient.named_parameters()
        ):
            assert n_c == n
            if w_c.grad is not None:
                # Trainer accumulates gradients for policy_gradient_interval
                # steps before applying them, hence the multiplier.
                assert torch.allclose(
                    w_c - policy_gradient_interval * learning_rate * w_c.grad,
                    w,
                    rtol=1e-4,
                    atol=2e-6,
                )

    def test_ips_clamp(self):
        """ips_clamp: identity with no clamp; AGGRESSIVE zeros out-of-range
        ratios; UNIVERSAL caps them at clamp_max."""
        importance_sampling = torch.tensor([0.5, 0.3, 3.0, 10.0, 40.0])
        assert torch.all(ips_clamp(importance_sampling, None) == importance_sampling)
        assert torch.all(
            ips_clamp(importance_sampling, IPSClamp(IPSClampMethod.AGGRESSIVE, 3.0))
            == torch.tensor([0.5, 0.3, 3.0, 0.0, 0.0])
        )
        assert torch.all(
            ips_clamp(importance_sampling, IPSClamp(IPSClampMethod.UNIVERSAL, 3.0))
            == torch.tensor([0.5, 0.3, 3.0, 3.0, 3.0])
        )

    @parameterized.expand(
        itertools.product(policy_gradient_interval_list, output_arch_list)
    )
    @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
    def test_seq2slate_trainer_on_policy_gpu(
        self, policy_gradient_interval, output_arch
    ):
        self._test_seq2slate_trainer_on_policy(
            policy_gradient_interval, output_arch, device=torch.device("cuda")
        )

    @parameterized.expand(
        itertools.product(policy_gradient_interval_list, output_arch_list)
    )
    def test_seq2slate_trainer_on_policy_cpu(
        self, policy_gradient_interval, output_arch
    ):
        self._test_seq2slate_trainer_on_policy(
            policy_gradient_interval, output_arch, device=torch.device("cpu")
        )

    def _test_seq2slate_trainer_on_policy(
        self, policy_gradient_interval, output_arch, device
    ):
        """Trains one batch on-policy and checks the update against two
        equivalent hand-computed REINFORCE gradients."""
        batch_size = 32
        state_dim = 2
        candidate_num = 15
        candidate_dim = 4
        hidden_size = 16
        learning_rate = 1.0
        on_policy = True
        rank_seed = 111
        seq2slate_params = Seq2SlateParameters(on_policy=on_policy)
        seq2slate_net = create_seq2slate_transformer(
            state_dim, candidate_num, candidate_dim, hidden_size, output_arch
        ).to(device)
        # Two copies to compute reference gradients on untouched weights.
        seq2slate_net_copy = copy.deepcopy(seq2slate_net).to(device)
        seq2slate_net_copy_copy = copy.deepcopy(seq2slate_net).to(device)
        trainer = create_trainer(
            seq2slate_net,
            learning_rate,
            seq2slate_params,
            policy_gradient_interval,
        )
        batch = create_on_policy_batch(
            seq2slate_net,
            batch_size,
            state_dim,
            candidate_num,
            candidate_dim,
            rank_seed,
            device,
        )
        training_data = DataLoader([batch], collate_fn=lambda x: x[0])
        pl_trainer = pl.Trainer(
            max_epochs=policy_gradient_interval,
            gpus=None if device == torch.device("cpu") else 1,
            logger=False,
        )
        pl_trainer.fit(trainer, training_data)
        seq2slate_net = trainer.seq2slate_net.to(device)

        # Manually compute gradient: REINFORCE loss -E[log p * reward],
        # replaying the same sample via rank_seed.
        torch.manual_seed(rank_seed)
        rank_output = seq2slate_net_copy(
            batch, mode=Seq2SlateMode.RANK_MODE, tgt_seq_len=candidate_num, greedy=False
        )
        loss = -(
            torch.mean(torch.log(rank_output.ranked_per_seq_probs) * batch.slate_reward)
        )
        loss.backward()
        self.assert_correct_gradient(
            seq2slate_net_copy, seq2slate_net, policy_gradient_interval, learning_rate
        )

        # Another equivalent formulation: -E[(p / p.detach()) * reward] has
        # the same gradient as -E[log p * reward].
        torch.manual_seed(rank_seed)
        ranked_per_seq_probs = seq2slate_net_copy_copy(
            batch, mode=Seq2SlateMode.RANK_MODE, tgt_seq_len=candidate_num, greedy=False
        ).ranked_per_seq_probs
        loss = -(
            torch.mean(
                ranked_per_seq_probs
                / ranked_per_seq_probs.detach()
                * batch.slate_reward
            )
        )
        loss.backward()
        self.assert_correct_gradient(
            seq2slate_net_copy_copy,
            seq2slate_net,
            policy_gradient_interval,
            learning_rate,
        )

    @parameterized.expand(
        itertools.product(policy_gradient_interval_list, output_arch_list)
    )
    @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
    def test_seq2slate_trainer_off_policy_gpu(
        self, policy_gradient_interval, output_arch
    ):
        self._test_seq2slate_trainer_off_policy(
            policy_gradient_interval, output_arch, device=torch.device("cuda")
        )

    @parameterized.expand(
        itertools.product(policy_gradient_interval_list, output_arch_list)
    )
    def test_seq2slate_trainer_off_policy_cpu(
        self, policy_gradient_interval, output_arch
    ):
        self._test_seq2slate_trainer_off_policy(
            policy_gradient_interval, output_arch, device=torch.device("cpu")
        )

    def _test_seq2slate_trainer_off_policy(
        self, policy_gradient_interval, output_arch, device
    ):
        """Trains one batch off-policy (importance-sampled) and checks the
        update against two hand-computed gradients."""
        batch_size = 32
        state_dim = 2
        candidate_num = 15
        candidate_dim = 4
        hidden_size = 16
        learning_rate = 1.0
        on_policy = False
        seq2slate_params = Seq2SlateParameters(on_policy=on_policy)
        seq2slate_net = create_seq2slate_transformer(
            state_dim, candidate_num, candidate_dim, hidden_size, output_arch
        ).to(device)
        seq2slate_net_copy = copy.deepcopy(seq2slate_net).to(device)
        seq2slate_net_copy_copy = copy.deepcopy(seq2slate_net).to(device)
        trainer = create_trainer(
            seq2slate_net,
            learning_rate,
            seq2slate_params,
            policy_gradient_interval,
        )
        batch = create_off_policy_batch(
            seq2slate_net, batch_size, state_dim, candidate_num, candidate_dim, device
        )

        training_data = DataLoader([batch], collate_fn=lambda x: x[0])
        pl_trainer = pl.Trainer(
            max_epochs=policy_gradient_interval,
            gpus=None if device == torch.device("cpu") else 1,
            logger=False,
        )
        pl_trainer.fit(trainer, training_data)
        seq2slate_net = trainer.seq2slate_net.to(device)

        # Manually compute gradient: importance-weighted REINFORCE loss
        # -E[log p * (p.detach() / logged_p) * reward].
        ranked_per_seq_log_probs = seq2slate_net_copy(
            batch, mode=Seq2SlateMode.PER_SEQ_LOG_PROB_MODE
        ).log_probs
        loss = -(
            torch.mean(
                ranked_per_seq_log_probs
                * torch.exp(ranked_per_seq_log_probs).detach()
                / batch.tgt_out_probs
                * batch.slate_reward
            )
        )
        loss.backward()
        self.assert_correct_gradient(
            seq2slate_net_copy, seq2slate_net, policy_gradient_interval, learning_rate
        )

        # Equivalent formulation: -E[(p / logged_p) * reward].
        ranked_per_seq_probs = torch.exp(
            seq2slate_net_copy_copy(
                batch, mode=Seq2SlateMode.PER_SEQ_LOG_PROB_MODE
            ).log_probs
        )
        loss = -(
            torch.mean(ranked_per_seq_probs / batch.tgt_out_probs * batch.slate_reward)
        )
        loss.backward()
        self.assert_correct_gradient(
            seq2slate_net_copy_copy,
            seq2slate_net,
            policy_gradient_interval,
            learning_rate,
        )

    @parameterized.expand(itertools.product(clamp_method_list, output_arch_list))
    def test_seq2slate_trainer_off_policy_with_clamp(self, clamp_method, output_arch):
        """Off-policy training with a tight IPS clamp: the trainer update must
        match a manual gradient computed on clamped importance ratios."""
        batch_size = 32
        state_dim = 2
        candidate_num = 15
        candidate_dim = 4
        hidden_size = 16
        learning_rate = 1.0
        device = torch.device("cpu")
        policy_gradient_interval = 1

        seq2slate_params = Seq2SlateParameters(
            on_policy=False,
            ips_clamp=IPSClamp(clamp_method=clamp_method, clamp_max=0.3),
        )
        seq2slate_net = create_seq2slate_transformer(
            state_dim, candidate_num, candidate_dim, hidden_size, output_arch
        )
        seq2slate_net_copy = copy.deepcopy(seq2slate_net)
        trainer = create_trainer(
            seq2slate_net,
            learning_rate,
            seq2slate_params,
            policy_gradient_interval,
        )
        batch = create_off_policy_batch(
            seq2slate_net, batch_size, state_dim, candidate_num, candidate_dim, device
        )

        training_data = DataLoader([batch], collate_fn=lambda x: x[0])
        pl_trainer = pl.Trainer(max_epochs=policy_gradient_interval, logger=False)
        pl_trainer.fit(trainer, training_data)

        # Manually compute gradient on the clamped ratios.
        ranked_per_seq_probs = torch.exp(
            seq2slate_net_copy(
                batch, mode=Seq2SlateMode.PER_SEQ_LOG_PROB_MODE
            ).log_probs
        )
        logger.info(f"ips ratio={ranked_per_seq_probs / batch.tgt_out_probs}")
        loss = -(
            torch.mean(
                ips_clamp(
                    ranked_per_seq_probs / batch.tgt_out_probs,
                    seq2slate_params.ips_clamp,
                )
                * batch.slate_reward
            )
        )
        loss.backward()
        self.assert_correct_gradient(
            seq2slate_net_copy, seq2slate_net, policy_gradient_interval, learning_rate
        )

    @parameterized.expand(
        itertools.product(
            output_arch_list, clamp_method_list, clamp_max_list, frechet_sort_shape_list
        )
    )
    def test_compute_impt_smpl(self, output_arch, clamp_method, clamp_max, shape):
        """Enumerates all permutations of a small slate and checks that both
        logged and model propensities sum to 1, and clamping is applied
        according to the configured method."""
        logger.info(f"output arch: {output_arch}")
        logger.info(f"clamp method: {clamp_method}")
        logger.info(f"clamp max: {clamp_max}")
        logger.info(f"frechet shape: {shape}")

        candidate_num = 5
        candidate_dim = 2
        state_dim = 1
        hidden_size = 32
        device = torch.device("cpu")
        learning_rate = 0.001
        policy_gradient_interval = 1

        candidates = torch.randint(5, (candidate_num, candidate_dim)).float()
        candidate_scores = torch.sum(candidates, dim=1)

        seq2slate_params = Seq2SlateParameters(
            on_policy=False,
            ips_clamp=IPSClamp(clamp_method=clamp_method, clamp_max=clamp_max),
        )
        seq2slate_net = create_seq2slate_transformer(
            state_dim, candidate_num, candidate_dim, hidden_size, output_arch
        )
        trainer = create_trainer(
            seq2slate_net,
            learning_rate,
            seq2slate_params,
            policy_gradient_interval,
        )
        all_permt = torch.tensor(
            list(permutations(range(candidate_num), candidate_num))
        )
        sampler = FrechetSort(shape=shape, topk=candidate_num)
        sum_of_logged_propensity = 0
        sum_of_model_propensity = 0
        sum_of_ips_ratio = 0

        for i in range(len(all_permt)):
            sample_action = all_permt[i]
            logged_propensity = torch.exp(
                sampler.log_prob(candidate_scores, sample_action)
            )
            batch = rlt.PreprocessedRankingInput.from_input(
                state=torch.zeros(1, state_dim),
                candidates=candidates.unsqueeze(0),
                device=device,
                action=sample_action.unsqueeze(0),
                logged_propensities=logged_propensity.reshape(1, 1),
            )
            model_propensities = torch.exp(
                seq2slate_net(batch, mode=Seq2SlateMode.PER_SEQ_LOG_PROB_MODE).log_probs
            )
            impt_smpl, clamped_impt_smpl = trainer._compute_impt_smpl(
                model_propensities, logged_propensity
            )
            if impt_smpl > clamp_max:
                if clamp_method == IPSClampMethod.AGGRESSIVE:
                    # Bug fix: this previously called npt.asset_allclose (a
                    # typo) which would raise AttributeError when executed.
                    npt.assert_allclose(clamped_impt_smpl.detach().numpy(), 0, rtol=1e-5)
                else:
                    npt.assert_allclose(
                        clamped_impt_smpl.detach().numpy(), clamp_max, rtol=1e-5
                    )

            sum_of_model_propensity += model_propensities
            sum_of_logged_propensity += logged_propensity
            sum_of_ips_ratio += model_propensities / logged_propensity
            logger.info(
                f"shape={shape}, sample_action={sample_action}, logged_propensity={logged_propensity},"
                f" model_propensity={model_propensities}"
            )

        logger.info(
            f"shape {shape}, sum_of_logged_propensity={sum_of_logged_propensity}, "
            f"sum_of_model_propensity={sum_of_model_propensity}, "
            f"mean sum_of_ips_ratio={sum_of_ips_ratio / len(all_permt)}"
        )
        npt.assert_allclose(sum_of_logged_propensity.detach().numpy(), 1, rtol=1e-5)
        npt.assert_allclose(sum_of_model_propensity.detach().numpy(), 1, rtol=1e-5)

    @parameterized.expand(itertools.product(output_arch_list, frechet_sort_shape_list))
    def test_ips_ratio_mean(self, output_arch, shape):
        """Samples many slates from a Frechet-sort logging policy and checks
        the mean importance-sampling ratio converges to 1."""
        # NOTE(review): the parameterized arguments are immediately overridden
        # below, so every expansion runs the same configuration — this looks
        # like leftover debug code; confirm before removing the overrides.
        output_arch = Seq2SlateOutputArch.FRECHET_SORT
        shape = 0.1
        logger.info(f"output arch: {output_arch}")
        logger.info(f"frechet shape: {shape}")

        candidate_num = 5
        candidate_dim = 2
        state_dim = 1
        hidden_size = 8
        device = torch.device("cpu")
        batch_size = 1024
        num_batches = 400
        learning_rate = 0.001
        policy_gradient_interval = 1

        state = torch.zeros(batch_size, state_dim)
        # all data have same candidates
        candidates = torch.randint(
            5, (batch_size, candidate_num, candidate_dim)
        ).float()
        candidates[1:] = candidates[0]
        candidate_scores = torch.sum(candidates, dim=-1)

        seq2slate_params = Seq2SlateParameters(
            on_policy=False,
        )
        seq2slate_net = create_seq2slate_transformer(
            state_dim, candidate_num, candidate_dim, hidden_size, output_arch
        )
        trainer = create_trainer(
            seq2slate_net,
            learning_rate,
            seq2slate_params,
            policy_gradient_interval,
        )
        sampler = FrechetSort(shape=shape, topk=candidate_num)

        sum_of_ips_ratio = 0

        for i in range(num_batches):
            sample_outputs = [
                sampler.sample_action(candidate_scores[j : j + 1])
                for j in range(batch_size)
            ]
            action = torch.stack(
                list(map(lambda x: x.action.squeeze(0), sample_outputs))
            )
            logged_propensity = torch.stack(
                list(map(lambda x: torch.exp(x.log_prob), sample_outputs))
            )
            batch = rlt.PreprocessedRankingInput.from_input(
                state=state,
                candidates=candidates,
                device=device,
                action=action,
                logged_propensities=logged_propensity,
            )
            model_propensities = torch.exp(
                seq2slate_net(batch, mode=Seq2SlateMode.PER_SEQ_LOG_PROB_MODE).log_probs
            )
            impt_smpl, _ = trainer._compute_impt_smpl(
                model_propensities, logged_propensity
            )
            sum_of_ips_ratio += torch.mean(impt_smpl).detach().numpy()
            mean_of_ips_ratio = sum_of_ips_ratio / (i + 1)
            logger.info(f"{i}-th batch, mean ips ratio={mean_of_ips_ratio}")

            # Early exit once the running mean has stabilized near 1.
            if i > 100 and np.allclose(mean_of_ips_ratio, 1, atol=0.03):
                return

        raise Exception(f"Mean ips ratio {mean_of_ips_ratio} is not close to 1")
| [
"[email protected]"
] | |
0503c67578bed73112c1a1fd91865ab9220e8817 | fd1a3d5f1eb2a85a204b9694f26e2168e1e2fa10 | /msu/fall2013/ece867/project/src/coroutine.py | 63c83844bcb50a519b15c68480dac7474e52f568 | [] | no_license | csboling/studies | 61d3d31ce8ccd4c9370e19fa60fc4ad8fbc26e30 | 1a212d63420ff1602755950124136993bc967849 | refs/heads/master | 2021-01-21T14:08:59.397293 | 2016-03-30T17:49:21 | 2016-03-30T17:49:21 | 25,365,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,170 | py | def coroutine(f):
def start(*args, **kwargs):
g = f(*args, **kwargs)
g.next()
return g
return start
@coroutine
def consume():
    """Terminal sink: accept every value sent to it and discard it."""
    while True:
        _ = (yield)
@coroutine
def broadcast(targets):
    """Fan-out stage: forward each received value to every coroutine in `targets`."""
    while True:
        item = (yield)
        for sink in targets:
            sink.send(item)
@coroutine
def accrue(depth, target):
    """Collect `depth` consecutive values into a circular window list,
    then hand the whole window to `target` after every full refill."""
    buf = [[]]*depth
    ring = circbuf(buf)
    while True:
        filled = 0
        while filled < depth:
            ring.send((yield))
            filled += 1
        target.send(buf)
@coroutine
def disperse(targets):
    """Scatter stage: receive a sequence and send its i-th element to the
    i-th coroutine in `targets` (IndexError if the sequence is longer)."""
    while True:
        batch = (yield)
        for idx, value in enumerate(batch):
            targets[idx].send(value)
@coroutine
def printer():
    """Debug sink: echo each received value to stdout."""
    while True:
        item = (yield)
        print(item)
class Flush(Exception):
    """Control signal thrown into a buffer coroutine to drain or reset it."""
@coroutine
def circbuf(v, target=None):
    """Circular-buffer coroutine over the preallocated list ``v``.

    Each value sent in overwrites the next slot, wrapping around at the end.
    Throwing ``Flush`` into the coroutine either abandons the current fill
    pass (when ``target`` is None) or replays the ``count`` most recently
    stored values, oldest first, into ``target``.
    """
    size = len(v)
    count = 0  # number of slots holding valid data, capped at size
    while True:
        for i in xrange(len(v)):
            try:
                v[i] = (yield)
                if count < size:
                    count += 1
            except Flush:
                if target == None:
                    break  # drop this pass; restart filling from slot 0
                else:
                    # Flush arrived while waiting on slot i, so the oldest
                    # valid element sits `count` slots behind i (mod size).
                    tail = i - count
                    if tail < 0:
                        tail += size
                    # Drain in arrival order, wrapping at the end of v.
                    while count:
                        target.send(v[tail])
                        tail += 1
                        if tail == size: tail = 0
                        count -= 1
| [
"[email protected]"
] | |
c7c8dbb3871913fc00fa414dd0a9cd0fb3f622c3 | ef61c5f177ee44ac08325335fc28a12f3fccbb58 | /resource_management/views/add_item/add_item.py | c02a116732e5c57847c249eb40a4f70e1961fbef | [] | no_license | bammidichandini/resource_management-chandini | 3c11c7b2eb5e2f8d3df5b55e4d3ee86a27ed5c3a | aa4ec50f0b36a818bebc2033cb39ee928e5be13c | refs/heads/master | 2022-12-01T19:59:25.366843 | 2020-07-23T09:10:42 | 2020-07-23T09:10:42 | 269,610,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,938 | py | from functools import reduce
def add_item(*args, **kwargs):  # pylint: disable=invalid-name
    """Endpoint entry point for the add-item API.

    Extracts the bearer access token and the ``X-Source`` header from the
    Django request (``args[0]``), forwards everything to this view's
    ``api_wrapper``, and normalises the wrapper's result into an HTTP
    response via ``endpoint_response``.

    Request:
        args[0] -> Django HttpRequest (headers read from ``META``)
        kwargs  -> user / user_dto / request body / header / query-param
                   objects supplied by the django_swagger_utils routing layer
    Response:
        tuple(response_status_code, response_object, response_headers_object)
        as produced by ``endpoint_response``.
    """
    # "Authorization: Bearer <token>" -> keep only the token part.
    access_token = ''
    http_authorization = args[0].META.get("HTTP_AUTHORIZATION")
    if http_authorization is not None:
        auth_parts = http_authorization.split(" ")
        if len(auth_parts) == 2:
            access_token = auth_parts[1]
    http_source = args[0].META.get("HTTP_X_SOURCE")
    kwargs.update({"access_token": access_token, 'source': http_source})
    from .api_wrapper import api_wrapper
    response_object = api_wrapper(*args, **kwargs)
    if response_object is None:
        # Wrapper returned nothing: answer with an empty 200 response.
        from django.http.response import HttpResponse
        response_object = HttpResponse()
    elif isinstance(response_object, (str, int, float)):
        # Primitive payloads are stringified (bool is an int subclass,
        # so True/False are covered as well).
        from django.http.response import HttpResponse
        response_object = HttpResponse(str(response_object))
    from django_swagger_utils.drf_server.utils.server_gen.endpoint_response \
        import endpoint_response
    return endpoint_response(response_object)
| [
"[email protected]"
] | |
b16c44dca67fbbadaa94bfac55dcddd6bcb70005 | dbfeec6da584a41f8f341535728385db4d777ddf | /scripts/analysis/constant_accuracy_overall.py | 60a0851b55bc554e5bc8a13d1d76b9a36a924242 | [
"MIT"
] | permissive | alan-turing-institute/CSV_Wrangling | 7b0abfcfc0d3a7a91252da80064952ede4c71578 | 3e073b8eee906c8fc71a5ce4499b07bbe67e8807 | refs/heads/master | 2022-01-24T13:31:36.381401 | 2022-01-20T22:13:17 | 2022-01-20T22:13:17 | 158,363,564 | 29 | 10 | MIT | 2022-01-20T22:13:48 | 2018-11-20T09:28:05 | TeX | UTF-8 | Python | false | false | 2,014 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Overall accuracy of a method averaged over multiple corpora.
Author: Gertjan van den Burg
Copyright (c) 2018 - The Alan Turing Institute
License: See the LICENSE file.
"""
import argparse
import sys
from common.detector_result import Status
from .core import load_detector_results
def parse_args():
    """Build and evaluate the command-line interface for this script."""
    cli = argparse.ArgumentParser()
    cli.add_argument("-r", dest="reference", nargs="+", required=True,
                     help="Reference file(s) with ground truth")
    cli.add_argument("-d", dest="detector", nargs="+", required=True,
                     help="Detector result(s)")
    cli.add_argument("-o", dest="output", required=True,
                     help="Output tex file to write to")
    return cli.parse_args()
def load_and_merge(filenames):
    """Load several detector/reference result files into one dict.

    Aborts the program (after printing to stderr) if the same input file
    appears in more than one result file.
    """
    merged = {}
    for path in filenames:
        _, loaded = load_detector_results(path)
        for fname, result in loaded.items():
            if fname in merged:
                print(
                    "Error: duplicate result for file %s" % fname,
                    file=sys.stderr,
                )
                raise SystemExit
            merged[fname] = result
    return merged
def compute_accuracy_overall(ref_results, det_results):
    """Return detection accuracy (percent) over all files whose ground
    truth annotation has status OK."""
    n_annotated = 0
    n_correct = 0
    for fname, ref in ref_results.items():
        if not ref.status == Status.OK:
            continue
        n_annotated += 1
        det = det_results[fname]
        if det.status == Status.OK and ref.dialect == det.dialect:
            n_correct += 1
    return n_correct / n_annotated * 100
def main():
    """Script driver: load results, compute accuracy, emit a TeX snippet."""
    opts = parse_args()
    references = load_and_merge(opts.reference)
    detections = load_and_merge(opts.detector)
    accuracy = compute_accuracy_overall(references, detections)
    with open(opts.output, "w") as out:
        out.write("%.0f\\%%%%" % accuracy)
# Allow direct execution: python -m scripts.analysis.constant_accuracy_overall
if __name__ == "__main__":
    main()
| [
"[email protected]"
] | |
bf8e4d7f124848db648351fe9e712bf866b91dd9 | 3f658c0098a66015840bd9d631987e6b937bb300 | /32.Stock_Trading_Alerts/venv/Lib/site-packages/twilio/rest/api/v2010/account/incoming_phone_number/assigned_add_on/assigned_add_on_extension.py | 4ec400ab539eed7c9f98bdcc94c76195c1a4a46d | [] | no_license | RohitPr/PythonProjects | 4cf7ec37cfba60afecc88ae542cc4155b72f4098 | 7dd807a45cd86cf0851cb95a1b1433805891f990 | refs/heads/main | 2023-06-01T06:42:40.147968 | 2021-06-13T00:57:05 | 2021-06-13T00:57:05 | 337,298,986 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 17,396 | py | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class AssignedAddOnExtensionList(ListResource):
    """List resource for the Extensions of one assigned Add-on installation.
    PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution. """
    def __init__(self, version, account_sid, resource_sid, assigned_add_on_sid):
        """
        Initialize the AssignedAddOnExtensionList
        :param Version version: Version that contains the resource
        :param account_sid: The SID of the Account that created the resource
        :param resource_sid: The SID of the Phone Number to which the Add-on is assigned
        :param assigned_add_on_sid: The SID that uniquely identifies the assigned Add-on installation
        """
        super(AssignedAddOnExtensionList, self).__init__(version)
        # Path Solution: the SIDs that parameterise every request URI below.
        self._solution = {
            'account_sid': account_sid,
            'resource_sid': resource_sid,
            'assigned_add_on_sid': assigned_add_on_sid,
        }
        self._uri = '/Accounts/{account_sid}/IncomingPhoneNumbers/{resource_sid}/AssignedAddOns/{assigned_add_on_sid}/Extensions.json'.format(**self._solution)
    def stream(self, limit=None, page_size=None):
        """
        Stream AssignedAddOnExtensionInstance records as a generator, fetching
        pages lazily so memory use stays bounded.
        :param int limit: max records to yield in total (None = no limit)
        :param int page_size: records per request (default 50; when only a
            limit is given, min(limit, 1000) is used)
        :returns: generator of AssignedAddOnExtensionInstance
        """
        limits = self._version.read_limits(limit, page_size)
        page = self.page(page_size=limits['page_size'], )
        return self._version.stream(page, limits['limit'])
    def list(self, limit=None, page_size=None):
        """
        Eagerly load AssignedAddOnExtensionInstance records into a list
        (see stream() for the lazy equivalent).
        :param int limit: max records to return (None = no limit)
        :param int page_size: records per request (default 50)
        :returns: list of AssignedAddOnExtensionInstance
        """
        return list(self.stream(limit=limit, page_size=page_size, ))
    def page(self, page_token=values.unset, page_number=values.unset,
             page_size=values.unset):
        """
        Retrieve a single page of records; the request is executed immediately.
        :param str page_token: PageToken provided by the API
        :param int page_number: page number (client-side state only)
        :param int page_size: number of records to return (default 50)
        :returns: AssignedAddOnExtensionPage
        """
        data = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })
        response = self._version.page(method='GET', uri=self._uri, params=data, )
        return AssignedAddOnExtensionPage(self._version, response, self._solution)
    def get_page(self, target_url):
        """
        Retrieve a specific page via an API-generated page URL; the request
        is executed immediately.
        :param str target_url: API-generated URL for the requested results page
        :returns: AssignedAddOnExtensionPage
        """
        response = self._version.domain.twilio.request(
            'GET',
            target_url,
        )
        return AssignedAddOnExtensionPage(self._version, response, self._solution)
    def get(self, sid):
        """
        Construct (without fetching) a context for one Extension by SID.
        :param sid: The unique string that identifies the resource
        :returns: AssignedAddOnExtensionContext
        """
        return AssignedAddOnExtensionContext(
            self._version,
            account_sid=self._solution['account_sid'],
            resource_sid=self._solution['resource_sid'],
            assigned_add_on_sid=self._solution['assigned_add_on_sid'],
            sid=sid,
        )
    def __call__(self, sid):
        """
        Alias for get(): supports the ``extensions(sid)`` call syntax.
        :param sid: The unique string that identifies the resource
        :returns: AssignedAddOnExtensionContext
        """
        return AssignedAddOnExtensionContext(
            self._version,
            account_sid=self._solution['account_sid'],
            resource_sid=self._solution['resource_sid'],
            assigned_add_on_sid=self._solution['assigned_add_on_sid'],
            sid=sid,
        )
    def __repr__(self):
        """
        Provide a machine-friendly representation.
        :rtype: str
        """
        return '<Twilio.Api.V2010.AssignedAddOnExtensionList>'
class AssignedAddOnExtensionPage(Page):
    """One page of AssignedAddOnExtension records.
    PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution. """
    def __init__(self, version, response, solution):
        """
        Initialize the AssignedAddOnExtensionPage
        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :param solution: dict of path SIDs (account / resource / assigned add-on)
        """
        super(AssignedAddOnExtensionPage, self).__init__(version, response)
        # Path Solution: carried along so each record keeps its URI context.
        self._solution = solution
    def get_instance(self, payload):
        """
        Build an AssignedAddOnExtensionInstance from one API payload dict,
        re-attaching this page's path SIDs.
        :param dict payload: Payload response from the API
        :returns: AssignedAddOnExtensionInstance
        """
        return AssignedAddOnExtensionInstance(
            self._version,
            payload,
            account_sid=self._solution['account_sid'],
            resource_sid=self._solution['resource_sid'],
            assigned_add_on_sid=self._solution['assigned_add_on_sid'],
        )
    def __repr__(self):
        """
        Provide a machine-friendly representation.
        :rtype: str
        """
        return '<Twilio.Api.V2010.AssignedAddOnExtensionPage>'
class AssignedAddOnExtensionContext(InstanceContext):
    """Context for a single Extension resource: knows its URI and can fetch it.
    PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution. """
    def __init__(self, version, account_sid, resource_sid, assigned_add_on_sid,
                 sid):
        """
        Initialize the AssignedAddOnExtensionContext
        :param Version version: Version that contains the resource
        :param account_sid: The SID of the Account that created the resource to fetch
        :param resource_sid: The SID of the Phone Number to which the Add-on is assigned
        :param assigned_add_on_sid: The SID that uniquely identifies the assigned Add-on installation
        :param sid: The unique string that identifies the resource
        """
        super(AssignedAddOnExtensionContext, self).__init__(version)
        # Path Solution: the SIDs that parameterise this resource's URI.
        self._solution = {
            'account_sid': account_sid,
            'resource_sid': resource_sid,
            'assigned_add_on_sid': assigned_add_on_sid,
            'sid': sid,
        }
        self._uri = '/Accounts/{account_sid}/IncomingPhoneNumbers/{resource_sid}/AssignedAddOns/{assigned_add_on_sid}/Extensions/{sid}.json'.format(**self._solution)
    def fetch(self):
        """
        GET this Extension resource and return it as an instance.
        :returns: The fetched AssignedAddOnExtensionInstance
        """
        payload = self._version.fetch(method='GET', uri=self._uri, )
        return AssignedAddOnExtensionInstance(
            self._version,
            payload,
            account_sid=self._solution['account_sid'],
            resource_sid=self._solution['resource_sid'],
            assigned_add_on_sid=self._solution['assigned_add_on_sid'],
            sid=self._solution['sid'],
        )
    def __repr__(self):
        """
        Provide a machine-friendly representation (includes the path SIDs).
        :rtype: str
        """
        context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Api.V2010.AssignedAddOnExtensionContext {}>'.format(context)
class AssignedAddOnExtensionInstance(InstanceResource):
    """One AssignedAddOnExtension record, with a lazily built context for refetching.
    PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution. """
    def __init__(self, version, payload, account_sid, resource_sid,
                 assigned_add_on_sid, sid=None):
        """
        Initialize the AssignedAddOnExtensionInstance from one API payload dict.
        """
        super(AssignedAddOnExtensionInstance, self).__init__(version)
        # Marshaled Properties: raw payload fields, exposed read-only via the
        # properties below.
        self._properties = {
            'sid': payload.get('sid'),
            'account_sid': payload.get('account_sid'),
            'resource_sid': payload.get('resource_sid'),
            'assigned_add_on_sid': payload.get('assigned_add_on_sid'),
            'friendly_name': payload.get('friendly_name'),
            'product_name': payload.get('product_name'),
            'unique_name': payload.get('unique_name'),
            'uri': payload.get('uri'),
            'enabled': payload.get('enabled'),
        }
        # Context is created lazily by _proxy on first use.
        self._context = None
        # Fall back to the payload's own sid when the caller did not pass one
        # (e.g. instances built from a list page).
        self._solution = {
            'account_sid': account_sid,
            'resource_sid': resource_sid,
            'assigned_add_on_sid': assigned_add_on_sid,
            'sid': sid or self._properties['sid'],
        }
    @property
    def _proxy(self):
        """
        Lazily build the context that all instance actions are proxied to.
        :returns: AssignedAddOnExtensionContext for this instance
        """
        if self._context is None:
            self._context = AssignedAddOnExtensionContext(
                self._version,
                account_sid=self._solution['account_sid'],
                resource_sid=self._solution['resource_sid'],
                assigned_add_on_sid=self._solution['assigned_add_on_sid'],
                sid=self._solution['sid'],
            )
        return self._context
    @property
    def sid(self):
        """
        :returns: The unique string that identifies the resource
        :rtype: unicode
        """
        return self._properties['sid']
    @property
    def account_sid(self):
        """
        :returns: The SID of the Account that created the resource
        :rtype: unicode
        """
        return self._properties['account_sid']
    @property
    def resource_sid(self):
        """
        :returns: The SID of the Phone Number to which the Add-on is assigned
        :rtype: unicode
        """
        return self._properties['resource_sid']
    @property
    def assigned_add_on_sid(self):
        """
        :returns: The SID that uniquely identifies the assigned Add-on installation
        :rtype: unicode
        """
        return self._properties['assigned_add_on_sid']
    @property
    def friendly_name(self):
        """
        :returns: The string that you assigned to describe the resource
        :rtype: unicode
        """
        return self._properties['friendly_name']
    @property
    def product_name(self):
        """
        :returns: A string that you assigned to describe the Product this Extension is used within
        :rtype: unicode
        """
        return self._properties['product_name']
    @property
    def unique_name(self):
        """
        :returns: An application-defined string that uniquely identifies the resource
        :rtype: unicode
        """
        return self._properties['unique_name']
    @property
    def uri(self):
        """
        :returns: The URI of the resource, relative to `https://api.twilio.com`
        :rtype: unicode
        """
        return self._properties['uri']
    @property
    def enabled(self):
        """
        :returns: Whether the Extension will be invoked
        :rtype: bool
        """
        return self._properties['enabled']
    def fetch(self):
        """
        Fetch a fresh copy of this resource from the API (via _proxy).
        :returns: The fetched AssignedAddOnExtensionInstance
        """
        return self._proxy.fetch()
    def __repr__(self):
        """
        Provide a machine-friendly representation (includes the path SIDs).
        :rtype: str
        """
        context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Api.V2010.AssignedAddOnExtensionInstance {}>'.format(context)
| [
"[email protected]"
] | |
fa195a799a4d1a1682803e41510dba4a6bde5c4d | 088314e3bd6ca7ef34d15f2aa45b743b363641d9 | /tasks/R2R/finetune.py | 95d942e9c106d9fcac12e61852807d8ccf8f8283 | [
"MIT"
] | permissive | weituo12321/PREVALENT_R2R | 7a27d580fcbe8f72a209697d053ca3eb2013e3a0 | 868fb53d6b7978bbb10439a59e65044c811ee5c2 | refs/heads/master | 2022-11-24T00:54:32.385940 | 2020-07-24T17:56:42 | 2020-07-24T17:56:42 | 248,832,547 | 8 | 7 | MIT | 2022-11-22T02:10:54 | 2020-03-20T19:07:08 | Python | UTF-8 | Python | false | false | 64,248 | py |
import os, argparse, json
import time, copy, random, pickle
import numpy as np
import pandas as pd
from collections import defaultdict
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
from utils import read_vocab, write_vocab, build_vocab, Tokenizer, SplitTokenizer, padding_idx, \
timeSince, boolean_string, preprocess_get_pano_states, current_best
from env import R2RBatch, EnvBatch
from model import EncoderLSTM, EncoderMultiLSTM, BertEncoder, MultiBertEncoder, GptEncoder, MultiGptEncoder,\
TransformerEncoder, MultiTransformerEncoder, BertImgEncoder, BertAddEncoder,MultiVilBertEncoder, MultiVilAddEncoder, MultiAddLoadEncoder, AttnDecoderLSTM
from pytorch_transformers import BertForMaskedLM,BertTokenizer
from agent import Seq2SeqAgent
from eval import Evaluation
from feature import Feature
import pprint
import pdb
class CustomDataParallel(nn.Module):
    """Wrapper around ``nn.DataParallel`` that transparently exposes the
    attributes of the wrapped (inner) model.

    ``nn.DataParallel`` hides the original module behind ``.module``; this
    wrapper forwards unknown attribute lookups there, so callers can keep
    using ``wrapper.some_attr`` as if the model were not parallelised.
    """

    def __init__(self, model):
        """Wrap `model` in DataParallel and move it to the default GPU."""
        super().__init__()
        self.model = nn.DataParallel(model).cuda()

    def forward(self, *args, **kwargs):
        """Dispatch the forward pass to the DataParallel replica set."""
        return self.model(*args, **kwargs)

    def __getattr__(self, name):
        # nn.Module.__getattr__ resolves registered parameters/buffers/
        # submodules (including self.model); anything it cannot find falls
        # through to the original model inside DataParallel.
        try:
            return super().__getattr__(name)
        except AttributeError:
            return getattr(self.model.module, name)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--train_vocab', dest='train_vocab', type=str, default='train_vocab.txt', help='train_vocab filename (in snapshots folder)')
parser.add_argument('--trainval_vocab', dest='trainval_vocab', type=str, default='trainval_vocab.txt', help='trainval_vocab filename (in snapshots folder)')
parser.add_argument('--use_glove', dest='use_glove', type=boolean_string, default=False, help='whether use glove')
parser.add_argument('--glove_path', dest='glove_path', type=str, default='tasks/R2R/data/train_glove.npy', help='path to the glove file')
parser.add_argument('--result_dir', dest='result_dir', type=str, default='tasks/R2R/results/', help='path to the result_dir file')
parser.add_argument('--snapshot_dir', dest='snapshot_dir', type=str, default='tasks/R2R/snapshots/', help='path to the snapshot_dir file')
parser.add_argument('--plot_dir', dest='plot_dir', type=str, default='tasks/R2R/plots/', help='path to the plot_dir file')
parser.add_argument('--min_count', dest='min_count', type=int, default=5, help='word min_count')
parser.add_argument('--max_input_length', dest='max_input_length', default=80, type=int, help='max_input_length')
parser.add_argument('--batch_size', dest='batch_size', default=10, type=int, help='batch size')
parser.add_argument('--max_episode_len', dest='max_episode_len', default=8, type=int, help='max_episode_len')
parser.add_argument('--word_embedding_size', dest='word_embedding_size', default=256, type=int, help='word_embedding_size')
parser.add_argument('--action_embedding_size', dest='action_embedding_size', default=64, type=int, help='action_embedding_size')
parser.add_argument('--hidden_size', dest='hidden_size', default=1024, type=int, help='decoder hidden_size')
parser.add_argument('--enc_hidden_size', dest='enc_hidden_size', default=1024, type=int, help='encoder hidden_size')
parser.add_argument('--feature_store', dest='feature_store', type=str, default='img_features/ResNet-152-imagenet.tsv', help='path to the image feature file')
parser.add_argument('--feature_size', dest='feature_size', default=2048, type=int, help='feature_size')
parser.add_argument('--feature_all_size', dest='feature_all_size', default=2176, type=int, help='imgaction_size')
parser.add_argument('--n_iters', dest='n_iters', default=70000, type=int, help='n_iters')
parser.add_argument('--n_iters_resume', dest='n_iters_resume', default=0, type=int, help='n_iters_resume')
parser.add_argument('--n_iters_pretrain_resume', dest='n_iters_pretrain_resume', default=0, type=int, help='n_iters_pretrain_resume')
parser.add_argument('--ss_n_pretrain_iters', dest='ss_n_pretrain_iters', default=-1, type=int, help='scheduled sampling n_iters in pretrain')
parser.add_argument('--ss_n_iters', dest='ss_n_iters', default=65000, type=int, help='scheduled sampling n_iters')
parser.add_argument('--finetune_iters', dest='finetune_iters', default=-1, type=int, help='finetune_iters for BERT')
parser.add_argument('--finetune_batchsize', dest='finetune_batchsize', default=-1, type=int, help='finetune_batchsize for BERT')
parser.add_argument('--sc_after', dest='sc_after', default=-1, type=int, help='SELF_CRITICAL_AFTER')
parser.add_argument('--pretrain_model_path', dest='pretrain_model_path', type=str, default='tasks/R2R/snapshots/', help='the path of pretrained model')
parser.add_argument('--pretrain_lm_model_path', dest='pretrain_lm_model_path', type=str, default='pretrained_models/', help='the path of pretrained lm model')
parser.add_argument('--pretrain_model_name', dest='pretrain_model_name', type=str, default=None, help='the name of pretrained model')
parser.add_argument('--pretrain_decoder_name', dest='pretrain_decoder_name', type=str, default=None, help='the name of decoder model')
parser.add_argument('--log_every', dest='log_every', default=20, type=int, help='log_every')
parser.add_argument('--save_ckpt', dest='save_ckpt', default=48, type=int, help='dump model checkpoint, default -1')
parser.add_argument('--schedule_ratio', dest='schedule_ratio', default=0.2, type=float, help='ratio for sample or teacher')
parser.add_argument('--schedule_anneal', dest='schedule_anneal', action='store_true', help='schedule_ratio is annealling or not')
parser.add_argument('--dropout_ratio', dest='dropout_ratio', default=0.4, type=float, help='dropout_ratio')
parser.add_argument('--temp_alpha', dest='temp_alpha', default=1.0, type=float, help='temperate alpha for softmax')
parser.add_argument('--learning_rate', dest='learning_rate', default=5e-05, type=float, help='learning_rate')
parser.add_argument('--sc_learning_rate', dest='sc_learning_rate', default=2e-05, type=float, help='sc_learning_rate')
parser.add_argument('--weight_decay', dest='weight_decay', default=0.0005, type=float, help='weight_decay')
parser.add_argument('--optm', dest='optm', default='Adamax', type=str, help='Adam, Adamax, RMSprop')
parser.add_argument('--sc_reward_scale', dest='sc_reward_scale', default=1., type=float, help='sc_reward_scale')
parser.add_argument('--sc_discouted_immediate_r_scale', dest='sc_discouted_immediate_r_scale', default=0., type=float, help='sc_discouted_immediate_r_scale')
parser.add_argument('--sc_length_scale', dest='sc_length_scale', default=0., type=float, help='sc_length_scale')
parser.add_argument('--feedback_method', dest='feedback_method', type=str, default='teacher', help='sample or teacher')
parser.add_argument('--bidirectional', dest='bidirectional', type=boolean_string, default=True, help='bidirectional')
parser.add_argument('--monotonic_sc', dest='monotonic_sc', type=boolean_string, default=False, help='monotonic self-critic')
parser.add_argument('--panoramic', dest='panoramic', type=boolean_string, default=True, help='panoramic img')
parser.add_argument('--action_space', dest='action_space', type=int, default=-1, help='6 or -1(navigable viewpoints)')
parser.add_argument('--ctrl_feature', dest='ctrl_feature', type=boolean_string, default=False, help='ctrl_feature')
parser.add_argument('--ctrl_f_net', dest='ctrl_f_net', type=str, default='linear', help='imglinear, linear, nonlinear, imgnl or deconv')
parser.add_argument('--aux_n_iters', dest='aux_n_iters', type=int, help='update auxiliary net after aux_n_iters')
parser.add_argument('--aux_ratio', dest='aux_ratio', type=float, help='aux_ratio')
parser.add_argument('--accu_n_iters', dest='accu_n_iters', type=int, default=0, help='gradient accumulation')
parser.add_argument('--att_ctx_merge', dest='att_ctx_merge', type=str, default='mean', help='mean cat mean sum (to merge attention)')
parser.add_argument('--ctx_dropout_ratio', dest='ctx_dropout_ratio', type=float, default=0.0, help='ctx_dropout_ratio')
parser.add_argument('--clip_gradient', dest='clip_gradient', type=float, default=0.1, help='clip_gradient')
parser.add_argument('--clip_gradient_norm', dest='clip_gradient_norm', type=float, default=0.0, help='clip gradient norm')
parser.add_argument('--multi_share', dest='multi_share', type=boolean_string, default=True, help='share encoders in EncoderMultiLSTM')
parser.add_argument('--decoder_init', dest='decoder_init', type=boolean_string, default=True, help='init decoder with lstm output')
parser.add_argument('--dec_h_init', dest='dec_h_init', type=str, default='tanh', help='linear, tanh, none')
parser.add_argument('--dec_c_init', dest='dec_c_init', type=str, default='none', help='linear, tanh, none')
parser.add_argument('--dec_h_type', dest='dec_h_type', type=str, default='vc', help='none or vc')
parser.add_argument('--encoder_type', dest='encoder_type', type=str, default='bert', help='lstm transformer bert or gpt')
parser.add_argument('--top_lstm', dest='top_lstm', type=boolean_string, default=True, help='add lstm to the top of transformers')
parser.add_argument('--transformer_update', dest='transformer_update', type=boolean_string, default=False, help='update Bert')
parser.add_argument('--bert_n_layers', dest='bert_n_layers', type=int, default=1, help='bert_n_layers')
parser.add_argument('--bert_type', dest='bert_type', type=str, default="small", help='small or large')
parser.add_argument('--heads', dest='heads', type=int, default=4, help='heads in transformer')
parser.add_argument('--transformer_emb_size', dest='transformer_emb_size', type=int, default=512, help='transformer_emb_size')
parser.add_argument('--transformer_d_ff', dest='transformer_d_ff', type=int, default=1024, help='transformer_d_ff')
parser.add_argument('--transformer_num_layers', dest='transformer_num_layers', type=int, default=1, help='transformer_num_layers')
parser.add_argument('--vl_layers', dest='vl_layers', type=int, default=1, help='vl_layers')
parser.add_argument('--use_cuda', dest='use_cuda', type=boolean_string, default=True, help='use_cuda')
parser.add_argument('--gpu_id', dest='gpu_id', type=int, default=0, help='gpu_id')
parser.add_argument('--train', dest='train', type=boolean_string, default=True, help='train or test')
parser.add_argument('--pretrain_score_name', dest='pretrain_score_name', type=str, default='sr_sum', help='sr_sum spl_sum sr_unseen spl_unseen')
parser.add_argument('--train_score_name', dest='train_score_name', type=str, default='sr_sum', help='sr_sum spl_sum sr_unseen spl_unseen')
parser.add_argument('--sc_score_name', dest='sc_score_name', type=str, default='sr_sum', help='sr_sum spl_sum sr_unseen spl_unseen')
parser.add_argument('--beam_size', dest='beam_size', type=int, default=1, help='beam_size for inference')
parser.add_argument('--use_speaker', dest='use_speaker', type=boolean_string, default=False, help='use speaker for inference')
parser.add_argument('--speaker_weight', dest='speaker_weight', type=str, default='0.0,0.05,0.1,0.15,0.2,0.25,0.3,0.35,0.4,0.45,0.5,0.55,0.6,0.65,0.7,0.75,0.8,0.85,0.9,0.95', help='speaker weight for inference')
parser.add_argument('--speaker_prefix', dest='speaker_prefix', type=str, default='tasks/R2R/snapshots/release/speaker_final_release', help='speaker enc dec prefix')
parser.add_argument('--speaker_merge', dest='speaker_merge', type=str, default='mean', help='how speaker score for multiple sentences')
parser.add_argument('--state_factored', type=boolean_string, default=False, help='state factored beam search')
parser.add_argument('--successors', dest='successors', type=int, default=1, help='successors for state_factored_search inference')
parser.add_argument('--use_pretrain', action='store_true', help='pretrain or not')
parser.add_argument('--train_in_pretrain', action='store_true', help='pretrain train or not')
parser.add_argument('--pretrain_splits', type=str, default="literal_speaker_data_augmentation_paths", help="pretrain dataset")
parser.add_argument('--pretrain_n_iters', dest='pretrain_n_iters', type=int, default=0, help='pretrain_n_iters')
parser.add_argument('--pretrain_n_sentences', dest='pretrain_n_sentences', type=int, default=3,
help='This is only for pretraining when using EncoderMultiLSTM. In normal train/test/val, it will be reset to 3')
parser.add_argument('--single_sentence_test', dest='single_sentence_test', type=boolean_string, default=False,
help='run additional test for single sentence as input')
parser.add_argument('--val_splits', dest='val_splits', type=str, default='val_seen,val_unseen', help='test can be added')
parser.add_argument('--warmup_iters', dest='warmup_iters', type=int, default=0, help='warmup iterations for BertImgEncoder')
parser.add_argument('--reward_func', dest='reward_func', type=str, default='spl', help='reward function: sr_sc, spl, spl_sc, spl_last, spl_last_sc')
parser.add_argument('--seed', type=int, default=1, help='random seed')
parser.add_argument('--philly', action='store_true', help='program runs on Philly, used to redirect `write_model_path`')
parser.add_argument('--dump_result', action='store_true', help='dump result file')
parser.add_argument('--test_A', action='store_true', help='testing_settingA')
# args = '--panoramic True ' \
# '--action_space -1 ' \
# '--result_dir /home/xiul/Programs/exps/tmp/test/results/ ' \
# '--snapshot_dir /home/xiul/Programs/exps/tmp/test/snapshots/ ' \
# '--plot_dir /home/xiul/Programs/exps/tmp/test/plots/ ' \
# '--max_episode_len 8 ' \
# '--att_ctx_merge mean ' \
# '--n_iters 1500 ' \
# '--batch_size 64 --log_every 64 --feedback_method teacher ' \
# '--enc_hidden_size 1024 ' \
# '--hidden_size 1024 '
# args = parser.parse_args(args.split())
# args = '--action_space -1 ' \
# '--result_dir /home/xiul/Programs/exps/tmp/test/results/ ' \
# '--snapshot_dir /home/xiul/Programs/exps/tmp/test/snapshots/ ' \
# '--plot_dir /home/xiul/Programs/exps/tmp/test/plots/ ' \
# '--att_ctx_merge mean --batch_size 64 --log_every 64 --feedback_method teacher --clip_gradient_norm 0 ' \
# '--ss_n_pretrain_iters 400 --pretrain_n_iters 500 --ss_n_iters 500 --n_iters 600 ' \
# '--use_pretrain --pretrain_n_sentences 4 '\
# '--pretrain_splits sample_seed10_20_30_40_50_data_aug_paths '\
# '--enc_hidden_size 1024 --hidden_size 1024 '
# args = '--feedback_method teacher ' \
# '--result_dir /home/xql/Source/Subgoal/tasks/R2R/exps/test_trans_mean/results/ ' \
# '--snapshot_dir /home/xql/Source/Subgoal/tasks/R2R/exps/test_trans_mean/snapshots/ ' \
# '--plot_dir /home/xql/Source/Subgoal/tasks/R2R/exps/test_trans_mean/plots/ ' \
# '--ss_n_iters 20000 ' \
# '--dropout_ratio 0.4 ' \
# '--dec_h_type vc --schedule_ratio 0.3 ' \
# '--optm Adam --clip_gradient_norm 0 --log_every 64 ' \
# '--action_space -1 ' \
# '--train_score_name sr_unseen ' \
# '--n_iters 40000 ' \
# '--enc_hidden_size 1024 --hidden_size 1024 ' \
# '--bidirectional True ' \
# '--batch_size 10 ' \
# '--encoder_type transformer ' \
# '--transformer_emb_size 512 --top_lstm True ' \
# '--att_ctx_merge mean '
# args = parser.parse_args(args.split())
args = parser.parse_args()
params = vars(args)
assert params['panoramic'] or (params['panoramic'] == False and params['action_space'] == 6)
RESULT_DIR = params['result_dir'] #'tasks/R2R/results/'
SNAPSHOT_DIR = params['snapshot_dir'] #'tasks/R2R/snapshots/'
PLOT_DIR = params['plot_dir'] #'tasks/R2R/plots/'
TRAIN_VOCAB = os.path.join(SNAPSHOT_DIR, params['train_vocab']) #'tasks/R2R/data/train_vocab.txt'
TRAINVAL_VOCAB = os.path.join(SNAPSHOT_DIR, params['trainval_vocab'])
MIN_COUNT = params['min_count']
use_glove = params['use_glove']
glove_path = params['glove_path']
batch_size = params['batch_size'] #100
encoder_type = params['encoder_type'] # lstm
top_lstm = params['top_lstm']
transformer_update = params['transformer_update']
bert_n_layers = params['bert_n_layers'] # 1
reverse_input = True
MAX_INPUT_LENGTH = params['max_input_length'] #80
max_episode_len = params['max_episode_len'] #20
word_embedding_size = params['word_embedding_size'] #256
action_embedding_size = params['action_embedding_size'] # 32
hidden_size = params['hidden_size'] #512
enc_hidden_size = params['enc_hidden_size'] # 512
heads = params['heads']
transformer_emb_size = params['transformer_emb_size']
transformer_d_ff = params['transformer_d_ff']
transformer_num_layers = params['transformer_num_layers']
vl_layers = params['vl_layers']
bidirectional = params['bidirectional'] #False
feedback_method = params['feedback_method'] #'sample' # teacher or sample
schedule_ratio = params['schedule_ratio']
schedule_anneal = params['schedule_anneal']
dropout_ratio = params['dropout_ratio'] # 0.5
learning_rate = params['learning_rate'] # 0.0001
weight_decay = params['weight_decay'] # 0.0005
sc_learning_rate = params['sc_learning_rate']
optm = params['optm']
sc_reward_scale = params['sc_reward_scale']
sc_discouted_immediate_r_scale = params['sc_discouted_immediate_r_scale']
sc_length_scale = params['sc_length_scale']
#n_iters = 5000 if feedback_method == 'teacher' else 20000
n_iters = params['n_iters'] # 60000 # jolin
ss_n_iters = params['ss_n_iters']
ss_n_pretrain_iters = params['ss_n_pretrain_iters']
finetune_iters = params['finetune_iters']
finetune_batchsize = params['finetune_batchsize']
log_every = params['log_every']
save_ckpt = params['save_ckpt']
monotonic = params['monotonic_sc'] #False # jolin
panoramic = params['panoramic']
action_space = params['action_space']
ctrl_feature = params['ctrl_feature'] # False
ctrl_f_net = params['ctrl_f_net'] # linear
aux_n_iters = params['aux_n_iters']
aux_ratio = params['aux_ratio']
accu_n_iters = params['accu_n_iters']
att_ctx_merge = params['att_ctx_merge']
ctx_dropout_ratio = params['ctx_dropout_ratio']
multi_share = params['multi_share']
decoder_init = params['decoder_init']
pretrain_score_name = params['pretrain_score_name']
train_score_name = params['train_score_name']
sc_score_name = params['sc_score_name']
use_pretrain = params['use_pretrain']
train_in_pretrain = params['train_in_pretrain']
pretrain_splits = params['pretrain_splits'].split(',')
pretrain_n_iters = params['pretrain_n_iters']
pretrain_n_sentences = params['pretrain_n_sentences']
assert multi_share or pretrain_n_sentences==3
train_splits= ['train']
val_splits= params['val_splits'].split(',')
from env import debug_beam
#if debug_beam:
# train_splits=['debug1']
# val_splits=['debug1']
beam_size = params['beam_size']
use_speaker = params['use_speaker']
speaker_weight = params['speaker_weight'].split(',')
speaker_prefix = params['speaker_prefix']
state_factored = params['state_factored']
speaker_merge = params['speaker_merge']
successors = params['successors']
dump_result = params['dump_result']
if dump_result:
print('Info: Temporary result files will be dumped!')
else:
print('Info: Save space mode ON. All previous best models and temporary result files will be deleted!')
if ctrl_feature: assert aux_n_iters>1
model_prefix = 'seq2seq_%s_imagenet' % (feedback_method)
# controlled by env variable CUDA_AVAILABLE_DEVICES. If =4 and #(real devices)<4, cpu
#gpu_id = str(params['gpu_id'])
gpuid = 'cpu'
if params['use_cuda']:
if torch.cuda.is_available():
gpuid='cuda:0'
torch.backends.cudnn.deterministic = True
device = torch.device(gpuid)
is_train = params['train']
print('------ Check CUDA Info ------')
print('cuda:', torch.cuda.is_available())
if torch.cuda.is_available():
print('gpu num:', torch.cuda.device_count())
print('gpu IDs:', torch.cuda.current_device())
print('gpu name:', torch.cuda.get_device_name(0))
print('-----------------------------')
FEATURE_STORE = params['feature_store']
FEATURE_SIZE = params['feature_size']
FEATURE_ALL_SIZE = params['feature_all_size']
N_ITERS_RESUME = params['n_iters_resume'] # 0 #45000
n_iters_pretrain_resume = params['n_iters_pretrain_resume']
SELF_CRITICAL_AFTER = params['sc_after'] #-1 #320 # print('copied from 4snapshots')
single_sentence_test = params['single_sentence_test']
features = FEATURE_STORE
model_prefix = 'seq2seq_%s_imagenet' % (feedback_method)
use_bert = (encoder_type in ['bert', 'gpt','vlbert']) # for tokenizer and dataloader
ctx_hidden_size = enc_hidden_size * (2 if bidirectional else 1)
if (use_bert and not top_lstm):
ctx_hidden_size = 768
elif encoder_type=='transformer':
if not top_lstm:
ctx_hidden_size = transformer_emb_size
bert_pre_best_model_iter = 0 # for space reason, delete previous best model
bert_type = params['bert_type']
glove = np.load(glove_path) if use_glove else None
if use_glove and (glove.shape[1] != word_embedding_size):
print('Warning: reset word_embedding_size according to glove (dim=%d)' % glove.shape[1])
params['word_embedding_size'] = glove.shape[1]
word_embedding_size = glove.shape[1]
submit_splits = train_splits+val_splits
nav_graphs = None # navigable loc cache
philly = params['philly']
seed = params['seed']
agent_params = {}
agent_params['clip_gradient'] = params['clip_gradient']
agent_params['clip_gradient_norm'] = params['clip_gradient_norm']
agent_params['reward_func'] = params['reward_func']
#agent_params['schedule_ratio'] = params['schedule_ratio']
agent_params['schedule_ratio'] = 0.3 #params['schedule_ratio']
agent_params['temp_alpha'] = params['temp_alpha']
agent_params['test_A'] = params['test_A']
encoder_params = {}
encoder_params['dec_h_init'] = params['dec_h_init']
encoder_params['dec_c_init'] = params['dec_c_init']
decoder_params = {}
decoder_params['dec_h_type'] = params['dec_h_type']
navigable_locs_path = "tasks/R2R/data"
pretrain_model_path = params['pretrain_model_path']
pretrain_lm_model_path = params['pretrain_lm_model_path']
pretrain_model_name = params['pretrain_model_name'] #pretrained_lm_00000.model.ep0 for test
pretrain_decoder_name = params['pretrain_decoder_name'] #pretrained_lm_00000.model.ep0 for test
warmup_iters = params['warmup_iters']
if philly: # use philly
print('Info: Use Philly, all the output folders are reset.')
RESULT_DIR = os.path.join(os.getenv('PT_OUTPUT_DIR'), params['result_dir'])
PLOT_DIR = os.path.join(os.getenv('PT_OUTPUT_DIR'), params['plot_dir'])
SNAPSHOT_DIR = os.path.join(os.getenv('PT_OUTPUT_DIR'), params['snapshot_dir'])
TRAIN_VOCAB = os.path.join(SNAPSHOT_DIR, params['train_vocab'])
TRAINVAL_VOCAB = os.path.join(SNAPSHOT_DIR, params['trainval_vocab'])
navigable_locs_path = os.path.join(os.getenv('PT_OUTPUT_DIR'), "tasks/R2R/data")
print('RESULT_DIR', RESULT_DIR)
print('PLOT_DIR', PLOT_DIR)
print('SNAPSHOT_DIR', SNAPSHOT_DIR)
print('TRAIN_VOC', TRAIN_VOCAB)
def train(train_env, finetunes, train_Eval, encoder, decoder, monotonic, n_iters, resume_split_arr, cur_split_arr, score_name, jump_iters, log_every=log_every, val_envs={}, n_iters_resume=0,warmup_iters=0):
''' Train on training set, validating on both seen and unseen. '''
data_log = defaultdict(list)
agent = Seq2SeqAgent(train_env, "", encoder, decoder, 'resume' if n_iters_resume>0 else 10, aux_ratio, decoder_init, agent_params, monotonic, max_episode_len, accu_n_iters = accu_n_iters)
if hasattr(encoder, 'n_sentences'): encoder_n_sentences = encoder.n_sentences
best_model_iter = n_iters_resume
best_sr, best_spl, best_score = 0, 0, 0
best_eval = 0
idx = 0
if n_iters_resume>0:
split_string = "-".join(resume_split_arr)
print('Resuming from', n_iters_resume, 'on', split_string)
if len(resume_split_arr) == 0:
enc_path = '%s%s_%s_enc_iter_%d' % (pretrain_model_path, model_prefix, split_string, n_iters_resume)
dec_path = '%s%s_%s_dec_iter_%d' % (pretrain_model_path, model_prefix, split_string, n_iters_resume)
else:
enc_path = '%s%s_%s_enc_iter_%d' % (SNAPSHOT_DIR, model_prefix, split_string, n_iters_resume)
dec_path = '%s%s_%s_dec_iter_%d' % (SNAPSHOT_DIR, model_prefix, split_string, n_iters_resume)
agent.load(enc_path, dec_path)
loss_str = ''
if hasattr(encoder, 'n_sentences'): encoder.set_n_sentences(3)
for env_name, (env, evaluator) in val_envs.items():
result_path = '%s%s_%s_iter_%d.json' % (RESULT_DIR, model_prefix, env_name, n_iters_resume)
score_summary, env_loss_str, data_log = test_env(env_name, env, evaluator, agent, result_path, feedback_method, data_log, 1)
loss_str += env_loss_str
best_sr += score_summary['success_rate']
best_spl += score_summary['spl']
agent.env = train_env
print('\n'.join([str((k, round(v[0], 4))) for k, v in sorted(data_log.items())]))
best_score = current_best(data_log, -1, score_name)
best_eval = current_best(data_log, -1, 'spl_unseen')
print('Resumed', score_name, 'score:', best_score)
data_log = defaultdict(list)
if jump_iters>0:
idx = jump_iters
print('Jump to pretrain_n_iters', jump_iters)
try:
df_path = '%s%s_log.csv' % (PLOT_DIR, model_prefix)
df = pd.read_csv(df_path)
data_log = {key: list(df[key]) for key in df.keys() if key != 'Unnamed: 0'}
new_best = False
for v_id, v in enumerate(df['iteration']):
best_score_old = current_best(df, v_id, score_name)
if best_score < best_score_old:
best_score = best_score_old
best_model_iter = v
new_best = True
if new_best:
print('Best score found in plot file at ', best_model_iter,', best_score/best_model_iter reseted (model won\'t be reseted).')
except:
pass
if hasattr(encoder, 'n_sentences'): encoder.set_n_sentences(encoder_n_sentences)
if 0 <agent_params['schedule_ratio'] < 1.0:
print('Training with Scheduled Sampling, sampling ratio %.1f' % (agent_params['schedule_ratio']))
else:
print('Training with %s feedback' % feedback_method)
if optm == 'RMSprop':
optim_func = optim.RMSprop
elif optm == 'Adamax':
optim_func = optim.Adamax
else: # default: Adam
optim_func = optim.Adam
#encoder_param_lst = list()
#lst = list()
#for name, pa in encoder.named_parameters():
# if pa.requires_grad:
# #lst.append(name)
# encoder_param_lst.append(pa)
#encoder_optimizer = optim_func(encoder_param_lst, lr=learning_rate, weight_decay=weight_decay)
encoder_optimizer = optim_func(encoder.parameters(), lr=learning_rate, weight_decay=weight_decay)
decoder_optimizer = optim_func(decoder.parameters(), lr=learning_rate, weight_decay=weight_decay)
start = time.time()
epoch = 0
idx = idx-log_every
sc_started = False
finetune_start = False
best_model = {
'iter': -1,
'encoder': copy.deepcopy(agent.encoder.state_dict()),
'decoder': copy.deepcopy(agent.decoder.state_dict()),
'torch_cuda_rn': copy.deepcopy(torch.cuda.random.get_rng_state()),
'torch_rn': copy.deepcopy(torch.random.get_rng_state()),
'random': copy.deepcopy(random.getstate())
}
myidx = 0
while idx+log_every < n_iters:
idx += log_every
interval = min(log_every, n_iters - idx)
iter = idx + interval
myidx += interval
print("PROGRESS: {}%".format(round((myidx) * 100 / n_iters, 4)))
# scheduled
if schedule_anneal:
agent_params['schedule_ratio'] = max(params['schedule_ratio'], (1.0-float(iter)/n_iters))
if iter <= n_iters_resume:
epo_inc = agent.rollout_notrain(interval)
epoch += epo_inc
continue
# debug, add finetune for BERT
if (not finetune_start) and encoder_type == 'bert' and (0 < finetune_iters <= iter):
print("------start BERT finetune on iter %d------" % (iter))
finetune_start = True
agent.encoder.update = True
#learning_rate = 5e-5
if encoder_type == 'vlbert' and agent.encoder.__class__.__name__ in ['BertImgEncoder']:
if warmup_iters > 0 and myidx > warmup_iters:
agent.encoder.update = False
warmup_iters = -1
if (not finetune_start) and encoder_type == 'vlbert' and (0 < finetune_iters <= iter):
print("------start VLBERT finetune on iter %d------" % (iter))
finetune_start = True
agent.encoder.update = True
agent.env = finetunes[0]
del train_env
del val_envs
train_env = finetunes[0]
val_envs = finetunes[1]
#agent.env.batch_size = finetune_batchsize # change batchsize accordingly
#agent.env.reset(False)
#text_bert_param = agent.encoder.flip_text_bert_params(agent.encoder.update)
#encoder_optimizer.add_param_group({'params': text_bert_param})
#learning_rate = 5e-5
if sc_started or (SELF_CRITICAL_AFTER != -1 and iter > SELF_CRITICAL_AFTER):
if score_name!=sc_score_name:
print('score_name changed in SC', score_name, '->',sc_score_name)
temp_data_log = defaultdict(list)
for env_name, (env, evaluator) in val_envs.items():
_, _, temp_data_log = test_env(env_name, env, evaluator, agent,
'%s%s_%s_iter_%d.json' % (RESULT_DIR, model_prefix, env_name, best_model_iter), feedback_method, temp_data_log, 1)
agent.env = train_env
best_score = current_best(temp_data_log, -1, sc_score_name)
print('Best', sc_score_name, 'score:', best_score)
score_name = sc_score_name
if agent.decoder.ctrl_feature:
agent.decoder.ctrl_feature = False
print('Auxiliary task turned off.')
# jolin: self-critical
if (not sc_started) and iter == SELF_CRITICAL_AFTER + log_every:
print('SC step')
sc_started = True
print('Loading best model for SC from iter', best_model_iter)
#split_string = "-".join(train_env.splits)
split_string = "-".join(cur_split_arr)
#enc_path = '%s%s_%s_enc_iter_%d' % (SNAPSHOT_DIR, model_prefix, split_string, best_model_iter)
#dec_path = '%s%s_%s_dec_iter_%d' % (SNAPSHOT_DIR, model_prefix, split_string, best_model_iter)
if len(resume_split_arr) == 0:
enc_path = '%s%s_%s_enc_iter_%d' % (pretrain_model_path, model_prefix, split_string, best_model_iter)
dec_path = '%s%s_%s_dec_iter_%d' % (pretrain_model_path, model_prefix, split_string, best_model_iter)
else:
enc_path = '%s%s_%s_enc_iter_%d' % (SNAPSHOT_DIR, model_prefix, split_string, best_model_iter)
dec_path = '%s%s_%s_dec_iter_%d' % (SNAPSHOT_DIR, model_prefix, split_string, best_model_iter)
agent.load(enc_path, dec_path)
encoder_optimizer = optim.Adam(agent.encoder.parameters(), lr=sc_learning_rate, weight_decay=weight_decay)
decoder_optimizer = optim.Adam(agent.decoder.parameters(), lr=sc_learning_rate, weight_decay=weight_decay)
# idx = best_model_iter
agent.env.reset_epoch()
print('Using',sc_score_name,'for saving SC best model')
epo_inc = agent.rl_train(train_Eval, encoder_optimizer, decoder_optimizer, interval, sc_reward_scale, sc_discouted_immediate_r_scale, sc_length_scale)
else:
# Train for log_every interval
epo_inc = agent.train(encoder_optimizer, decoder_optimizer, interval, aux_n_iters, feedback=feedback_method)
# jolin: returned from self.env._next_minibatch(R2RBatch)
epoch += epo_inc
data_log['iteration'].append(iter)
train_losses = np.array(agent.losses)
assert len(train_losses) == interval
train_loss_avg = np.average(train_losses)
data_log['train loss'].append(train_loss_avg)
loss_str = 'train loss: %.4f' % train_loss_avg
if ctrl_feature:
if agent.decoder.ctrl_feature:
train_loss_ctrl_f_avg = np.average(np.array(agent.losses_ctrl_f))
data_log['loss_ctrl_f'].append(train_loss_ctrl_f_avg)
loss_str += ' loss_ctrl_f: %.4f' % train_loss_ctrl_f_avg
else:
data_log['loss_ctrl_f'].append(0.)
#split_string = "-".join(train_env.splits)
split_string = '-'.join(cur_split_arr)
enc_path = '%s%s_%s_enc_iter_%d' % (SNAPSHOT_DIR, model_prefix, split_string, iter)
dec_path = '%s%s_%s_dec_iter_%d' % (SNAPSHOT_DIR, model_prefix, split_string, iter)
#agent.save(enc_path, dec_path) # write or not
# Run validation
success_rate, spl = 0, 0
if hasattr(encoder, 'n_sentences'): encoder.set_n_sentences(3)
for env_name, (env, evaluator) in val_envs.items():
score_summary, env_loss_str, data_log = test_env(env_name, env, evaluator, agent,
'%s%s_%s_iter_%d.json' % (RESULT_DIR, model_prefix, env_name, iter), feedback_method, data_log, 1)
loss_str += "," + env_name
loss_str += env_loss_str
success_rate += score_summary['success_rate']
spl += score_summary['spl']
candidate_score = current_best(data_log, -1, score_name)
eval_candidate = current_best(data_log, -1, 'spl_unseen')
if candidate_score>best_score:
bert_pre_best_model_iter = best_model_iter
best_model_iter = iter
best_score = candidate_score
loss_str+=' best'+score_name.upper()
if monotonic:
agent.copy_seq2seq()
# best one
best_model['iter'] = iter
best_model['encoder'] = copy.deepcopy(agent.encoder.state_dict())
best_model['decoder'] = copy.deepcopy(agent.decoder.state_dict())
best_model['torch_cuda_rn'] = copy.deepcopy(torch.cuda.random.get_rng_state())
best_model['torch_rn'] = copy.deepcopy(torch.random.get_rng_state())
best_model['random'] = copy.deepcopy(random.getstate())
if spl>best_spl:
best_spl=spl
loss_str+=' bestSPL'
if success_rate>best_sr:
best_sr=success_rate
loss_str+=' bestSR'
if hasattr(encoder, 'n_sentences'):
encoder.set_n_sentences(encoder_n_sentences)
if finetune_start:
agent.env = finetunes[0]
else:
agent.env = train_env
ss_str = "ss_ratio %.2f" % (agent_params['schedule_ratio'])
print(('%s (%d %d %d%%) %s %s' % (timeSince(start, float(iter) / n_iters), iter, epoch, float(iter) / n_iters * 100, loss_str, ss_str)))
if eval_candidate > best_eval:
best_eval = eval_candidate
print("EVALERR: {}%".format(best_eval))
if save_ckpt != -1 and iter%save_ckpt == 0:
save_best_model(best_model, SNAPSHOT_DIR, model_prefix, split_string, best_model_iter)
df = pd.DataFrame(data_log)
df.set_index('iteration')
df_path = '%s%s_log.csv' % (PLOT_DIR, model_prefix)
write_num = 0
while (write_num < 10):
try:
df.to_csv(df_path)
break
except:
write_num += 1
# debug: torch save best
save_best_model(best_model, SNAPSHOT_DIR, model_prefix, split_string, best_model_iter)
return best_model_iter
def create_folders(path):
""" recursively create folders """
if not os.path.isdir(path):
while True:
try:
os.makedirs(path)
except:
pass
time.sleep(1)
else:
break
def save_best_model(best_model, SNAPSHOT_DIR, model_prefix, split_string, best_model_iter):
    """Persist the current best model (weights + RNG states) under SNAPSHOT_DIR.

    Files written:
      <prefix>_<splits>_enc_iter_<it>          encoder state dict
      <prefix>_<splits>_dec_iter_<it>          decoder state dict
      <dec path>.rng.gpu                       CUDA RNG state (CUDA machines only)
      <dec path>.rng                           CPU torch RNG state
      <dec path>.rng2                          pickled python `random` state
    """
    def _snapshot_path(role):
        # One-line purpose: build the on-disk name for the given model role.
        return '%s%s_%s_%s_iter_%d' % (
            SNAPSHOT_DIR, model_prefix, split_string, role, best_model_iter)

    enc_path = _snapshot_path('enc')
    dec_path = _snapshot_path('dec')
    torch.save(best_model['encoder'], enc_path)
    torch.save(best_model['decoder'], dec_path)
    # The GPU RNG state only exists (and is only restorable) on CUDA hosts.
    if torch.cuda.is_available():
        torch.save(best_model['torch_cuda_rn'], dec_path + '.rng.gpu')
    torch.save(best_model['torch_rn'], dec_path + '.rng')
    with open(dec_path + '.rng2', 'wb') as f:
        pickle.dump(best_model['random'], f)
def setup():
    ''' Build+Dump vocabulary for models other than bert.
    Create folders. Dump parameters '''
    global navigable_locs_path
    # Seed CPU and GPU RNGs for reproducibility.
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # Make sure every output folder exists (create_folders retries on races).
    if not os.path.exists(RESULT_DIR):
        #os.mkdir(RESULT_DIR)
        create_folders(RESULT_DIR)
    if not os.path.exists(PLOT_DIR):
        #os.mkdir(PLOT_DIR)
        create_folders(PLOT_DIR)
    if not os.path.exists(SNAPSHOT_DIR):
        #os.mkdir(SNAPSHOT_DIR)
        create_folders(SNAPSHOT_DIR)
    if not os.path.exists(navigable_locs_path):
        create_folders(navigable_locs_path)
    # BUGFIX: the original condition used `or` (`!= 'bert' or != 'gpt'`),
    # which is always True, so the word vocab was built even for bert/gpt
    # encoders that use their own subword tokenizers.  Only non-LM encoders
    # need a word-level vocabulary, per this function's docstring.
    if encoder_type != 'bert' and encoder_type != 'gpt':
        if not os.path.exists(TRAIN_VOCAB):
            write_vocab(build_vocab(splits=['train'], min_count=MIN_COUNT), TRAIN_VOCAB)
        if not os.path.exists(TRAINVAL_VOCAB):
            write_vocab(build_vocab(splits=['train', 'val_seen', 'val_unseen'], min_count=0), TRAINVAL_VOCAB)
    # NOTE(review): this mutates the global path in place, so setup() must
    # only be called once per process.
    navigable_locs_path += '/navigable_locs.json'
    print('navigable_locs_path', navigable_locs_path)
    preprocess_get_pano_states(navigable_locs_path)
    global nav_graphs
    if action_space == -1:  # load navigable location cache
        with open(navigable_locs_path, 'r') as f:
            nav_graphs = json.load(f)
    print('Parameters: ')
    print(json.dumps(params, indent=2))
    if is_train:
        with open(SNAPSHOT_DIR+'params.json', 'w') as fp:
            json.dump(params, fp)
def test_submission():
    ''' Train on combined training and validation sets, and generate test submission. '''
    # TODO: think how to add pretraining here
    setup()
    # Create a batch training environment that will also preprocess text
    if use_bert:
        tok = SplitTokenizer(0, MAX_INPUT_LENGTH)
    else:
        vocab = read_vocab(TRAINVAL_VOCAB)
        tok = Tokenizer(vocab=vocab, encoding_length=MAX_INPUT_LENGTH)
    feature_store = Feature(features, panoramic) # jolin
    train_env = R2RBatch(feature_store, nav_graphs, panoramic, action_space, ctrl_feature, encoder_type, batch_size=batch_size,
                         splits= submit_splits, tokenizer=tok, att_ctx_merge=att_ctx_merge) #, subgoal
    train_Eval = Evaluation(train_splits, encoder_type) #, subgoal) # jolin: add Evaluation() for reward calculation
    # Build models and train
    # enc_hidden_size = hidden_size // 2 if bidirectional else hidden_size
    # Encoder is selected by the global `encoder_type`; non-LM branches rely
    # on `vocab`, which is only bound when use_bert is False.
    if encoder_type == 'bert':
        encoder = BertEncoder(hidden_size, dropout_ratio, bidirectional, transformer_update, bert_n_layers, reverse_input, top_lstm).to(device)
    elif encoder_type == 'gpt':
        encoder = GptEncoder(hidden_size, dropout_ratio, bidirectional, transformer_update, bert_n_layers, reverse_input, top_lstm).to(device)
    elif encoder_type == 'transformer':
        if att_ctx_merge in ['mean','cat','max','sum']:
            encoder = MultiTransformerEncoder(len(vocab), transformer_emb_size, padding_idx, dropout_ratio,
                                              multi_share, pretrain_n_sentences, glove, heads, transformer_d_ff,
                                              hidden_size, num_layers=transformer_num_layers).to(device)
        else:
            assert not use_glove
            encoder = TransformerEncoder(len(vocab), transformer_emb_size, padding_idx, dropout_ratio,
                                         glove, heads, transformer_d_ff, hidden_size, num_layers=transformer_num_layers).to(device)
    else:
        if att_ctx_merge in ['mean','cat','max','sum']:
            encoder = EncoderMultiLSTM(len(vocab), word_embedding_size, enc_hidden_size, hidden_size, padding_idx,
                                       dropout_ratio, multi_share, pretrain_n_sentences, glove, encoder_params, bidirectional=bidirectional).to(device)
        else:
            encoder = EncoderLSTM(len(vocab), word_embedding_size, enc_hidden_size, hidden_size, padding_idx,
                                  dropout_ratio, glove=glove, bidirectional=bidirectional).to(device)
    decoder = AttnDecoderLSTM(Seq2SeqAgent.n_inputs(), Seq2SeqAgent.n_outputs(),
                              action_embedding_size, ctx_hidden_size, hidden_size, dropout_ratio,
                              FEATURE_SIZE, panoramic, action_space, ctrl_feature, ctrl_f_net, att_ctx_merge, ctx_dropout_ratio, decoder_params).to(device)
    if att_ctx_merge in ['mean','cat','max','sum']: encoder.set_n_sentences(3)
    # NOTE(review): `finetunes` is not defined in this scope (it is a local of
    # train_val), and this positional call does not match the train(...)
    # calls made from train_val -- looks stale; confirm before running.
    train(train_env, finetunes, train_Eval, encoder, 3, decoder, monotonic, n_iters, None, -1)
    # Generate test submission
    # NOTE(review): this local deliberately-or-not shadows the module-level
    # test_env() function for the rest of this function body.
    test_env = R2RBatch(feature_store, nav_graphs, panoramic, action_space, ctrl_feature, encoder_type, batch_size=batch_size, splits=['test'], tokenizer=tok, att_ctx_merge=att_ctx_merge) # , subgoal
    agent = Seq2SeqAgent(test_env, "", encoder, decoder,'resume', aux_ratio, decoder_init, agent_params, monotonic=monotonic,
                         episode_len=max_episode_len) # , subgoal
    agent.results_path = '%s%s_%s_iter_%d.json' % (RESULT_DIR, model_prefix, 'test', 20000)
    agent.test(use_dropout=False, feedback='argmax', beam_size=beam_size)
    agent.write_results(dump_result)
def train_val(n_iters_resume=0):
    ''' Train on the training set, and validate on seen and unseen splits.

    Pipeline: setup() -> tokenizer/feature store -> encoder (chosen by the
    global `encoder_type`) + decoder -> optional scheduled-sampling and
    pretraining phases -> main training phase.  Returns the iteration
    number of the best model.  Relies heavily on module-level globals
    (hyperparameters, paths, flags); indentation of a few assignments was
    reconstructed from context -- review before relying on edge cases.
    '''
    global n_iters_pretrain_resume
    setup()
    # Create a batch training environment that will also preprocess text
    if use_bert:
        tok = SplitTokenizer(0, MAX_INPUT_LENGTH)
    else:
        vocab = read_vocab(TRAIN_VOCAB)
        tok = Tokenizer(vocab=vocab, encoding_length=MAX_INPUT_LENGTH)
    feature_store = Feature(features, panoramic)
    # Build models and train
    #enc_hidden_size = hidden_size // 2 if bidirectional else hidden_size
    if encoder_type == 'vlbert':
        if att_ctx_merge in ['mean','cat','max','sum']:
            if pretrain_model_name is not None:
                #encoder = MultiAddLoadEncoder(FEATURE_ALL_SIZE, enc_hidden_size, hidden_size, dropout_ratio, bidirectional, transformer_update, bert_n_layers,reverse_input, top_lstm, multi_share, pretrain_n_sentences, vl_layers, pretrain_lm_model_path + pretrain_model_name, bert_type)
                encoder = MultiVilAddEncoder(FEATURE_ALL_SIZE, enc_hidden_size, hidden_size, dropout_ratio, bidirectional, transformer_update, bert_n_layers,reverse_input, top_lstm, multi_share, pretrain_n_sentences, vl_layers, bert_type)
                encoder.load_state_dict(torch.load(pretrain_lm_model_path + pretrain_model_name))
            else:
                encoder = MultiVilAddEncoder(FEATURE_ALL_SIZE, enc_hidden_size, hidden_size, dropout_ratio, bidirectional, transformer_update, bert_n_layers,reverse_input, top_lstm, multi_share, pretrain_n_sentences, vl_layers, bert_type)
        else:
            if pretrain_model_name is not None:
                print("Using the pretrained lm model from %s" %(pretrain_model_name))
                # Whole-module torch.load; runtime-only attributes are
                # patched onto the loaded object below.
                encoder = torch.load(pretrain_lm_model_path + pretrain_model_name)
                encoder.dropout_ratio = dropout_ratio
                encoder.drop = nn.Dropout(p=dropout_ratio)
                encoder.update = transformer_update
                encoder.reverse_input = reverse_input
                encoder.top_lstm = top_lstm
                encoder.pretrain = False
            else:
                #encoder = BertAddEncoder(FEATURE_ALL_SIZE,enc_hidden_size, hidden_size, dropout_ratio, bidirectional, transformer_update, bert_n_layers, reverse_input, top_lstm, vl_layers,bert_type)
                encoder = BertImgEncoder(FEATURE_ALL_SIZE,enc_hidden_size, hidden_size, dropout_ratio, bidirectional, transformer_update, bert_n_layers, reverse_input, top_lstm,bert_type)
                #encoder = VicEncoder(FEATURE_ALL_SIZE,enc_hidden_size, hidden_size, dropout_ratio, bidirectional, transformer_update, bert_n_layers, reverse_input, top_lstm, False,bert_type)
                encoder.pretrain = False
    elif encoder_type == 'bert':
        if att_ctx_merge in ['mean','cat','max','sum']:
            encoder = MultiBertEncoder(enc_hidden_size, hidden_size, dropout_ratio, bidirectional, transformer_update, bert_n_layers,
                                       reverse_input, top_lstm, multi_share, pretrain_n_sentences, bert_type).to(device)
        else:
            if pretrain_model_name is not None:
                encoder = HugLangEncoder(enc_hidden_size, hidden_size, dropout_ratio, bidirectional, transformer_update, bert_n_layers, reverse_input, top_lstm, bert_type).to(device)
                print("Using the pretrained lm model from %s" %(pretrain_model_name))
                premodel = BertForMaskedLM.from_pretrained(pretrain_model_name)
                encoder.bert = premodel.bert
            else:
                encoder = BertEncoder(enc_hidden_size, hidden_size, dropout_ratio, bidirectional, transformer_update, bert_n_layers, reverse_input, top_lstm, bert_type).to(device)
    elif encoder_type=='gpt':
        if att_ctx_merge in ['mean','cat','max','sum']:
            encoder = MultiGptEncoder(enc_hidden_size, hidden_size, dropout_ratio, bidirectional, transformer_update, bert_n_layers, reverse_input, top_lstm, multi_share, pretrain_n_sentences).to(device)
        else:
            encoder = GptEncoder(enc_hidden_size, hidden_size, dropout_ratio, bidirectional, transformer_update, bert_n_layers, reverse_input, top_lstm).to(device)
    elif encoder_type == 'transformer':
        if att_ctx_merge in ['mean','cat','max','sum']:
            encoder = MultiTransformerEncoder(len(vocab), transformer_emb_size, padding_idx, dropout_ratio,
                                              multi_share, pretrain_n_sentences, glove, heads, transformer_d_ff, enc_hidden_size, hidden_size, top_lstm, bidirectional, num_layers=transformer_num_layers).to(device)
        else:
            assert not use_glove
            encoder = TransformerEncoder(len(vocab), transformer_emb_size, padding_idx, dropout_ratio, glove, heads,
                                         transformer_d_ff, enc_hidden_size, hidden_size, top_lstm, bidirectional, num_layers=transformer_num_layers).to(device)
    else:
        if att_ctx_merge in ['mean','cat','max','sum']:
            encoder = EncoderMultiLSTM(len(vocab), word_embedding_size, enc_hidden_size, hidden_size, padding_idx,
                                       dropout_ratio, multi_share, pretrain_n_sentences, glove, encoder_params, bidirectional=bidirectional).to(device)
        else:
            encoder = EncoderLSTM(len(vocab), word_embedding_size, enc_hidden_size, hidden_size, padding_idx,
                                  dropout_ratio, glove=glove, bidirectional=bidirectional).to(device)
    decoder = AttnDecoderLSTM(Seq2SeqAgent.n_inputs(), Seq2SeqAgent.n_outputs(),
                              action_embedding_size, ctx_hidden_size, hidden_size, dropout_ratio,
                              FEATURE_SIZE, panoramic, action_space, ctrl_feature, ctrl_f_net, att_ctx_merge, ctx_dropout_ratio, decoder_params).to(device)
    if pretrain_decoder_name is not None:
        decoder.load_state_dict(torch.load(pretrain_lm_model_path + pretrain_decoder_name))
    #envs = EnvBatch(feature_store=feature_store, batch_size=batch_size)
    #if torch.cuda.device_count() > 1:
    #    encoder = CustomDataParallel(encoder)
    #    decoder = CustomDataParallel(decoder)
    encoder.cuda()
    decoder.cuda()
    # Create validation environments
    val_envs = {split: (R2RBatch(feature_store, nav_graphs, panoramic, action_space, ctrl_feature, encoder_type, batch_size=batch_size,
                                 splits=[split], tokenizer=tok, att_ctx_merge=att_ctx_merge), Evaluation([split], encoder_type)) for split in val_splits} # subgoal
    # Create training env
    train_env = R2RBatch(feature_store, nav_graphs, panoramic, action_space, ctrl_feature, encoder_type, batch_size=batch_size,
                         splits=train_splits, tokenizer=tok, att_ctx_merge=att_ctx_merge)
    train_Eval = Evaluation(train_splits, encoder_type)# jolin: add Evaluation() for reward calculation
    finetunes = None
    # Create pretraining env
    resume_splits = train_env.splits
    resume_splits_array = []
    cur_splits_array = []
    last_train_iters = -1
    if use_pretrain:
        print('Pretraining Step')
        pretrain_env = R2RBatch(feature_store, nav_graphs, panoramic, action_space, ctrl_feature, encoder_type, batch_size=batch_size,
                                splits=pretrain_splits, tokenizer=tok, att_ctx_merge=att_ctx_merge, min_n_sentences=pretrain_n_sentences)
        # pre-trained scheduled sampling
        if (agent_params['schedule_ratio'] > 0 and agent_params['schedule_ratio'] < 1.0) and ss_n_pretrain_iters > 0:
            print('Pretraining with Scheduled Sampling, sampling ratio %.1f' % (agent_params['schedule_ratio']))
            #cur_splits_array = pretrain_env.splits
            cur_splits_array = ['ss', 'pretrain']
            #best_model_ss_pretrain_iter = train(pretrain_env, None, encoder, decoder, None, ss_n_iters, pretrain_env.splits, pretrain_score_name, -1, log_every, val_envs=val_envs, n_iters_resume=n_iters_pretrain_resume)
            best_model_ss_pretrain_iter = train(pretrain_env, None, None, encoder, decoder, None, ss_n_pretrain_iters, resume_splits_array, cur_splits_array,
                                                pretrain_score_name, last_train_iters, log_every, val_envs=val_envs, n_iters_resume=n_iters_pretrain_resume,warmup_iters=warmup_iters)
            n_iters_pretrain_resume = best_model_ss_pretrain_iter
            # Disable scheduled sampling for the subsequent phase.
            agent_params['schedule_ratio'] = -1.0
            print('Changing to %s Pretraining with the best model at iteration %d' % (feedback_method, n_iters_pretrain_resume))
            resume_splits_array = ['ss', 'pretrain']
            last_train_iters = ss_n_pretrain_iters
        if train_in_pretrain:
            # training; may need to cancel training for pre-train
            cur_splits_array = ['pretrain']
            n_iters_resume = train(pretrain_env, None, None, encoder, decoder, None, pretrain_n_iters, resume_splits_array, cur_splits_array, #pretrain_env.splits,
                                   pretrain_score_name, last_train_iters, val_envs=val_envs, n_iters_resume=n_iters_pretrain_resume,warmup_iters=warmup_iters)
            resume_splits = pretrain_splits
            resume_splits_array = ['pretrain']
            last_train_iters = pretrain_n_iters
        else:
            print('Skip Train in pre-training!')
            n_iters_resume = n_iters_pretrain_resume
            resume_splits_array = ['ss', 'pretrain']
            # agent_params['schedule_ratio'] = params['schedule_ratio']
            # Restore the user-requested sampling ratio for the main phase.
            agent_params['schedule_ratio'] = params['schedule_ratio']
    else:
        if n_iters_resume>0: # use train splits
            pass
        elif n_iters_pretrain_resume>0:
            print('Skip pretraining but use pretrained model')
            n_iters_resume = n_iters_pretrain_resume
            resume_splits = pretrain_splits
            agent_params['schedule_ratio'] = params['schedule_ratio']
    if att_ctx_merge in ['mean','cat','max','sum']: encoder.set_n_sentences(3)
    print('Training Step')
    # scheduled sampling
    if 0 < agent_params['schedule_ratio'] < 1.0 and ss_n_iters > 0:
        print('Training with Scheduled Sampling, sampling ratio %.1f' % (agent_params['schedule_ratio']))
        # train_splits_array = resume_splits
        #resume_splits_array = ['pretrain']
        cur_splits_array = ['ss', 'train']
        best_model_ss_iter = train(train_env, finetunes, train_Eval, encoder, decoder, monotonic, ss_n_iters, resume_splits_array, cur_splits_array, #resume_splits,
                                   train_score_name, last_train_iters if use_pretrain else -1, val_envs=val_envs, n_iters_resume=n_iters_resume,warmup_iters=warmup_iters)
        n_iters_resume = best_model_ss_iter
        agent_params['schedule_ratio'] = -1.0
        print('Changing to %s training with the best model at iteration %d' % (feedback_method, n_iters_resume))
        resume_splits_array = ['ss', 'train']
        last_train_iters = ss_n_iters
    # training
    #resume_splits_array = ['ss', 'train']
    cur_splits_array = ['train']
    best_model_iter = train(train_env, finetunes, train_Eval, encoder, decoder, monotonic, n_iters, resume_splits_array, cur_splits_array, #resume_splits,
                            train_score_name, last_train_iters if use_pretrain else -1, val_envs=val_envs, n_iters_resume=n_iters_resume,warmup_iters=warmup_iters)
    print("The best model iter is %d" % (best_model_iter))
    return best_model_iter
def load_test(n_iters_resume):
    """Load a saved encoder/decoder snapshot at iteration *n_iters_resume*
    and evaluate it on every validation split (optionally with a speaker
    model for instruction re-ranking).  Also runs an extra single-sentence
    evaluation pass for multi-sentence encoders when `single_sentence_test`
    is set.  Relies on module-level globals for all hyperparameters/paths.
    """
    setup()
    # Create a batch training environment that will also preprocess text
    if use_bert:
        tok = SplitTokenizer(0, MAX_INPUT_LENGTH)
    else:
        vocab = read_vocab(TRAIN_VOCAB)
        tok = Tokenizer(vocab=vocab, encoding_length=MAX_INPUT_LENGTH)
    feature_store = Feature(features, panoramic) # jolin
    #envs = EnvBatch(feature_store=feature_store, batch_size=batch_size, beam_size=beam_size)
    train_env = R2RBatch(feature_store, nav_graphs, panoramic, action_space, ctrl_feature, encoder_type, beam_size, batch_size=batch_size,
                         splits=train_splits, tokenizer=tok, att_ctx_merge=att_ctx_merge) # , subgoal
    # Create validation environments
    val_envs = {split: (R2RBatch(feature_store, nav_graphs, panoramic, action_space, ctrl_feature, encoder_type, beam_size, batch_size=batch_size, splits=[split],
                                 tokenizer=tok, att_ctx_merge=att_ctx_merge), Evaluation([split], encoder_type)) for split in val_splits} # , subgoal
    # Build models and train
    # enc_hidden_size = hidden_size // 2 if bidirectional else hidden_size
    if encoder_type =='bert':
        if att_ctx_merge in ['mean','cat','max','sum']:
            encoder = MultiBertEncoder(enc_hidden_size, hidden_size, dropout_ratio, bidirectional, transformer_update, bert_n_layers,
                                       reverse_input, top_lstm, multi_share, pretrain_n_sentences, bert_type).to(device)
        else:
            encoder = BertEncoder(enc_hidden_size, hidden_size, dropout_ratio, bidirectional, transformer_update, bert_n_layers, reverse_input, top_lstm, bert_type).to(device)
            #encoder = BertEncoder(hidden_size, dropout_ratio, bidirectional, transformer_update, bert_n_layers, reverse_input, top_lstm).to(device)
    elif encoder_type == 'gpt':
        if att_ctx_merge in ['mean','cat','max','sum']:
            encoder = MultiGptEncoder(enc_hidden_size, hidden_size, dropout_ratio, bidirectional, transformer_update, bert_n_layers,
                                      reverse_input, top_lstm, multi_share, pretrain_n_sentences).to(device)
        else:
            encoder = GptEncoder(enc_hidden_size, hidden_size, dropout_ratio, bidirectional, transformer_update, bert_n_layers, reverse_input, top_lstm).to(device)
            #encoder = GptEncoder(hidden_size, dropout_ratio, bidirectional, transformer_update, bert_n_layers, reverse_input, top_lstm).to(device)
    elif encoder_type == 'transformer':
        if att_ctx_merge in ['mean','cat','max','sum']:
            encoder = MultiTransformerEncoder(len(vocab), transformer_emb_size, padding_idx, dropout_ratio,
                                              multi_share, 3, glove, heads, transformer_d_ff, hidden_size, num_layers=transformer_num_layers).to(device)
        else:
            assert not use_glove
            encoder = TransformerEncoder(len(vocab), transformer_emb_size, padding_idx, dropout_ratio,
                                         glove, heads, transformer_d_ff, hidden_size, num_layers=transformer_num_layers).to(device)
    else:
        if att_ctx_merge in ['mean','cat','max','sum']:
            encoder = EncoderMultiLSTM(len(vocab), word_embedding_size, enc_hidden_size, hidden_size, padding_idx,
                                       dropout_ratio, multi_share, 3, glove, encoder_params, bidirectional=bidirectional).to(device)
        else:
            encoder = EncoderLSTM(len(vocab), word_embedding_size, enc_hidden_size, hidden_size, padding_idx,
                                  dropout_ratio, glove=glove, bidirectional=bidirectional).to(device)
    decoder = AttnDecoderLSTM(Seq2SeqAgent.n_inputs(), Seq2SeqAgent.n_outputs(),
                              action_embedding_size, ctx_hidden_size, hidden_size, dropout_ratio,
                              FEATURE_SIZE, panoramic, action_space, ctrl_feature, ctrl_f_net, att_ctx_merge, ctx_dropout_ratio, decoder_params).to(device)
    agent = Seq2SeqAgent(train_env, "", encoder, decoder, 'resume', aux_ratio, decoder_init, agent_params, monotonic, max_episode_len, state_factored=state_factored) # , subgoal
    #split_string = "-".join(train_splits)
    cur_split_arr = ['train']
    split_string = "-".join(cur_split_arr)
    #enc_path = '%s%s_%s_enc_iter_%d' % (SNAPSHOT_DIR, model_prefix, split_string, n_iters_resume)
    #dec_path = '%s%s_%s_dec_iter_%d' % (SNAPSHOT_DIR, model_prefix, split_string, n_iters_resume)
    # Snapshots are read from pretrain_model_path, not SNAPSHOT_DIR.
    enc_path = '%s%s_%s_enc_iter_%d' % (pretrain_model_path, model_prefix, split_string, n_iters_resume)
    dec_path = '%s%s_%s_dec_iter_%d' % (pretrain_model_path, model_prefix, split_string, n_iters_resume)
    agent.load(enc_path, dec_path)
    ###############################load speaker#################################
    if use_speaker:
        print('Loading speaker for inference...')
        from collections import namedtuple
        SpeakerArgs = namedtuple('SpeakerArgs',['use_train_subset', 'n_iters', 'no_save','result_dir','snapshot_dir','plot_dir','seed','image_feature_type','image_feature_datasets'])
        from speaker import train_speaker
        speaker_args = SpeakerArgs(use_train_subset=False, n_iters=20000,no_save=False,result_dir=train_speaker.RESULT_DIR, snapshot_dir=train_speaker.SNAPSHOT_DIR, plot_dir=train_speaker.PLOT_DIR,seed=10,image_feature_datasets=['imagenet'],image_feature_type=['mean_pooled'])
        speaker, _, _ = train_speaker.train_setup(speaker_args)
        load_args = {'map_location':device}
        speaker.load(speaker_prefix, **load_args)
        print('Speaker loaded')
    else:
        speaker = None
    ############################################################################
    loss_str, data_log = '', defaultdict(list)
    for env_name, (env, evaluator) in val_envs.items():
        json_path = '%s%s_%s_iter_%d.json' % (RESULT_DIR, model_prefix, env_name, n_iters_resume)
        score_summary, env_loss_str, data_log = test_env(env_name, env, evaluator, agent, json_path, feedback_method, data_log, beam_size, speaker)
        loss_str += env_loss_str
    print('Result of Iteration ', n_iters_resume)
    print('\n'.join([str((k, round(v[0], 4))) for k, v in sorted(data_log.items())]))
    print('=========================================================')
    if hasattr(encoder, 'n_sentences') and single_sentence_test:
        # Re-run evaluation feeding one instruction at a time.
        encoder.set_n_sentences(1)
        print('====Additional test for single instruction performance============')
        # Create validation environments
        val_envs = {split: (
            R2RBatch(feature_store, nav_graphs, panoramic, action_space, ctrl_feature, encoder_type, beam_size=beam_size, batch_size=batch_size,
                     splits=[split], tokenizer=tok, att_ctx_merge='Eval'), Evaluation([split], encoder_type)) for split in val_splits if split!='test'} # , subgoal
        loss_str, data_log = '', defaultdict(list)
        for env_name, (env, evaluator) in val_envs.items():
            json_path = '%s%s_%s_iter_%d_add.json' % (RESULT_DIR, model_prefix, env_name, n_iters_resume)
            score_summary, env_loss_str, data_log = test_env(env_name, env, evaluator, agent, json_path, feedback_method, data_log, beam_size)
            loss_str += env_loss_str
        print('Result of Iteration ', n_iters_resume)
        print('\n'.join([str((k, round(v[0], 4))) for k, v in sorted(data_log.items())]))
        print('====Additional test for single instruction performance finished===')
def test_env(env_name, env, evaluator, agent, results_path, feedback, data_log, beam_size, speaker=None):
    """Evaluate *agent* on one environment and collect metrics.

    Temporarily points the agent at *env*/*results_path*, runs a greedy
    (argmax) rollout, scores the results, appends each metric to *data_log*,
    then restores the agent's original env/results_path.

    Returns (score_summary, env_loss_str, data_log); score_summary stays
    None for the 'test' split, which has no ground truth to score against.
    """
    save_env = agent.env # for restore
    save_results_path = agent.results_path # for restore
    agent.env = env
    agent.results_path = results_path
    env_loss_str = ""
    """
    if env_name!='test' and beam_size==1:
        # Get validation loss under the same conditions as training
        agent.test(use_dropout=True, feedback=feedback, allow_cheat=True, beam_size=beam_size, successors=successors, speaker=(None, None, None, None))
        val_losses = np.array(agent.losses)
        val_loss_avg = np.average(val_losses)
        data_log['%s loss' % env_name].append(val_loss_avg)
        if ctrl_feature:
            if agent.decoder.ctrl_feature:
                val_losses_ctrl_f = np.array(agent.losses_ctrl_f)
                val_loss_ctrl_f_avg = np.average(val_losses_ctrl_f)
                data_log['%s loss_ctrl_f' % env_name].append(val_loss_ctrl_f_avg)
            else:
                data_log['%s loss_ctrl_f' % env_name].append(0.)
    """
    # Get validation distance from goal under test evaluation conditions
    agent.test(use_dropout=False, feedback='argmax', beam_size=beam_size, successors=successors, speaker=(speaker,speaker_weight,speaker_merge,evaluator))
    output = agent.write_results(dump_result)
    score_summary = None
    if env_name!='test':
        if dump_result:
            score_summary, _ = evaluator.score(agent.results_path)
        else:
            score_summary, _ = evaluator.score_output(output)
        # BUGFIX: this branch formatted `val_loss_ctrl_f_avg`, which is only
        # defined inside the validation-loss block commented out above, so
        # reaching it raised NameError.  Disabled until that block returns.
        # if beam_size == 1:
        #     #env_loss_str += ', %s loss: %.4f' % (env_name, val_loss_avg)
        #     if ctrl_feature:
        #         if agent.decoder.ctrl_feature:
        #             env_loss_str += ', %s loss_ctrl_f: %.4f' % (env_name, val_loss_ctrl_f_avg)
        for metric, val in score_summary.items():
            data_log['%s %s' % (env_name, metric)].append(val)
            if metric in ['success_rate','spl']:
                env_loss_str += ', %s: %.3f' % (metric, val)
    # restore
    agent.env = save_env
    agent.results_path = save_results_path
    return score_summary, env_loss_str, data_log
# Entry-point dispatch: `is_train` (parsed from the CLI/params above) picks
# between training+validation and snapshot evaluation.
# NOTE(review): this executes on import as well; consider wrapping in an
# `if __name__ == "__main__":` guard.
if is_train:
    assert beam_size==1  # beam search is not supported during training
    N_ITERS_RESUME = train_val(N_ITERS_RESUME) # resume from iter
else:
    load_test(N_ITERS_RESUME) # test iter
    # test_submission()
| [
"[email protected]"
] | |
ee853b1f9ace43cba6d45251c3e5f56064033d04 | a7d497669dc91e6432216145a550755d42f6bb69 | /src/cogent3/util/transform.py | e6859405cf90f5a8c193163b7ae0393da4be8a4f | [
"BSD-3-Clause"
] | permissive | jbw900/cogent3 | 8887ee4b8be02a2086ad8d6fcaa54afe462baee5 | 9168c9b86d851992d008c8f730a5516b89aef835 | refs/heads/master | 2020-09-10T19:19:36.443143 | 2019-11-14T23:48:34 | 2019-11-14T23:48:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,677 | py | #!/usr/bin/env python
"""Provides transformations of functions and other objects.
Includes:
Standard combinatorial higher-order functions adapted from David Mertz (2003),
"Text Processing in Python", Chapter 1.
Functions for performing complex tests on strings, e.g. includes_any or
includes_all.
Functions for generating combinations, permutations, or cartesian products
of lists.
"""
__author__ = "Sandra Smit"
__copyright__ = "Copyright 2007-2019, The Cogent Project"
__credits__ = ["Sandra Smit", "Rob Knight", "Zongzhi Liu"]
__license__ = "BSD-3"
__version__ = "2019.10.24a"
__maintainer__ = "Sandra Smit"
__email__ = "[email protected]"
__status__ = "Production"
maketrans = str.maketrans
# standard combinatorial HOF's from Mertz
def per_shortest(total, x, y):
    """Divides total by min(len(x), len(y)).

    Useful for normalizing per-item results from sequences that are zipped
    together. Always returns 0 if one of the sequences is empty (to
    avoid divide by zero error).
    """
    denominator = min(len(x), len(y))
    return total / denominator if denominator else 0
def per_longest(total, x, y):
    """Divides total by max(len(x), len(y)).

    Useful for normalizing per-item results from sequences that are zipped
    together. Always returns 0 if one of the sequences is empty (to
    avoid divide by zero error).
    """
    denominator = max(len(x), len(y))
    return total / denominator if denominator else 0
class for_seq(object):
    """Returns function that applies f(i,j) to i,j in zip(first, second).

    f: f(i,j) applying to elements of the sequence.
    aggregator: method to reduce the list of results to a scalar. Default: sum.
    normalizer: f(total, i, j) that normalizes the total as a function of
    i and j. Default is length_normalizer (divides by the length of the shorter
    of i and j). If normalizer is None, no normalization is performed.

    Will always truncate to length of the shorter sequence (because of the use
    of zip).
    """

    def __init__(self, f, aggregator=sum, normalizer=per_shortest):
        self.f = f
        self.aggregator = aggregator
        self.normalizer = normalizer

    def __call__(self, first, second):
        # Apply the pairwise function across the zipped sequences, then
        # reduce the per-pair results to a single scalar.
        pair_fn = self.f
        total = self.aggregator([pair_fn(a, b) for a, b in zip(first, second)])
        if self.normalizer is None:
            return total
        return self.normalizer(total, first, second)
# convenience functions for modifying objects
class KeepChars(object):
"""Returns a filter object o(s): call to return a filtered string.
Specifically, strips out everything in s that is not in keep.
This filter is case sensitive by default.
"""
allchars = bytes(range(256))
def __init__(self, keep, case_sens=True):
"""Returns a new KeepChars object, based on string keep"""
if not case_sens:
low = keep.lower()
up = keep.upper()
keep = low + up
keep = keep.encode("utf-8")
self._strip_table = dict([(c, None) for c in self.allchars if c not in keep])
def __call__(self, s):
"""f(s) -> s, translates using self.allchars and self.delchars"""
if s is None:
raise TypeError
if isinstance(s, bytes):
s = s.decode("utf8")
s = str(s)
return s.translate(self._strip_table)
def first_index_in_set(seq, items):
    """Returns index of first occurrence of any of items in seq, or None."""
    return next(
        (idx for idx, element in enumerate(seq) if element in items),
        None,
    )
| [
"[email protected]"
] | |
2ed208d6f7e98069fb6da757364000b2bbd9ed4e | 6b3cccf29a604cf6e433bf411f71c9f2692e1c93 | /openapi_core/schema/schemas/unmarshallers.py | b3d0ece1a8b9e32093802a0be93761dfc1ecadd3 | [
"BSD-3-Clause"
] | permissive | srgkm/openapi-core | d826f942fb4551e4d7193e4cb7c156a48c2feb0b | 4b712cb2b5d045166cecce89ca9e47eb3da5163f | refs/heads/master | 2020-12-13T21:16:23.803664 | 2020-01-13T10:43:32 | 2020-01-13T10:43:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,844 | py | from six import text_type, binary_type, integer_types
from openapi_core.schema.schemas.enums import SchemaFormat, SchemaType
from openapi_core.schema.schemas.exceptions import (
InvalidCustomFormatSchemaValue,
UnmarshallerStrictTypeError,
FormatterNotFoundError,
)
from openapi_core.schema.schemas.util import (
forcebool, format_date, format_datetime, format_byte, format_uuid,
format_number,
)
class StrictUnmarshaller(object):
    """Base unmarshaller that can enforce the python types in STRICT_TYPES.

    Subclasses declare STRICT_TYPES; with an empty tuple (the default) no
    type check is performed and the value passes through unchanged.
    """

    STRICT_TYPES = ()

    def __call__(self, value, type_format=SchemaFormat.NONE, strict=True):
        allowed = self.STRICT_TYPES
        # Enforce only when strict mode is on AND a subclass declared types.
        if allowed and strict and not isinstance(value, allowed):
            raise UnmarshallerStrictTypeError(value, allowed)
        return value
class PrimitiveTypeUnmarshaller(StrictUnmarshaller):
    """Unmarshals a primitive value via a format-specific casting function.

    Built-in formats resolve through FORMATTERS; unknown format names fall
    back to the user-supplied ``custom_formatters`` mapping.
    """

    FORMATTERS = {
        SchemaFormat.NONE: lambda x: x,
    }

    def __init__(self, custom_formatters=None):
        self.custom_formatters = {} if custom_formatters is None else custom_formatters

    def __call__(self, value, type_format=SchemaFormat.NONE, strict=True):
        # Run the strict type check from the base class first.
        value = super(PrimitiveTypeUnmarshaller, self).__call__(
            value, type_format=type_format, strict=strict)
        formatter = self._resolve_formatter(type_format)
        if formatter is None:
            raise FormatterNotFoundError(value, type_format)
        try:
            return formatter(value)
        except ValueError as exc:
            raise InvalidCustomFormatSchemaValue(value, type_format, exc)

    def _resolve_formatter(self, type_format):
        # Prefer a built-in SchemaFormat; otherwise look up a custom one.
        try:
            schema_format = SchemaFormat(type_format)
        except ValueError:
            return self.custom_formatters.get(type_format)
        return self.get_formatters().get(schema_format)

    def get_formatters(self):
        return self.FORMATTERS
class StringUnmarshaller(PrimitiveTypeUnmarshaller):
    """Unmarshals OpenAPI ``string``-typed values.

    Strict mode accepts str/bytes; each OpenAPI ``format`` maps to a
    casting helper (dates, datetimes, UUIDs, base64 bytes, ...).
    """

    STRICT_TYPES = (text_type, binary_type)  # str / bytes
    FORMATTERS = {
        SchemaFormat.NONE: text_type,
        SchemaFormat.PASSWORD: text_type,
        SchemaFormat.DATE: format_date,
        SchemaFormat.DATETIME: format_datetime,
        SchemaFormat.BINARY: binary_type,
        SchemaFormat.UUID: format_uuid,
        SchemaFormat.BYTE: format_byte,
    }
class IntegerUnmarshaller(PrimitiveTypeUnmarshaller):
    """Unmarshals OpenAPI ``integer``-typed values (int32/int64 both cast to int)."""

    STRICT_TYPES = integer_types  # int (plus long on py2, via six)
    FORMATTERS = {
        SchemaFormat.NONE: int,
        SchemaFormat.INT32: int,
        SchemaFormat.INT64: int,
    }
class NumberUnmarshaller(PrimitiveTypeUnmarshaller):
    """Unmarshals OpenAPI ``number``-typed values.

    Strict mode accepts both floats and ints; format_number preserves the
    incoming numeric type, while float/double formats cast to float.
    """

    STRICT_TYPES = (float, ) + integer_types
    FORMATTERS = {
        SchemaFormat.NONE: format_number,
        SchemaFormat.FLOAT: float,
        SchemaFormat.DOUBLE: float,
    }
class BooleanUnmarshaller(PrimitiveTypeUnmarshaller):
    """Unmarshals OpenAPI ``boolean``-typed values via forcebool (handles string forms)."""

    STRICT_TYPES = (bool, )
    FORMATTERS = {
        SchemaFormat.NONE: forcebool,
    }
| [
"[email protected]"
] | |
748aab2283fd79854e093608d29d48ce9b13ead4 | 53c91272444bfab92e7e89e0358047b27eab1125 | /03.代码/豆瓣评论/scrapydouban/scrapydouban/middlewares.py | 61c53041598417f8298ce947bccbc3bfded65d56 | [] | no_license | MrFiona/python_module_summary | 2bbf9f30e0fbfe302e7e6c429754fa7bf4bfc411 | 4e36f6f5f6abed10fc06b16b0ed7c12cde7746d0 | refs/heads/master | 2021-01-20T03:54:38.105298 | 2019-01-07T07:28:36 | 2019-01-07T07:28:36 | 101,373,212 | 2 | 0 | null | 2018-04-15T05:56:45 | 2017-08-25T06:28:52 | Jupyter Notebook | UTF-8 | Python | false | false | 1,886 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class ScrapydoubanSpiderMiddleware(object):
    """Spider middleware for the scrapydouban project (default template).

    BUGFIX: the four ``process_*`` hooks were defined without ``self``,
    so Scrapy's instance-bound calls raised TypeError; ``self`` is now the
    first parameter, matching the standard Scrapy middleware template.
    """
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
"[email protected]"
] | |
fa6323760616793f168150c475710ec0666cf457 | 8b441f592a6deb9b0a515cbd92bb4663ad79ffe4 | /churn/delivery/old/churn_delivery_log_time.py | 3b110eaa1ce83e7b40343bb350296743681d4009 | [] | no_license | carnaum2/use-cases | 0d391a6a10bb70b60a4025152a278b0e4c595d01 | 24920e3828234da691ab643b6dd9a0aa0a5c0df5 | refs/heads/master | 2022-12-07T03:41:34.299274 | 2020-09-07T10:20:32 | 2020-09-07T10:20:32 | 293,249,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,726 | py | # coding: utf-8
import sys
import datetime as dt
import os
logger = None
from pyspark.sql.functions import size, coalesce, col, lit, collect_list, udf, when, desc
from pyspark.sql.utils import AnalysisException
def set_paths_and_logger():
    '''
    Make the project packages importable and configure the run logger.

    Deduces the repo root from the script location (special-casing the
    deliveries_churn deployment path), appends the repo root and "use-cases"
    to sys.path, prepends "amdocs_informational_dataset", and creates a
    timestamped log file under $BDA_USER_HOME/logging.

    :return: the configured logger
    '''
    import sys, os, re
    pathname = os.path.dirname(sys.argv[0])  # abs path to file (not included)
    print(pathname)
    if pathname.startswith("/var/SP/data/bdpmdses/deliveries_churn/"):
        import re
        # Deployment layout: hard-coded sample path used only to extract the prefix before "use-cases".
        root_dir = re.match("^(.*)use-cases(.*)",
                            "/var/SP/data/bdpmdses/deliveries_churn/version1/use-cases/churn/delivery").group(1)
        #from churn.utils.constants import CHURN_DELIVERIES_DIR
        #root_dir = CHURN_DELIVERIES_DIR
    else:
        # NOTE(review): if pathname does not contain "use-cases/churn", re.match returns
        # None and .group(1) raises AttributeError — the script assumes it is run from the repo.
        root_dir = re.match("(.*)use-cases/churn(.*)", pathname).group(1)
    print("Detected '{}' as root dir".format(root_dir))

    if root_dir not in sys.path:
        sys.path.append(root_dir)
        print("Added '{}' to path".format(root_dir))

    mypath = os.path.join(root_dir, "use-cases")
    if mypath not in sys.path:
        sys.path.append(mypath)
        print("Added '{}' to path".format(mypath))

    # Prepended (insert at 0) so the amdocs dataset modules shadow any duplicates.
    mypath = os.path.join(root_dir, "amdocs_informational_dataset")
    if mypath not in sys.path:
        sys.path.insert(0, mypath)
        print("Added '{}' to path".format(mypath))

    import pykhaos.utils.custom_logger as clogger
    logging_file = os.path.join(os.environ.get('BDA_USER_HOME', ''), "logging",
                                "churn_delivery_log_time_" + dt.datetime.now().strftime("%Y%m%d_%H%M%S") + ".log")
    logger = clogger.configure_logger(log_filename=logging_file, std_channel=sys.stderr, logger_name="")
    logger.info("Logging to file {}".format(logging_file))

    # Silence matplotlib's verbose DEBUG output.
    import logging
    logging.getLogger('matplotlib').setLevel(logging.WARNING)

    return logger
def set_training_cycles(spark, closing_day, segmentfilter):
    """Determine the training cycle window (ini, end) for a closing day / segment.

    If scores were already computed for this closing day and segment, reuse the
    training window stored with them; otherwise derive one automatically from
    today's date and the module-level `horizon` (number of cycles ahead).

    Args:
        spark: active SparkSession.
        closing_day: closing day as 'YYYYMMDD'.
        segmentfilter: segment name, e.g. 'onlymob' or 'mobileandfbb'.

    Returns:
        (tr_cycle_ini, tr_cycle_end) tuple of 'YYYYMMDD' strings.
    """
    # Check if there are some previous tr_cycle_
    from churn.datapreparation.general.model_outputs_manager import get_training_dates
    existing_models = get_training_dates(spark, closing_day, segmentfilter)

    if not existing_models:
        if logger: logger.info(
            "tr_cycle_ini and tr_cycle_end were not specified by user and there are not computed scores. They'll by computed automatically using horizon={}....".format(
                horizon))
        from pykhaos.utils.date_functions import move_date_n_cycles
        import datetime
        today_str = datetime.datetime.today().strftime("%Y%m%d")
        # first: go to the closest cycle (-1); then, move backwards `horizon` cycles
        tr_cycle_ini = move_date_n_cycles(today_str, n=-(horizon + 1))
        tr_cycle_end = tr_cycle_ini
    else:
        # training_closing_date is stored as 'YYYYMMDD..YYYYMMDD'; slice out both bounds.
        training_closing_date = existing_models[0]["training_closing_date"]
        tr_cycle_ini = training_closing_date[:8]
        tr_cycle_end = training_closing_date[10:]
        # BUGFIX: the original format string ended with 'tr_end={' (missing the
        # closing brace), which made str.format() raise
        # "ValueError: Single '{' encountered in format string" at runtime.
        if logger: logger.info(
            "tr_cycle_ini and tr_cycle_end were not specified by user but we found some previous scores with tr_ini={} tr_end={}".format(
                tr_cycle_ini, tr_cycle_end))
    # FIXME falta comprobar que cumplen con el horizon
    return tr_cycle_ini, tr_cycle_end
if __name__ == "__main__":

    # Point Spark/Hadoop at the cluster installation before anything imports pyspark.
    os.environ["SPARK_HOME"] = "/opt/cloudera/parcels/SPARK2/lib/spark2"
    os.environ["HADOOP_CONF_DIR"] = "/opt/cloudera/parcels/SPARK2/lib/spark2/conf/yarn-conf"

    logger = set_paths_and_logger()

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # ARGPARSE
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    import argparse

    parser = argparse.ArgumentParser(
        description="Run churn_delivery XXXXXXXX -c YYYYMMDD",
        epilog='Please report bugs and issues to Cristina <[email protected]>')
    parser.add_argument('-c', '--closing_day', metavar='<YYYYMMDD>', type=str, required=True,
                        help='Closing day YYYYMMDD (same used for the car generation)')
    parser.add_argument('-s', '--starting_day', metavar='<YYYYMMDD>', type=str, required=False,
                        help='starting day for car')
    parser.add_argument('--tr_ini', metavar='<YYYYMMDD>', type=str, required=False,
                        help='starting day for training cycle. If not specified, the script compute it automatically')
    parser.add_argument('--tr_end', metavar='<YYYYMMDD>', type=str, required=False,
                        help='ending day for training cycle. If not specified, the script compute it automatically')
    parser.add_argument('--check', action='store_true', help='Run the checks. Not compatible with delivery argument')
    parser.add_argument('--delivery', action='store_true', help='Run the delivery. Not compatible with check argument')
    parser.add_argument('-v', '--version', metavar='<VERSION>', type=str, required=False,
                        help='Version for metadata table. Default: no version')
    parser.add_argument('--horizon', metavar='<integer>', type=int, required=False, default=8,
                        help='prediction horizon')

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # INPUT
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    args = parser.parse_args()
    print(args)

    # split(" ")[0] strips any accidental trailing text pasted after the date.
    closing_day = args.closing_day.split(" ")[0]
    starting_day = args.starting_day
    tr_cycle_ini = args.tr_ini
    tr_cycle_end = args.tr_end
    check = args.check
    delivery = args.delivery
    metadata_version = args.version
    horizon = args.horizon

    from pykhaos.utils.date_functions import move_date_n_cycles, move_date_n_days

    # Exactly one of --check / --delivery must be given.
    if delivery == check:
        if logger: logger.critical("Invalid input parameters: delivery={} check={}".format(delivery, check))
        sys.exit()

    if not closing_day:
        print("Closing day is a required argument")
        sys.exit()

    if not starting_day:
        print("starting_day will be computed automatically as: closing_day - 4 cycles")
        starting_day = move_date_n_days(move_date_n_cycles(closing_day, n=-4), n=1)
    else:
        starting_day = starting_day.split(" ")[0]

    if not metadata_version:
        metadata_version = "1.1"

    # Both training-cycle bounds must be given together, or neither.
    if not tr_cycle_ini and tr_cycle_end:
        if logger: logger.critical("If tr_cycle_end is specified, tr_cycle_ini must be specified. Program will finish here!")
        sys.exit()
    elif tr_cycle_ini and not tr_cycle_end:
        if logger: logger.critical("If tr_cycle_ini is specified, tr_cycle_end must be specified. Program will finish here!")
        sys.exit()
    elif tr_cycle_ini and tr_cycle_end:
        tr_cycle_ini =tr_cycle_ini.split(" ")[0]
        tr_cycle_end = tr_cycle_end.split(" ")[0]

        # check input values
        # NOTE(review): this exits when tr_ini < tr_end, which contradicts the
        # message "tr_ini must be <= than tr_end" — confirm the intended comparison.
        if tr_cycle_ini < tr_cycle_end:
            if logger: logger.critical("tr_ini must be <= than tr_end. Program will finish here!")
            sys.exit()

        from pykhaos.utils.date_functions import move_date_n_cycles, count_nb_cycles
        today_str = dt.datetime.today().strftime("%Y%m%d")
        # The training window must leave at least `horizon` cycles before closing_day.
        if count_nb_cycles(tr_cycle_end, closing_day) < horizon:
            if logger: logger.critical("tr_end ({}) must be > horizon ({}). ".format(tr_cycle_end, horizon))
            if logger: logger.critical("   Hint: tr_end={} could be a valid value for a horizon={}".format(move_date_n_cycles(today_str, n=-(horizon+1)), horizon))
            if logger: logger.critical("Program will finish here!")
            sys.exit()

    CLOSING_DAY = closing_day
    from pykhaos.utils.date_functions import move_date_n_cycles
    prev_closing_day = move_date_n_cycles(CLOSING_DAY, n=-1)
if logger: logger.info("")
if logger: logger.info("")
if logger: logger.info("")
if logger: logger.info("")
if logger: logger.info("- - - - - - - - - - - - - - - - - - - - - - - - ")
if logger: logger.info("Input params:")
if logger: logger.info(" closing_day='{}'".format(CLOSING_DAY))
if logger: logger.info(" starting_day='{}'".format(starting_day))
if logger: logger.info(" tr_ini_cycle={}".format(tr_cycle_ini))
if logger: logger.info(" tr_end_cycle={}".format(tr_cycle_end))
if logger: logger.info(" metadata_version='{}'".format(metadata_version))
if logger: logger.info(" delivery={}".format(delivery))
if logger: logger.info(" check={}".format(check))
if logger: logger.info("- - - - - - - - - - - - - - - - - - - - - - - - ")
if logger: logger.info("")
if logger: logger.info("")
if logger: logger.info("")
if logger: logger.info("")
from churn.delivery.time_logger import TimeLogger
time_logger = TimeLogger(process_name="churn_delivery_{}".format(closing_day))
from churn.utils.general_functions import init_spark
spark = init_spark("churn_delivery")
sc = spark.sparkContext
from pykhaos.utils.scala_wrapper import get_scala_sc
scala_sc = get_scala_sc(spark)
from churn.delivery.checkers_class import check_car, check_car_preparado, check_extra_feats, check_sql, \
check_scores_new, check_join_car, check_levers_model
from churn.delivery.printers_class import format_table_msg, format_table_msg
import time
start_time_total = time.time()
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # CHECK SECTION
    # Build one checker object per pipeline stage; each knows how to verify
    # whether its stage has already been computed for this closing day.
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    if logger: logger.info("Entering check section... \n")

    from churn.delivery.checkers_class import checker, check_join_car, check_extra_feats, check_scores_new, \
        check_levers_model, check_car, check_car_preparado, check_delivery_file
    from churn.delivery.printers_class import *  # print_msg, format_modules_msg, SECTION_CAR, format_table_msg
    from functools import partial

    checker_car_obj = checker(spark, partial(check_car, spark, closing_day), partial(format_modules_msg), closing_day,
                              SECTION_CAR)
    checker_join_prev_car_obj = checker(spark, partial(check_join_car, spark, prev_closing_day),
                                        partial(format_table_msg), closing_day, SECTION_JOIN_CAR)
    checker_extra_feats_obj = checker(spark, partial(check_extra_feats, spark, closing_day),
                                      partial(format_modules_msg), closing_day, SECTION_EXTRA_FEATS)
    checker_prepared_car_obj = checker(spark, partial(check_car_preparado, spark, closing_day),
                                       partial(format_table_msg), closing_day, SECTION_CAR_PREPARADO)
    checker_scores_onlymob_obj = checker(spark, partial(check_scores_new, spark, closing_day, segment="onlymob",
                                                        tr_cycle_ini=tr_cycle_ini,
                                                        tr_cycle_end=tr_cycle_end), partial(format_table_msg),
                                         closing_day, SECTION_SCORES_NEW + " " + "onlymob")
    checker_scores_mobileandfbb_obj = checker(spark,
                                              partial(check_scores_new, spark, closing_day, segment="mobileandfbb",
                                                      tr_cycle_ini=tr_cycle_ini,
                                                      tr_cycle_end=tr_cycle_end), partial(format_table_msg),
                                              closing_day, SECTION_SCORES_NEW + " " + "mobileandfbb")
    checker_dp_levers_obj = checker(spark, partial(check_levers_model, spark, closing_day), partial(format_table_msg),
                                    closing_day, SECTION_DP_LEVERS)
    checker_delivery_file_obj = checker(spark, partial(check_delivery_file, closing_day, tr_cycle_ini, tr_cycle_end, horizon),
                                        partial(format_table_msg), closing_day, SECTION_DELIV_FILE)

    # Order matters: the delivery step later skips the last entry (the delivery file itself).
    checkers_list = [checker_car_obj, checker_join_prev_car_obj, checker_extra_feats_obj, checker_prepared_car_obj,
                     checker_scores_onlymob_obj, checker_scores_mobileandfbb_obj, checker_dp_levers_obj, checker_delivery_file_obj]

    if logger: logger.info("\n\n")
    if logger: logger.info("* " * 30)

    def run_checker(checkers_list):
        # Refresh the status of every checker.
        for checker_obj in checkers_list:
            checker_obj.check_func()

    def print_checkers(checkers_list):
        # Log every checker's formatted status message.
        for checker_obj in checkers_list:
            checker_obj.print_msg()

    def check_all_ok(checkers_list):
        # True only when every checker in the list reports success.
        return all([checker_obj.status for checker_obj in checkers_list])

    run_checker(checkers_list)
    print_checkers(checkers_list)

    message_ok = "\n\nEverything is ready! Enjoy :)"
    message_ko = "\n\nSome steps were missing :("

    if check_all_ok(checkers_list):
        logger.info(message_ok)
    else:
        logger.info(message_ko)

    if logger: logger.info("\n\n")
    if logger: logger.info("* " * 30)

    print(delivery)
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # DELIVERY SECTION
    # For every stage whose checker reports "missing", (re)compute it, then
    # re-run the checker and abort if the stage is still not complete.
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    if delivery:
        if logger: logger.info("Entering delivery section... \n")

        # . . . . . .
        # CAR
        # . . . . . .
        # car_ok, car_msg, modules_dict = check_car(spark, closing_day, verbose=False)
        if not checker_car_obj.status:
            from churn.others.run_amdocs_car import amdocs_car_main_custom
            amdocs_car_main_custom(closing_day, starting_day, spark, checker_car_obj.extra_info, time_logger)
            checker_car_obj.check_func()

            if not checker_car_obj.status:
                if logger: logger.error("Car step was not finished! Stopping program")
                if logger: logger.error(checker_car_obj.print_msg())
                sys.exit()

        # . . . . . .
        # EXTRA FEATS
        # . . . . . .
        if not checker_extra_feats_obj.status:
            from churn.datapreparation.app.generate_table_extra_feats_new import generate_extra_feats, MODULE_JOIN, \
                join_dataframes, get_modules_list_by_version
            # Only regenerate the extra-feats modules flagged as missing (the join is handled apart).
            include_ef_modules = [mod for mod, doit in checker_extra_feats_obj.extra_info.items() if doit and mod != MODULE_JOIN]
            print("include_ef_modules {}".format(",".join(include_ef_modules)))
            generate_extra_feats(spark, closing_day, include_modules=include_ef_modules, metadata_version="1.1",
                                 logger=logger, time_logger=time_logger)

            if checker_extra_feats_obj.extra_info[MODULE_JOIN]:
                if logger: logger.info("Requested join of extra feats")
                modules_list = get_modules_list_by_version(metadata_version="1.1")
                if logger: logger.info("Join will include the following modules: {}".format(modules_list))
                join_dataframes(spark, closing_day, metadata_version="1.1", include_modules=modules_list)
            else:
                print("do not perform join")

            checker_extra_feats_obj.check_func()

            if not checker_extra_feats_obj.status:
                if logger: logger.error("Extra feats process was not finished! Stopping program")
                if logger: logger.error(checker_extra_feats_obj.msg)
                sys.exit()

        # . . . . . .
        # JOIN CAR of previous month
        # . . . . . .
        if not checker_join_prev_car_obj.status:
            if logger: logger.error("Join car must exist at this point!! Generating it on the fly....")
            start_time = time.time()
            # register_time with end=-1 marks the step as "in progress".
            if time_logger: time_logger.register_time(spark, "generateCar", closing_day, start_time, -1)
            # "select * from tests_es.jvmm_amdocs_ids_" + process_date
            sql_join_car = sc._jvm.datapreparation.AmdocsCarPreprocessor.generateCar(scala_sc, closing_day)
            if time_logger: time_logger.register_time(spark, "generateCar", closing_day, start_time, time.time())

            # Check if the join of the amdocs car is ready tests_es.db/jvmm_amdocs_ids_20190321;'
            #join_car_ok, join_car_msg = check_join_car(spark, prev_closing_day, verbose=False)
            checker_join_prev_car_obj.check_func()

            if not checker_join_prev_car_obj.status:
                if logger: logger.error("join_car process was not finished! Stopping program")
                if logger: logger.error(checker_join_prev_car_obj.msg)
                sys.exit()

        # . . . . . .
        # CAR PREPARADO
        # . . . . . .
        if not checker_prepared_car_obj.status:
            start_time = time.time()
            if time_logger: time_logger.register_time(spark, "storePreparedFeatures", closing_day, start_time, -1)
            sql_car_preparado = sc._jvm.datapreparation.AmdocsCarPreprocessor.storePreparedFeatures(scala_sc, closing_day)
            if time_logger: time_logger.register_time(spark, "storePreparedFeatures", closing_day, start_time, time.time())
            checker_prepared_car_obj.check_func(check_sql, spark, sql_car_preparado)

            if not checker_prepared_car_obj.status:
                if logger: logger.error("Car preparado process was not finished! Stopping program")
                if logger: logger.error(checker_prepared_car_obj.msg)
                sys.exit()

        # . . . . . .
        # SCORES ONLYMOB
        # . . . . . .
        if not checker_scores_onlymob_obj.status:
            segmentfilter = "onlymob"
            # Derive the training window on demand if the user did not provide one.
            if not tr_cycle_ini and not tr_cycle_end and delivery:
                tr_cycle_ini, tr_cycle_end = set_training_cycles(spark, closing_day, segmentfilter)

            start_time = time.time()
            extra_info = "closing_day={}|segmentfilter={}|tr_cycle_ini={}|tr_cycle_end={}".format(closing_day,
                                                        segmentfilter, tr_cycle_ini, tr_cycle_end)
            if time_logger: time_logger.register_time(spark, "getModelPredictions_{}".format(segmentfilter), closing_day, start_time, -1, extra_info)
            sql_onlymob = sc._jvm.modeling.churn.ChurnModelPredictor.getModelPredictions(scala_sc, tr_cycle_ini,
                                                                                        tr_cycle_end, closing_day,
                                                                                        segmentfilter)
            if time_logger: time_logger.register_time(spark, "getModelPredictions_{}".format(segmentfilter), closing_day, start_time, time.time(), extra_info)
            checker_scores_onlymob_obj.check_func(check_sql, spark, sql_onlymob)

            if not checker_scores_onlymob_obj.status:
                if logger: logger.error("Scores onlymob process was not finished! Stopping program")
                if logger: logger.error(checker_scores_onlymob_obj.status)
                sys.exit()

        # . . . . . .
        # SCORES MOBILEANDFBB
        # . . . . . .
        if not checker_scores_mobileandfbb_obj.status:
            segmentfilter = "mobileandfbb"
            if not tr_cycle_ini and not tr_cycle_end and delivery:
                tr_cycle_ini, tr_cycle_end = set_training_cycles(spark, closing_day, segmentfilter)
            extra_info = "closing_day={}|segmentfilter={}|tr_cycle_ini={}|tr_cycle_end={}".format(closing_day,
                                                        segmentfilter, tr_cycle_ini, tr_cycle_end)
            start_time = time.time()
            # NOTE(review): unlike the onlymob branch, extra_info is not passed to
            # these two register_time calls — confirm whether that is intentional.
            if time_logger: time_logger.register_time(spark, "getModelPredictions_{}".format(segmentfilter), closing_day, start_time, -1)
            sql_mobileandfbb = sc._jvm.modeling.churn.ChurnModelPredictor.getModelPredictions(scala_sc, tr_cycle_ini,
                                                                                              tr_cycle_end, closing_day,
                                                                                              segmentfilter)
            if time_logger: time_logger.register_time(spark, "getModelPredictions_{}".format(segmentfilter), closing_day, start_time, time.time())
            checker_scores_mobileandfbb_obj.check_func(check_sql, spark, sql_mobileandfbb)

            if not checker_scores_mobileandfbb_obj.status:
                if logger: logger.error("Scores mobileandfbb process was not finished! Stopping program")
                if logger: logger.error(checker_scores_mobileandfbb_obj.status)
                sys.exit()

        # . . . . . .
        # DATA PREPARATION - LEVERS MODEL
        if not checker_dp_levers_obj.status:
            from churn.datapreparation.app.ccc_data_preparation_main import start_process_config
            from churn.config_manager.ccc_model_config_mgr import build_config_dict
            # extra_info maps segment name -> whether its data prep still has to run.
            for segment, do_it in checker_dp_levers_obj.extra_info.items():
                if not do_it:
                    continue
                start_time = time.time()
                if time_logger: time_logger.register_time(spark, "df_levers_{}".format(segment), closing_day, start_time, -1)
                my_config = build_config_dict(segment, agg_by='msisdn', ccc_days=-60, closing_day=closing_day, enabled=True,
                                              end_port='None', labeled=False, model_target='comercial', new_car_version=False,
                                              save_car=True,
                                              segment_filter=segment, start_port='None')
                start_process_config(spark, my_config)
                if time_logger: time_logger.register_time(spark, "df_levers_{}".format(segment), closing_day, start_time, time.time())

            checker_dp_levers_obj.check_func()

            if not checker_dp_levers_obj.status:
                if logger: logger.error("Data preparation for levers model was not finished! Stopping program")
                if logger: logger.error(checker_dp_levers_obj.msg)
                sys.exit()

        # . . . . . .
        # FINAL STEP
        # . . . . . .
        if check_all_ok(checkers_list[:-1]):#exclude delivery file check
            if logger: logger.info("Ready to start the latest step of the delivery...")
            from churn.others.run_churn_delivery_scores_incidences_levers_master import run_delivery
            start_time = time.time()
            if time_logger: time_logger.register_time(spark, "run_delivery", closing_day, start_time, -1)
            from churn.utils.constants import DELIVERY_FILENAME
            delivery_filename = DELIVERY_FILENAME.format(closing_day, tr_cycle_ini, tr_cycle_end, horizon)
            run_delivery(spark, closing_day, checker_scores_onlymob_obj.extra_info, checker_scores_mobileandfbb_obj.extra_info, delivery_filename, logger)
            if logger: logger.info("Finished the latest step of the delivery...")
            if time_logger: time_logger.register_time(spark, "run_delivery", closing_day, start_time, time.time())
        else:
            if logger: logger.error("something when wrong... please review...")
            sys.exit()

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # FINISHED
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    if logger: logger.info("TOTAL TIME Process finished - {} minutes".format((time.time() - start_time_total) / 60.0))

    if logger: logger.info("Process ended successfully. Enjoy :)")
| [
"[email protected]"
] | |
d2b1b651ebf6b8b808ed94202ed67b4c9425c296 | e44d00ffcea03f8656c40b3d4d993d51a38af3b0 | /leetcode/July/J15_ReverseWords.py | 7c56ae8d6499aa463fa5c626d3acfd51ddc0ed3f | [] | no_license | Ayushmanglani/competitive_coding | d6beec4f2b24aef34ea44c3a4a72074985b4a766 | 12325b09ae2bc6b169578b6a0a091069e14c9227 | refs/heads/master | 2023-06-12T04:43:41.130774 | 2021-07-03T13:01:37 | 2021-07-03T13:01:37 | 262,079,363 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | class Solution:
def reverseWords(self, s: str) -> str:
s = s.split()
return(" ".join(s[::-1]))
#method 2
class Solution:
    def reverseWords(self, s: str) -> str:
        """Return the words of `s` in reverse order, joined by single spaces.

        Splits on single spaces, drops the empty tokens produced by runs of
        spaces, and strips the result — mirroring the original
        accumulate-then-strip approach exactly.
        """
        tokens = s.split(" ")
        joined = " ".join(tok for tok in reversed(tokens) if tok != "")
        return joined.strip()
"[email protected]"
] | |
b8d73b63c2697f898dd70e716bd7a8aef651e60c | 6bd71bdfe9234e5e6de90bb40b6cd8d3e25ca6d2 | /Tier3-Data/ViaSRM/copy-files-SRM-2-SRM.py | ff8770089328f651fa01741ca337dc1b036f041c | [] | no_license | andres0sorio/CMSWork | f1f30a12bf43eb688ef9e95c53c94fe32fc7fe66 | 81e60a0a9b70cd2ae01d17b15be386a6cd925416 | refs/heads/master | 2021-01-22T13:12:16.094247 | 2015-10-26T04:47:12 | 2015-10-26T04:47:12 | 9,710,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,757 | py | #!/usr/bin/python
import os,sys
import string
from optparse import OptionParser
# ...
# Andres Osorio - [email protected]
# ...
#-----------------------------------------------------
#-----------------------------------------------------
# Python 2 script: read a list of LFNs and lcg-cp each file between two SRM endpoints.
parser = OptionParser()
parser.add_option("-f", type = "string", dest="infile",
                  help="List of LFNs" )
parser.add_option("-d", type = "string", dest="debug",
                  help="Are you in debug mode?" )
(options, args) = parser.parse_args()
if options.infile is None:
    parser.error("please provide a file")
if options.debug is None:
    parser.error("please, are you in debug mode?")
#-----------------------------------------------------
#-----------------------------------------------------
infile  = options.infile
debug   = options.debug
print 'Copying files: '
fh = open(infile)
nfiles = 0
# Previous copy tool kept for reference:
#cpmethod = 'srmcp '
#cpopts = '-srm_protocol_version=2 -use_urlcopy_script=true -urlcopy= ${SRM_PATH}/sbin/url-copy.sh -debug'
cpmethod = 'lcg-cp '
cpopts   = ''
#............... TO
tprefix = ' srm://moboro.uniandes.edu.co:8446/srm/managerv2\?SFN=/dpm/uniandes.edu.co/home/cms'
tdir    = '/user/a/aosorio/gridfiles/RAW/Run2011B/L1Accept/'
#............... FROM
sprefix = ' srm://cmssrm.fnal.gov:8443/srm/managerv2\?SFN=/11'
while 1:
    line = fh.readline()
    if not line:
        break
    # assumes every input line has an 8-char prefix and 2 trailing chars
    # around the LFN — TODO confirm the slice bounds against the list format.
    lfn = line[8:-3]
    info = lfn.split('/')
    filename = info[-1]
    source = sprefix + lfn
    target = tprefix + tdir + filename
    command = cpmethod + cpopts + source + target
    print command
    # NOTE(review): command is built by string concatenation and run through
    # os.system — shell-injection risk if the LFN list is untrusted.
    if debug == 'no':
        os.system(command)
    nfiles+=1
    # Debug mode copies nothing and stops after printing the first command.
    if debug == 'yes':
        break
print 'Done. Total files: ', (nfiles-1)
| [
"[email protected]"
] | |
9bf77a66acd94e715057b64921810fafc903c148 | ce68e6f989ebf72becce3638dc920fc60edec45c | /SWEA/D3/SWEA_2814_최장경로.py | 6082630ee9fd834346af54032643eb3679e71238 | [] | no_license | hyeinkim1305/Algorithm | 103513e502241a619a60e6663ed8346e5c574ebc | 93541b01fab0a6ceb6f9dd06a7c049c8b57d94f9 | refs/heads/master | 2023-05-09T19:07:58.280912 | 2021-06-10T01:51:29 | 2021-06-10T01:51:29 | 330,079,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,221 | py |
# D3
# 두 정점 사이에 여러 개의 간선 존재?
def dfs(idx, cnt):
    # Depth-first search over the adjacency matrix `adj`, tracking the longest
    # simple path found so far in the global `max_cnt`.
    # idx: current vertex (1-based); cnt: number of vertices on the current path.
    global max_cnt
    vis[idx] = 1
    if cnt > max_cnt:
        max_cnt = cnt
    for j in range(1, N+1):
        if idx != j and adj[idx][j] == 1:
            if vis[j] == 0:
                dfs(j, cnt+1)
    # Without the line below each vertex would be visited only once overall;
    # un-marking on the way out lets other paths reuse this vertex.
    vis[idx] = 0  # e.g. in the 2nd sample, enables the path that starts at vertex 3
    # Un-marking on exit is what allows paths with different starting points
    # to share vertices.
# SWEA 2814 (longest simple path): for each test case, try a DFS from every
# vertex and report the maximum number of vertices on any simple path.
T = int(input())
for tc in range(1, T+1):
    N, M = map(int, input().split())
    adj = [[0] * (N+1) for _ in range(N+1)]
    vis = [0] * (N+1)  # visited flag per vertex (1-based)
    max_cnt = -1
    # Build the undirected adjacency matrix from the M edges.
    for _ in range(M):
        u, v = map(int, input().split())
        adj[u][v] = 1
        adj[v][u] = 1
    for i in range(1, N+1):  # start a DFS from every vertex
        if vis[i] == 0:
            dfs(i, 1)  # 1: number of vertices on the path so far
    print('#{} {}'.format(tc, max_cnt))
# Sample test inputs (kept from the original file):
'''
2
1 0
3 2
1 2
3 2
'''
'''
1
6 5
1 2
1 3
2 4
2 5
2 6
'''
"[email protected]"
] | |
4727b92f82d006d2db8a575d76f06b2f66888c5a | 09912a852e0e20d6a475ef904724f80072a68359 | /eds/MemsIPE/server/openmtc-server/src/openmtc_server/Event.py | 6a3ca161a5ec34a44d4d93e69312d5e4a98fb703 | [
"Apache-2.0"
] | permissive | elastest/elastest-device-emulator-service | 034aa19438383df0975bf86d49e231342d63002f | f512355c5fde6bf027d23558e256b96e2296e0f2 | refs/heads/master | 2021-03-09T15:13:30.676138 | 2020-01-13T12:02:02 | 2020-01-13T12:02:02 | 91,440,225 | 3 | 9 | Apache-2.0 | 2018-12-03T14:59:27 | 2017-05-16T09:26:10 | Python | UTF-8 | Python | false | false | 2,910 | py | from abc import abstractmethod, ABCMeta
from futile import issubclass as safe_issubclass
from futile.logging import LoggerMixin
from openmtc.model import Resource
class Event(LoggerMixin):
    """Abstract base for events: handlers can be registered and the event fired."""
    __metaclass__ = ABCMeta

    @abstractmethod
    def fire(self, *event_data):
        # Notify all registered handlers, passing event_data along.
        raise NotImplementedError()

    @abstractmethod
    def register_handler(self, handler, *args, **kw):
        # Register `handler` to be invoked when the event fires.
        raise NotImplementedError()
class EventSpec(object):
__metaclass__ = ABCMeta
@abstractmethod
def matches(self, item):
raise NotImplementedError()
class BasicEvent(Event):
    """Concrete Event keeping its handlers in a plain list.

    Handlers are plain callables; exceptions raised by a handler are logged
    and swallowed so remaining handlers still run.
    """

    def __init__(self):
        super(BasicEvent, self).__init__()
        self._handlers = []

    def _add_handler_data(self, data):
        # For this class the registration data is the handler callable itself.
        handler = data
        if handler not in self._handlers:
            self._handlers.append(handler)
        else:
            self.logger.warn("Handler %s is already registered", handler)

    def register_handler(self, handler, **kw):
        self._add_handler_data(handler)

    def _execute_handler(self, handler, *event_data):
        # Run a single handler; log (but do not propagate) any exception.
        self.logger.debug("Running handler %s with %s", handler, event_data)
        try:
            handler(*event_data)
        except Exception:
            self.logger.exception("Error in event handler")
        self.logger.debug("handler %s finished", handler)

    def _fired(self, *event_data):
        # Dispatch to every registered handler in registration order.
        for registered in self._handlers:
            self._execute_handler(registered, *event_data)

    def fire(self, *event_data):
        self.logger.debug("Fired: %s with %s", self, event_data)
        self._fired(*event_data)
class ResourceTreeEvent(BasicEvent):
    """Event whose handlers can be restricted to a given Resource subclass.

    Each entry in `_handlers` is a ``(resource_type, handler)`` pair;
    ``resource_type is None`` subscribes the handler to every type.
    """

    def _add_handler_data(self, data):
        resource_type, handler = data

        # TODO: kca: error messages
        if resource_type is not None and not safe_issubclass(resource_type,
                                                             Resource):
            raise TypeError(resource_type)
        if not callable(handler):
            raise TypeError(handler)

        if data not in self._handlers:
            self._handlers.append(data)
        else:
            self.logger.warn("Handler %s is already registered for type %s",
                             handler, resource_type or "<all>")

    def register_handler(self, handler, resource_type=None, **kw):
        self._add_handler_data((resource_type, handler))

    def _execute_handler(self, data, *event_data):
        # Unlike BasicEvent, exceptions raised by the handler propagate here.
        handler = data[1]
        self.logger.debug("Running handler %s with %s", handler, event_data)
        handler(*event_data)
        self.logger.debug("handler finished")

    def _fired(self, resource_type, *event_data):
        # The first fired argument is the resource type; invoke only handlers
        # registered for that type (or for all types via None).
        for entry in self._handlers:
            handled_type = entry[0]
            if handled_type is not None and not issubclass(resource_type,
                                                           handled_type):
                continue
            self._execute_handler(entry, *event_data)
| [
"sro"
] | sro |
a4977e1faa3a4d326cda4f0f40893a477d82fba0 | 9613620632d8a60afa1ac66a752f2eb543a8530d | /src/posts/apps.py | 139fec2d7e31d75c47e310e121e5c3e2843e2d5b | [] | no_license | loristron/iSocial | 72dc48b36ff1959d98fdb1d4df66495575f9d48a | 417d7a344a9a078d76380c221d32860d8775f36d | refs/heads/master | 2023-02-24T15:46:17.450470 | 2021-02-03T00:52:26 | 2021-02-03T00:52:26 | 335,385,276 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | from django.apps import AppConfig
class PostsConfig(AppConfig):
    """Django application configuration for the posts app (posts, comments, likes)."""
    name = 'posts'
    verbose_name = 'Posts, Comments and Likes'
| [
"[email protected]"
] | |
a9fd455aafe7b51c2e12403c2de811af29e4df85 | a0947c2778742aec26b1c0600ceca17df42326cd | /Python/Web/72of79DB.py | 9cd00e6e182ec0bd2fd0d3e52465a807cc2c731d | [] | no_license | JohnCDunn/Course-Work-TTA | 5758319d4607114914ba9723328658bed8fb2024 | 8c4f60d51007dac2ac4cceb84b0f9666e143c0d7 | refs/heads/master | 2021-01-10T16:37:02.609879 | 2016-02-01T18:05:38 | 2016-02-01T18:05:38 | 49,983,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | import sqlite3
conn = sqlite3.connect('time_database.db')
c = conn.cursor()
c.execute('''CREATE TABLE time_table (last_update text, last_timestamp)''')
c.execute("INSERT INTO time_table VALUES ('1', '20150101010101')")
conn.commit()
conn.close()
| [
"[email protected]"
] | |
b153591361dcaaf26678eb681c08e9c88a1c0a65 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03250/s138356191.py | 9362dfb78672e63c114a03cd0f13887be37978a4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | N = list(map(int, input().split()))
print(sum(N) + max(N) * 9) | [
"[email protected]"
] | |
fdd0a283e25c4e450811834e0eb190b19844370c | d93586a23b50027c766be448072e5c06ebd05ffc | /seq2seq/dependency/tf_models/models/official/recommendation/data_preprocessing.py | d29fcc55b7920c235309592474b6a858f38a20d6 | [
"Apache-2.0"
] | permissive | peixiang6134/tf_bot_examples | 9bd3a63bf5e699e49455d3beb91c4995f47d781a | b4e8eb6a555b3eeeb423ad928a9009c41b4e5950 | refs/heads/master | 2021-07-09T02:29:26.303639 | 2020-02-22T08:58:33 | 2020-02-22T08:58:33 | 238,680,285 | 1 | 1 | null | 2021-03-31T21:54:12 | 2020-02-06T12:08:36 | Python | UTF-8 | Python | false | false | 9,747 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Preprocess dataset and construct any necessary artifacts."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pickle
import time
import timeit
import typing
# pylint: disable=wrong-import-order
import numpy as np
import pandas as pd
import tensorflow as tf
from absl import logging
# pylint: enable=wrong-import-order
from dependency.tf_models.models.official.recommendation import constants as rconst
from dependency.tf_models.models.official.recommendation import data_pipeline
from dependency.tf_models.models.official.recommendation import movielens
from dependency.tf_models.models.official.utils.logs import mlperf_helper
# (num_users, num_items) per MovieLens dataset variant, after filtering.
DATASET_TO_NUM_USERS_AND_ITEMS = {
    "ml-1m": (6040, 3706),
    "ml-20m": (138493, 26744)
}

# Keys that must all be present for a raw-data cache file to be considered valid.
_EXPECTED_CACHE_KEYS = (
    rconst.TRAIN_USER_KEY, rconst.TRAIN_ITEM_KEY, rconst.EVAL_USER_KEY,
    rconst.EVAL_ITEM_KEY, rconst.USER_MAP, rconst.ITEM_MAP)
def _filter_index_sort(raw_rating_path, cache_path):
# type: (str, str, bool) -> (dict, bool)
"""Read in data CSV, and output structured data.
This function reads in the raw CSV of positive items, and performs three
preprocessing transformations:
1) Filter out all users who have not rated at least a certain number
of items. (Typically 20 items)
2) Zero index the users and items such that the largest user_id is
`num_users - 1` and the largest item_id is `num_items - 1`
3) Sort the dataframe by user_id, with timestamp as a secondary sort key.
This allows the dataframe to be sliced by user in-place, and for the last
item to be selected simply by calling the `-1` index of a user's slice.
While all of these transformations are performed by Pandas (and are therefore
single-threaded), they only take ~2 minutes, and the overhead to apply a
MapReduce pattern to parallel process the dataset adds significant complexity
for no computational gain. For a larger dataset parallelizing this
preprocessing could yield speedups. (Also, this preprocessing step is only
performed once for an entire run.
Args:
raw_rating_path: The path to the CSV which contains the raw dataset.
cache_path: The path to the file where results of this function are saved.
Returns:
A filtered, zero-index remapped, sorted dataframe, a dict mapping raw user
IDs to regularized user IDs, and a dict mapping raw item IDs to regularized
item IDs.
"""
valid_cache = tf.io.gfile.exists(cache_path)
if valid_cache:
with tf.io.gfile.GFile(cache_path, "rb") as f:
cached_data = pickle.load(f)
# (nnigania)disabled this check as the dataset is not expected to change
# cache_age = time.time() - cached_data.get("create_time", 0)
# if cache_age > rconst.CACHE_INVALIDATION_SEC:
# valid_cache = False
for key in _EXPECTED_CACHE_KEYS:
if key not in cached_data:
valid_cache = False
if not valid_cache:
logging.info("Removing stale raw data cache file.")
tf.io.gfile.remove(cache_path)
if valid_cache:
data = cached_data
else:
with tf.io.gfile.GFile(raw_rating_path) as f:
df = pd.read_csv(f)
# Get the info of users who have more than 20 ratings on items
grouped = df.groupby(movielens.USER_COLUMN)
df = grouped.filter(
lambda x: len(x) >= rconst.MIN_NUM_RATINGS) # type: pd.DataFrame
original_users = df[movielens.USER_COLUMN].unique()
original_items = df[movielens.ITEM_COLUMN].unique()
# Map the ids of user and item to 0 based index for following processing
logging.info("Generating user_map and item_map...")
user_map = {user: index for index, user in enumerate(original_users)}
item_map = {item: index for index, item in enumerate(original_items)}
df[movielens.USER_COLUMN] = df[movielens.USER_COLUMN].apply(
lambda user: user_map[user])
df[movielens.ITEM_COLUMN] = df[movielens.ITEM_COLUMN].apply(
lambda item: item_map[item])
num_users = len(original_users)
num_items = len(original_items)
mlperf_helper.ncf_print(key=mlperf_helper.TAGS.PREPROC_HP_NUM_EVAL,
value=rconst.NUM_EVAL_NEGATIVES)
assert num_users <= np.iinfo(rconst.USER_DTYPE).max
assert num_items <= np.iinfo(rconst.ITEM_DTYPE).max
assert df[movielens.USER_COLUMN].max() == num_users - 1
assert df[movielens.ITEM_COLUMN].max() == num_items - 1
# This sort is used to shard the dataframe by user, and later to select
# the last item for a user to be used in validation.
logging.info("Sorting by user, timestamp...")
# This sort is equivalent to
# df.sort_values([movielens.USER_COLUMN, movielens.TIMESTAMP_COLUMN],
# inplace=True)
# except that the order of items with the same user and timestamp are
# sometimes different. For some reason, this sort results in a better
# hit-rate during evaluation, matching the performance of the MLPerf
# reference implementation.
df.sort_values(by=movielens.TIMESTAMP_COLUMN, inplace=True)
df.sort_values([movielens.USER_COLUMN, movielens.TIMESTAMP_COLUMN],
inplace=True, kind="mergesort")
# The dataframe does not reconstruct indices in the sort or filter steps.
df = df.reset_index()
grouped = df.groupby(movielens.USER_COLUMN, group_keys=False)
eval_df, train_df = grouped.tail(1), grouped.apply(lambda x: x.iloc[:-1])
data = {
rconst.TRAIN_USER_KEY: train_df[movielens.USER_COLUMN]
.values.astype(rconst.USER_DTYPE),
rconst.TRAIN_ITEM_KEY: train_df[movielens.ITEM_COLUMN]
.values.astype(rconst.ITEM_DTYPE),
rconst.EVAL_USER_KEY: eval_df[movielens.USER_COLUMN]
.values.astype(rconst.USER_DTYPE),
rconst.EVAL_ITEM_KEY: eval_df[movielens.ITEM_COLUMN]
.values.astype(rconst.ITEM_DTYPE),
rconst.USER_MAP: user_map,
rconst.ITEM_MAP: item_map,
"create_time": time.time(),
}
logging.info("Writing raw data cache.")
with tf.io.gfile.GFile(cache_path, "wb") as f:
pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
# TODO(robieta): MLPerf cache clear.
return data, valid_cache
def instantiate_pipeline(dataset,
data_dir,
params,
constructor_type=None,
deterministic=False,
epoch_dir=None,
generate_data_offline=False):
# type: (str, str, dict, typing.Optional[str], bool, typing.Optional[str], bool) -> (int, int, data_pipeline.BaseDataConstructor)
"""Load and digest data CSV into a usable form.
Args:
dataset: The name of the dataset to be used.
data_dir: The root directory of the dataset.
params: dict of parameters for the run.
constructor_type: The name of the constructor subclass that should be used
for the input pipeline.
deterministic: Tell the data constructor to produce deterministically.
epoch_dir: Directory in which to store the training epochs.
generate_data_offline: Boolean, whether current pipeline is done offline
or while training.
"""
logging.info("Beginning data preprocessing.")
st = timeit.default_timer()
raw_rating_path = os.path.join(data_dir, dataset, movielens.RATINGS_FILE)
cache_path = os.path.join(data_dir, dataset, rconst.RAW_CACHE_FILE)
raw_data, _ = _filter_index_sort(raw_rating_path, cache_path)
user_map, item_map = raw_data["user_map"], raw_data["item_map"]
num_users, num_items = DATASET_TO_NUM_USERS_AND_ITEMS[dataset]
if num_users != len(user_map):
raise ValueError("Expected to find {} users, but found {}".format(
num_users, len(user_map)))
if num_items != len(item_map):
raise ValueError("Expected to find {} items, but found {}".format(
num_items, len(item_map)))
producer = data_pipeline.get_constructor(constructor_type or "materialized")(
maximum_number_epochs=params["train_epochs"],
num_users=num_users,
num_items=num_items,
user_map=user_map,
item_map=item_map,
train_pos_users=raw_data[rconst.TRAIN_USER_KEY],
train_pos_items=raw_data[rconst.TRAIN_ITEM_KEY],
train_batch_size=params["batch_size"],
batches_per_train_step=params["batches_per_step"],
num_train_negatives=params["num_neg"],
eval_pos_users=raw_data[rconst.EVAL_USER_KEY],
eval_pos_items=raw_data[rconst.EVAL_ITEM_KEY],
eval_batch_size=params["eval_batch_size"],
batches_per_eval_step=params["batches_per_step"],
stream_files=params["stream_files"],
deterministic=deterministic,
epoch_dir=epoch_dir,
create_data_offline=generate_data_offline)
run_time = timeit.default_timer() - st
logging.info("Data preprocessing complete. Time: {:.1f} sec."
.format(run_time))
print(producer)
return num_users, num_items, producer
| [
"[email protected]"
] | |
5807b840bdeeec376fc32e131f19b981354fc4c6 | 735bc5756b554009c552844616183b2bcf9ac50b | /sauna/reload/interfaces.py | 3b5b4b263f04e0e9abe866d5e57392c026b534f8 | [
"ZPL-2.1"
] | permissive | collective/sauna.reload | 31a2e3be956a9ef2168016e9f199c4b3d1c0b052 | e12a0a9e01204de324ab934aec5754773ac30bd6 | refs/heads/master | 2023-03-22T11:29:17.651087 | 2018-10-18T11:15:02 | 2018-10-18T11:15:02 | 2,267,806 | 10 | 1 | null | 2014-10-19T14:24:33 | 2011-08-25T12:40:05 | Python | UTF-8 | Python | false | false | 1,779 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2011 University of Jyväskylä and Contributors.
#
# All Rights Reserved.
#
# Authors:
# Esa-Matti Suuronen <[email protected]>
# Asko Soukka <[email protected]>
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
"""ZCA Interface Definitions"""
from zope.interface import Interface, Attribute
class IDatabaseHooks(Interface):
"""
Provides storage-specific hooks to be called during the reload.
"""
def prepareForReload():
"""
Is called before the reload (before the process is killed)
to allow database connection be prepared for it.
"""
def resumeFromReload():
"""
Is called after the reload (after a new process has been spawned)
to allow database connection be restored.
"""
class INewChildForked(Interface):
"""
Emited immediately after new process is forked. No development packages
have been yet installed.
Useful if you want to do something before your code gets loaded.
Note that you cannot listen this event on a package that is marked for
reloading as it is not yet installed when this is fired.
"""
forkloop = Attribute('ForkLoop instance')
class INewChildIsReady(Interface):
"""
Emitted when all the development packages has been installed to the new
forked child.
Useful for notifications etc.
"""
forkloop = Attribute('ForkLoop instance')
| [
"[email protected]"
] | |
5e6b20b5c16d81d1f4479bc5620f2397b8e7486d | 76b0fad21d63847896e09b0c4792637ae2b1c460 | /src/cookbook_10.py | 912825055e51f3438d430db57449e5901cd71756 | [] | no_license | hezhiqiang-book/python-cookbook | 0718056aff37648246958fc38dffde66bc9df40a | a432835755a9a8703789890018561c4218a42259 | refs/heads/master | 2020-12-09T15:53:43.316368 | 2015-11-29T02:56:38 | 2015-11-29T02:56:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,132 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class Counter(object):
def __init__(self):
self.dict = {}
def add(self, item):
count = self.dict.setdefault(item, 0)
self.dict[item] = count + 1
def counts(self, desc=None):
result = [[val, key] for (key, val) in self.dict.items()]
result.sort()
if desc:
result.reverse()
return result
if __name__ == '__main__':
'''Produces:
>>> Ascending count:
[[1, 'but'], [1, 'it'], [1, 'not.'], [1, 'now'], [1, 'test,'], [1, 'test.'], [1, 'was'], [2, 'Hello'], [2, 'a'], [2, 'is'], [2, 'there'], [2, 'this']]
Descending count:
[[2, 'this'], [2, 'there'], [2, 'is'], [2, 'a'], [2, 'Hello'], [1, 'was'], [1, 'test.'], [1, 'test,'], [1, 'now'], [1, 'not.'], [1, 'it'], [1, 'but']]
'''
sentence = "Hello there this is a test. Hello there this was a test, but now it is not."
words = sentence.split()
c = Counter()
for word in words:
c.add(word)
print "Ascending count:"
print c.counts()
print "Descending count:"
print c.counts(1)
| [
"[email protected]"
] | |
043315ef85ed72f41f1faddc9dab297d8fb220e9 | bc435c8e7e8e418015e4848bb196adeac1343855 | /tools/spider/getItem.py | b53c0b6bc94a57170f41aef5772d7fa577a6dee3 | [] | no_license | xiaoZ-hc/PwnMe | 13a580268273dfaa2957c70fe485475fc8a42bb4 | 2269b9461f6bf8b5a6f600801a03bb60da3adadc | refs/heads/master | 2020-09-05T00:58:28.343371 | 2018-08-01T08:47:15 | 2018-08-01T08:47:15 | 219,939,668 | 0 | 1 | null | 2019-11-06T07:44:20 | 2019-11-06T07:44:19 | null | UTF-8 | Python | false | false | 2,733 | py | #!/usr/bin/env python
# encoding:utf-8
import requests
import bs4
import time
import sys
import os
headers = {
"Host" : "www.exploit-db.com",
"User-Agent" : "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:51.0) Gecko/20100101 Firefox/51.0",
"Accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Accept-Language" : "en-US,en;q=0.5",
"Accept-Encoding" : "gzip, deflate, br",
"Connection" : "keep-alive",
"Upgrade-Insecure-Requests" : "1",
"Pragma" : "no-cache",
"Cache-Control" : "no-cache"
}
url = sys.argv[1] # 第一个参数指定 url
pages_type = sys.argv[2] # 指定类型 主要为了确定要将文件保存在哪儿
# url = "https://www.exploit-db.com/exploits/39617/"
response = requests.get(url, headers=headers)
content = response.text.encode("UTF-8")
soup = bs4.BeautifulSoup(content, "html.parser")
def getType(soup):
result = ""
return result
def getCode(soup):
result = soup.find("pre").text
return result
def getAuthor(soup):
result = soup.findAll(attrs={"name":"author"})[0]["content"]
return result
def getDescribe(soup):
result = soup.findAll(attrs={"name":"description"})[0]["content"]
return result
def getTitle(soup):
result = soup.title.string
return result
def save(path, content):
codefile = open(path, 'w')
codefile.write(content.encode("UTF-8"))
codefile.close()
def formate(filename):
filename = filename.replace(" ","_")
filename = filename.replace("[","_")
filename = filename.replace("]","_")
filename = filename.replace("(","_")
filename = filename.replace(")","_")
filename = filename.replace("/","_")
filename = filename.replace("\\","_")
filename = filename.replace("{","_")
filename = filename.replace("}","_")
filename = filename.replace(".","_")
filename = filename.replace("\"","_")
filename = filename.replace("'","_")
if filename.endswith("_"):
filename = filename[:-1]
return filename
def compile(path):
command = "gcc -g -fno-stack-protector -z execstack " + path + " -o " + path[:-2] + ".out"
os.system(command)
def commit(comment):
command = "git add ."
os.system(command)
command = "git commit -m \"" + comment + "\""
print command
os.system(command)
print "Getting title..."
dirname = getTitle(soup)
dirname = formate(dirname)
dirname = "./cache/" + pages_type + "/" + dirname
file_content = getCode(soup)
print "Making dir..."
os.makedirs(dirname)
print "Saving file..."
save(dirname + "/" + "main.c", file_content)
comment = getDescribe(soup).replace("\"","")
commit(comment)
# print "Compiling..."
# compile(dirname + "/" + filename)
| [
"[email protected]"
] | |
a5a3e0a1a6d2bee081cc972ef984f9da33515eb9 | 63768dc92cde5515a96d774a32facb461a3bf6e9 | /jacket/api/compute/openstack/compute/certificates.py | 61aaa716f6f6ade322e2c850f6dd52375a387db5 | [
"Apache-2.0"
] | permissive | ljZM33nd/jacket | 6fe9156f6f5789e5c24425afa7ce9237c302673d | d7ad3147fcb43131098c2a5210847634ff5fb325 | refs/heads/master | 2023-04-16T11:02:01.153751 | 2016-11-15T02:48:12 | 2016-11-15T02:48:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,281 | py | # Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from jacket.api.compute.openstack import common
from jacket.api.compute.openstack import extensions
from jacket.api.compute.openstack import wsgi
import jacket.compute.cert.rpcapi
from jacket.compute import exception
from jacket.i18n import _
ALIAS = "os-certificates"
authorize = extensions.os_compute_authorizer(ALIAS)
def _translate_certificate_view(certificate, private_key=None):
return {
'data': certificate,
'private_key': private_key,
}
class CertificatesController(wsgi.Controller):
"""The x509 Certificates API controller for the OpenStack API."""
def __init__(self):
self.cert_rpcapi = jacket.compute.cert.rpcapi.CertAPI()
super(CertificatesController, self).__init__()
@extensions.expected_errors((404, 501))
def show(self, req, id):
"""Return certificate information."""
context = req.environ['compute.context']
authorize(context, action='show')
if id != 'root':
msg = _("Only root certificate can be retrieved.")
# TODO(oomichi): This seems a HTTPBadRequest case because of the
# above message. This will be changed with a microversion in the
# future.
common.raise_feature_not_supported(msg=msg)
try:
cert = self.cert_rpcapi.fetch_ca(context,
project_id=context.project_id)
except exception.CryptoCAFileNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return {'certificate': _translate_certificate_view(cert)}
# NOTE(gmann): Here should be 201 instead of 200 by v2.1
# +microversions because the resource certificate has been created
# completely when returning a response.
@extensions.expected_errors(())
def create(self, req, body=None):
"""Create a certificate."""
context = req.environ['compute.context']
authorize(context, action='create')
pk, cert = self.cert_rpcapi.generate_x509_cert(context,
user_id=context.user_id, project_id=context.project_id)
return {'certificate': _translate_certificate_view(cert, pk)}
class Certificates(extensions.V21APIExtensionBase):
"""Certificates support."""
name = "Certificates"
alias = ALIAS
version = 1
def get_resources(self):
resources = [
extensions.ResourceExtension(ALIAS,
CertificatesController(),
member_actions={})]
return resources
def get_controller_extensions(self):
return []
| [
"[email protected]"
] | |
bcb2e8de18e2d1eac6fd24c457f468bfab851648 | 6be845bf70a8efaf390da28c811c52b35bf9e475 | /windows/Resources/Dsz/PyScripts/Lib/dsz/mca_dsz/file/cmd/get/types.py | 869ee75deb26cb39c8dca695a2111e7c46c17f43 | [] | no_license | kyeremalprime/ms | 228194910bf2ed314d0492bc423cc687144bb459 | 47eea098ec735b2173ff0d4e5c493cb8f04e705d | refs/heads/master | 2020-12-30T15:54:17.843982 | 2017-05-14T07:32:01 | 2017-05-14T07:32:01 | 91,180,709 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: types.py
from types import *
MSG_KEY_PARAMS = 65536
MSG_KEY_PARAMS_RAW_INDEX = 65537
MSG_KEY_PARAMS_OFFSET = 65538
MSG_KEY_PARAMS_BYTES_TO_READ = 65539
MSG_KEY_PARAMS_MAX_FILES = 65540
MSG_KEY_PARAMS_CHUNK_SIZE = 65541
MSG_KEY_PARAMS_FLAGS = 65542
MSG_KEY_PARAMS_DATE_TYPE = 65543
MSG_KEY_PARAMS_AGE = 65544
MSG_KEY_PARAMS_AFTER_TIME = 65545
MSG_KEY_PARAMS_BEFORE_TIME = 65546
MSG_KEY_PARAMS_MASK = 65547
MSG_KEY_PARAMS_PATH = 65548
MSG_KEY_PARAMS_MINIMUM_SIZE = 65549
MSG_KEY_PARAMS_MAXIMUM_SIZE = 65550
MSG_KEY_PARAMS_FILE_PROVIDER = 65551
MSG_KEY_RESULT_FILE_INFO = 131072
MSG_KEY_RESULT_FILE_INFO_INDEX = 131073
MSG_KEY_RESULT_FILE_INFO_FILE_SIZE = 131074
MSG_KEY_RESULT_FILE_INFO_CREATE_TIME = 131075
MSG_KEY_RESULT_FILE_INFO_ACCESS_TIME = 131076
MSG_KEY_RESULT_FILE_INFO_MODIFY_TIME = 131077
MSG_KEY_RESULT_FILE_INFO_OPEN_STATUS = 131078
MSG_KEY_RESULT_FILE_INFO_OFFSET = 131079
MSG_KEY_RESULT_FILE_INFO_NAME = 131080
MSG_KEY_RESULT_FILE_INFO_FLAGS = 131081
MSG_KEY_RESULT_DATA = 196608
MSG_KEY_RESULT_DATA_INDEX = 196609
MSG_KEY_RESULT_DATA_BUFFER = 196610
MSG_KEY_RESULT_DONE = 262144
MSG_KEY_RESULT_DONE_INDEX = 262145 | [
"[email protected]"
] | |
a9fb728e80d05f1c60a425baa7e4130d5f496aa1 | 22b363b0a4d67427d4746414090d6fef701fd3b9 | /src/triage/__init__.py | f148179bcdd93eca35d0d477df9f079e43aba6e1 | [] | no_license | Barbarioli/triage | 12557dd410def01291da355051966b4d2e5885b4 | 5d1ef41f1d89b150b30fae0c300028ef8dc39321 | refs/heads/master | 2020-03-13T10:35:48.469855 | 2018-04-14T02:29:03 | 2018-04-14T02:29:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | # -*- coding: utf-8 -*-
__author__ = """Center for Data Science and Public Policy"""
__email__ = '[email protected]'
__version__ = '2.2.0'
| [
"[email protected]"
] | |
b13b2d3ef3319d540e3e1ead4ca55bb548c596ab | 3bb4d1c4060a8d0f30cbfa01328a3b1520ce1bd5 | /apps/system/migrations/0002_auto_20190404_2012.py | 5c220e6021475f5576c584f224796a70d5180679 | [] | no_license | Chen320048/HuoChenGuang_Files | 8750bc7afe871128d7ae526741da83a2ac485ce4 | f4f847572a69ecad1f8ab22c7364cbffe70571fc | refs/heads/master | 2020-05-31T09:05:37.257383 | 2019-06-04T13:18:47 | 2019-06-04T13:18:47 | 190,204,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,407 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2019-04-04 20:12
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('system', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='bizlog',
options={'default_permissions': (), 'ordering': ['-id'], 'permissions': (('view_visitor', '\u8bbf\u95ee\u7ec8\u7aef\u7edf\u8ba1'), ('manage_version', '\u7248\u672c\u7ba1\u7406'), ('view_feedback', '\u5efa\u8bae\u53cd\u9988'), ('view_log', '\u7cfb\u7edf\u65e5\u5fd7'), ('manage_vcode', '\u7cfb\u7edf\u9a8c\u8bc1\u7801\u7ba1\u7406')), 'verbose_name': '\u7cfb\u7edf'},
),
migrations.AlterModelOptions(
name='feedback',
options={'default_permissions': (), 'ordering': ['-id'], 'verbose_name': '\u7528\u6237\u53cd\u9988'},
),
migrations.AlterModelOptions(
name='vcode',
options={'default_permissions': (), 'ordering': ['-id']},
),
migrations.AlterModelOptions(
name='version',
options={'default_permissions': (), 'ordering': ['-id'], 'verbose_name': '\u7248\u672c\u4fe1\u606f'},
),
migrations.AlterModelOptions(
name='visitor',
options={'default_permissions': (), 'ordering': ['-id']},
),
]
| [
"[email protected]"
] | |
f462aa831af9c6ef63296255f49b337746e77814 | a560269290749e10466b1a29584f06a2b8385a47 | /Notebooks/py/yuikitaml/method-examples/method-examples.py | bbf85edccd7069000af9984fb7999be1f545bd7f | [] | no_license | nischalshrestha/automatic_wat_discovery | c71befad1aa358ae876d5494a67b0f4aa1266f23 | 982e700d8e4698a501afffd6c3a2f35346c34f95 | refs/heads/master | 2022-04-07T12:40:24.376871 | 2020-03-15T22:27:39 | 2020-03-15T22:27:39 | 208,379,586 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,049 | py | #!/usr/bin/env python
# coding: utf-8
# ## Method examples quick look
# Using Titanic dataset
# In[ ]:
import pandas as pd
import numpy as np
import re
import sklearn
import xgboost as xgb
import seaborn as sns
import matplotlib.pyplot as plt
get_ipython().magic(u'matplotlib inline')
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls
import warnings
warnings.filterwarnings('ignore')
from sklearn.ensemble import (RandomForestClassifier, AdaBoostClassifier,
GradientBoostingClassifier, ExtraTreesClassifier)
from sklearn.svm import SVC
from sklearn.cross_validation import KFold
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
PassengerId = test['PassengerId']
train.head(3)
# In[ ]:
# Count NaN
train.Cabin.isnull().sum()
# In[ ]:
# Numerise Sex
result = train.Sex.map({'male': 0, 'female': 1})
result.head(3)
# In[ ]:
# Fill na
result = train.Cabin.fillna('S')
result.head(3)
# In[ ]:
| [
"[email protected]"
] | |
a813e9d7480febc65bed94dcd2c611f754601bb5 | d28d47bc1d25161629d2174a465ae3eb38e02802 | /myapp/migrations/0002_auto_20190904_1909.py | 86cd65d9520221dee1b35b5c4957107f4966576a | [] | no_license | Lider-neuromedia/opx-opensource-web | 4df776607d701f0fe89ad1fb178502ec089678be | b44bb24830e52df9a641181f1ab644e9da08ff1d | refs/heads/master | 2022-06-05T01:25:28.312145 | 2020-04-29T22:23:41 | 2020-04-29T22:23:41 | 260,053,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | # Generated by Django 2.2.4 on 2019-09-04 19:09
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('myapp', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='usuario',
old_name='userpassword',
new_name='password',
),
migrations.AlterField(
model_name='accion',
name='accionid',
field=models.UUIDField(default=uuid.UUID('50258478-ee9f-42e7-90bc-4e70fc047c0b'), editable=False, primary_key=True, serialize=False),
),
]
| [
"="
] | = |
ea8d95bb65ad2ff1b41bf10867f308283fee4d22 | ee86ad4b38f6ba13f195246f14224ba781f933cc | /02_2차원배열/2차원 순회.py | bf7faa2b62022360c75ed078fa6eac2111d6d63e | [] | no_license | yejikk/Algorithm | aed7adf00c1e32d21b735b3b34dc6cb75049f164 | 531f43305b3a23c824c9e153151b7280c1dc2535 | refs/heads/master | 2020-04-17T06:17:28.961656 | 2019-11-16T08:02:49 | 2019-11-16T08:02:49 | 166,318,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | arr = [[0,1,2,3],
[4,5,6,7],
[8,9,10,11]]
# i : 행의 좌표
n = len(arr)
# j : 열의 좌표,
m = len(arr[0])
# 행 우선 순회
for i in range(len(arr)):
for j in range(len(arr[i])):
print(arr[i][j], end=' ')
print()
print()
# 열 우선 순회
for j in range(len(arr[0])):
for i in range(len(arr)):
print(arr[i][j], end=' ')
print()
print()
# 지그재그 순회
for i in range(len(arr)) :
for j in range(len(arr[0])):
print(arr[i][j + (m-1-2*j) * (i%2)], end=' ')
print()
print()
| [
"[email protected]"
] | |
ded0ba095bd9d4078345e5fdce6768005fe044fe | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_2453486_0/Python/Zolmeister1/tictac.py | 2879fa0314f0c731fcb36909d595b1a0d320f8b9 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,997 | py | fin = open('tictac.in').read()[2:].strip().split('\n\n')
fout = open('tictac.out','w')
def pp(s):#print end case
print 'Case #%d: ' % (cnt) +s
fout.write('Case #%d: ' % (cnt) +s+'\n')
cnt=0
for game in fin:
cnt+=1
rows = game.split('\n')
cont = False
for r in rows:
os = r.count('O')
xs = r.count('X')
ts = r.count('T')
if os==4 or os==3 and ts==1:
pp('O won')
cont = True
break
elif xs==4 or xs==3 and ts==1:
pp('X won')
cont = True
break
if cont:
continue
cols = zip(*rows)
for r in cols:
os = r.count('O')
xs = r.count('X')
ts = r.count('T')
if r.count('O')==4 or r.count('O')==3 and r.count('T')==1:
pp('O won')
cont = True
break
elif r.count('X')==4 or r.count('X')==3 and r.count('T')==1:
pp('X won')
cont=True
break
if cont:
continue
leftDiag = [
[0,0],
[1,1],
[2,2],
[3,3]
]
rightDiag = [
[0,3],
[1,2],
[2,1],
[3,0]
]
left = []
for pos in leftDiag:
left.append(cols[pos[0]][pos[1]])
right = []
for pos in rightDiag:
right.append(cols[pos[0]][pos[1]])
osL = left.count('O')
xsL = left.count('X')
tsL = left.count('T')
osR = right.count('O')
xsR = right.count('X')
tsR = right.count('T')
if osL==4 or osL==3 and tsL==1:
pp('O won')
continue
elif xsL==4 or xsL==3 and tsL==1:
pp('X won')
continue
if osR==4 or osR==3 and tsR==1:
pp('O won')
continue
elif xsR==4 or xsR==3 and tsR==1:
pp('X won')
continue
for r in cols:
if r.count('.') > 0:
pp('Game has not completed')
break
else:
pp('Draw')
fout.close() | [
"[email protected]"
] | |
bece03e4588034620ff88252c2a43363cd91a680 | 04e5b6df2ee3bcfb7005d8ec91aab8e380333ac4 | /Lib/objc/_MaterialKit.py | e6f20a6022f0fd5e3b40448fc97fc45454fa77e7 | [
"MIT"
] | permissive | ColdGrub1384/Pyto | 64e2a593957fd640907f0e4698d430ea7754a73e | 7557485a733dd7e17ba0366b92794931bdb39975 | refs/heads/main | 2023-08-01T03:48:35.694832 | 2022-07-20T14:38:45 | 2022-07-20T14:38:45 | 148,944,721 | 884 | 157 | MIT | 2023-02-26T21:34:04 | 2018-09-15T22:29:07 | C | UTF-8 | Python | false | false | 1,198 | py | """
Classes from the 'MaterialKit' framework.
"""
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
MTVisualStyling = _Class("MTVisualStyling")
MTVisualStylingProvider = _Class("MTVisualStylingProvider")
MTMappedImageCache = _Class("MTMappedImageCache")
MTLumaDodgePillSettings = _Class("MTLumaDodgePillSettings")
MTLumaDodgePillStyleSettings = _Class("MTLumaDodgePillStyleSettings")
MTLumaDodgePillDomain = _Class("MTLumaDodgePillDomain")
_MTVisualStylingVibrancyEffect = _Class("_MTVisualStylingVibrancyEffect")
MTMaterialShadowView = _Class("MTMaterialShadowView")
MTMaterialView = _Class("MTMaterialView")
_MTStaticVibrantColorMaterialView = _Class("_MTStaticVibrantColorMaterialView")
MTStylingProvidingSolidColorView = _Class("MTStylingProvidingSolidColorView")
_MTLumaDodgePillLowQualityEffectView = _Class("_MTLumaDodgePillLowQualityEffectView")
MTPillView = _Class("MTPillView")
MTStaticColorPillView = _Class("MTStaticColorPillView")
MTLumaDodgePillView = _Class("MTLumaDodgePillView")
MTShadowView = _Class("MTShadowView")
| [
"[email protected]"
] | |
08786c00f9edae0282a030938082a24985a78bfb | 3011e024b5f31d6c747a2bd4a143bb6a0eeb1e1d | /django-rest-framework/day132_01/venv/Scripts/pip-script.py | 30ef622f420166e537def6d1a18be62e0beda706 | [] | no_license | yingkun1/python-django | a3084460a83682f3e0848d5b40c881f93961ecc2 | 08c9ed3771eb245ee9ff66f67cf28730d2675bbe | refs/heads/master | 2022-12-11T12:33:20.788524 | 2019-06-12T09:30:59 | 2019-06-12T09:30:59 | 189,977,625 | 1 | 0 | null | 2022-11-22T02:57:01 | 2019-06-03T09:43:30 | Python | UTF-8 | Python | false | false | 421 | py | #!E:\python-django\django-rest-framework\day132_01\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
| [
"[email protected]"
] | |
10adf812c3b3204b61a65a1935b9b8ed4ed9a65d | 2d1fdf69b3f00d1840f2e8956b91bd97c4777c24 | /mohet/mohet_app/migrations/0002_auto_20210604_2126.py | 210c1f080297d77c0f3631e679d1324652951377 | [] | no_license | DiyarBarham/project-mohet | d8a4df41ee19365dd9a6e4c756477f3ccaffd60f | 03b92cd28fa315322d76f5e1632b5403f117fa25 | refs/heads/master | 2023-05-25T11:47:05.351526 | 2021-06-06T14:53:31 | 2021-06-06T14:53:31 | 374,387,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,887 | py | # Generated by Django 2.2.4 on 2021-06-04 18:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mohet_app', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='article',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='comment',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='contact',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='media',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='mediatype',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='role',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='subscription',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='user',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
| [
"[email protected]"
] | |
3463202e5e93b0f832989da3363bc365a09d764d | 07917881310fc81d85a2cbdf27c9b3c4fa03c694 | /python1812/python_4/四阶段/day01/code/python3code/job51.py | 959f5d9501613b5eb6c690f4ff91b6599ea99e23 | [] | no_license | zaoyuaner/Learning-materials | 9bc9a127d1c6478fb6cebbb6371b1fd85427c574 | 1f468a6f63158758f7cbfe7b5df17f51e3205f04 | refs/heads/master | 2020-05-18T11:38:45.771271 | 2019-05-20T09:07:44 | 2019-05-20T09:07:44 | 184,384,050 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,706 | py | import re
import urllib.request
# Browser-style User-Agent so the request is not rejected as an obvious bot.
headers = {
	"User-Agent":"User-Agent, Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11"
}
# Search-results URL on 51job (qianchengwuyou) for "python" jobs.
url = "https://search.51job.com/list/040000%252C010000,000000,0000,00,9,99,python,2,1.html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99°reefrom=99&jobterm=99&companysize=99&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare="
# Fetch the data: build the request object
req = urllib.request.Request(url,headers=headers)
# Get the server's response
response = urllib.request.urlopen(req)
# print(response)
# Decode (51job serves GBK-encoded pages)
html = response.read().decode('gbk')
# print(html)
#print(type(html))
# Process the data: grab everything between the <div class="rt"> tags
# (this div holds the "N jobs found" summary text).
jobnum_re = '<div class="rt">(.*?)</div>'
coms = re.compile(jobnum_re,re.S)
strs = coms.findall(html)[0]
# print(strs)
# Greedy vs non-greedy matching:
# the trailing ? makes the quantifier non-greedy.
# Extract the bare number (total job count) from the summary text.
num_re = '.*?(\d+).*'
num = re.findall(num_re,strs)
# print(num)
# print(int(num[0]))
# Grab every job-listing block on the page.
jobname_re = '<div class="el">(.*?)</div>'
joblist = re.findall(jobname_re,html,re.S)
# #print(joblist[0]) # the first job's info (contains several tags)
#
# # match the job title inside one listing block
# jobnameone_re = 'onmousedown="">(.*?)</a>'
# jobnameone_list = re.findall(jobnameone_re,joblist[1],re.S)
# print(jobnameone_list[0].strip())
for job in joblist:
    # Pull the anchor text (the job title) out of each listing block.
    jobnameone_re = 'onmousedown="">(.*?)</a>'
    jobnameone_list = re.findall(jobnameone_re, job, re.S)
    print(jobnameone_list)
    #print("job title:", jobnameone_list[0].strip())
| [
"[email protected]"
] | |
685728b1b73a4e7b80a069a6247b6266c4e245ae | e16cf4d5dc7a0055892feee2397ac341a4c9b375 | /data-structures/stacks_and_queues/test_stacks_and_queues.py | 6ed1937c2e223db01050abe6469abfaee4309c5a | [
"MIT"
] | permissive | arsummers/python-data-structures-and-algorithms | 47935b75216594566a706083f91c6d71ae01a96c | 30a488bd1100d8edac3b7fda73f7d7d999c61bfc | refs/heads/master | 2021-06-18T08:33:36.210039 | 2020-09-24T23:23:32 | 2020-09-24T23:23:32 | 195,889,918 | 3 | 1 | MIT | 2021-04-20T18:35:42 | 2019-07-08T21:42:19 | Python | UTF-8 | Python | false | false | 2,307 | py | import pytest
from stacks_and_queues import Stack, Queue
# Can successfully push onto a stack
def test_stack_push_one():
s = Stack()
s.push('a')
assert s.peek() == 'a'
# Can successfully push multiple values onto a stack
def test_stack_push_multiple():
    """Pushing several values leaves the most recent push on top."""
    stack = Stack()
    for letter in ('a', 'b', 'c'):
        stack.push(letter)
    assert stack.peek() == 'c'
# Can successfully pop off the stack
def test_stack_pop():
s = Stack()
s.push('a')
s.push('b')
s.push('c')
s.pop('a')
assert s.peek() == 'b'
# Can successfully empty a stack after multiple pops
def test_pop_stack_to_empty():
    """Popping every pushed value leaves the stack empty."""
    stack = Stack()
    for letter in ('a', 'b', 'c'):
        stack.push(letter)
    for letter in ('a', 'b', 'c'):
        stack.pop(letter)
    assert stack.peek() is None
# Can successfully peek the next item on the stack)
def test_peek_next_item():
s = Stack()
s.push('a')
s.push('b')
s.push('c')
assert s.peek() == 'c'
# Can successfully instantiate an empty stack
def test_instantiate_empty_stack():
s = Stack()
assert s.peek() is None
# TESTS FOR QUEUES
# Can successfully enqueue into a queue
def test_enqueue_single_value():
q = Queue()
q.enqueue('a')
assert q.peek() == 'a'
# Can successfully enqueue multiple values into a queue
def test_enqueue_multiple_values():
q = Queue()
q.enqueue('a')
q.enqueue('b')
q.enqueue('c')
assert q.peek() == 'a'
# Can successfully dequeue out of a queue the expected value
def test_dequeue():
    """Dequeue returns items in FIFO order: the first value enqueued
    ('a') must be the first value dequeued.
    """
    q = Queue()
    # NOTE: the original test called q.dequeue() here, on a still-empty
    # queue, before anything was enqueued. That call asserted nothing and
    # made the test depend on unspecified empty-dequeue behavior, so it
    # was removed.
    q.enqueue('a')
    q.enqueue('b')
    q.enqueue('c')
    q.enqueue('d')
    q.enqueue('e')
    q.enqueue('g')
    assert q.dequeue() == 'a'
# Can successfully peek into a queue, seeing the expected value
def test_peek_into_queue():
    """Peek reports the oldest enqueued value without removing it."""
    queue = Queue()
    for letter in ('a', 'b', 'c', 'd', 'e', 'g'):
        queue.enqueue(letter)
    assert queue.peek() == 'a'
# Can successfully empty a queue after multiple dequeues
def test_dequeue_queue_to_empty():
    """After dequeuing every enqueued item, peek reports an empty queue."""
    q = Queue()
    q.enqueue('a')
    q.enqueue('b')
    q.enqueue('c')
    q.dequeue()
    q.dequeue()
    q.dequeue()
    # `is None` is the idiomatic None check (original used `== None`),
    # and matches the sibling stack test's style.
    assert q.peek() is None
# Can successfully instantiate an empty queue
def test_instantiate_empty_queue():
    """A freshly constructed queue has nothing to peek at."""
    queue = Queue()
    assert queue.peek() is None
"[email protected]"
] | |
88f49fdc3e4695c32dc863b2acf20931279d3de7 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part000476.py | 79c655e3af493ac7bfbe5077aba07884817c5816 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,690 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher43221(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({0: 1}), [
(VariableWithCount('i2.2.1.2.2.2.1.0', 1, 1, S(1)), Mul)
]),
1: (1, Multiset({}), [
(VariableWithCount('i2.2.1.2.2.2.1.0_1', 1, 1, S(1)), Mul),
(VariableWithCount('i2.2.1.2.2.2.1.1', 1, 1, None), Mul)
]),
2: (2, Multiset({}), [
(VariableWithCount('i2.2.1.2.2.2.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.2.1.2.2.2.1.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher43221._instance is None:
CommutativeMatcher43221._instance = CommutativeMatcher43221()
return CommutativeMatcher43221._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 43220
if len(subjects) >= 1 and isinstance(subjects[0], Pow):
tmp1 = subjects.popleft()
subjects2 = deque(tmp1._args)
# State 43222
if len(subjects2) >= 1:
tmp3 = subjects2.popleft()
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.2.2.1.1', tmp3)
except ValueError:
pass
else:
pass
# State 43223
if len(subjects2) >= 1 and subjects2[0] == Integer(2):
tmp5 = subjects2.popleft()
# State 43224
if len(subjects2) == 0:
pass
# State 43225
if len(subjects) == 0:
pass
# 0: x**2
yield 0, subst1
subjects2.appendleft(tmp5)
subjects2.appendleft(tmp3)
subjects.appendleft(tmp1)
return
yield
from collections import deque | [
"[email protected]"
] | |
dc957f9141ae0629ab052acf85163d752bb8aff7 | 24d8cf871b092b2d60fc85d5320e1bc761a7cbe2 | /SpamBayes/rev2415-2775/left-trunk-2775/Outlook2000/dialogs/dialog_map.py | f3db3b7b05910836b8662a3830e1e7c851c9e6a5 | [] | no_license | joliebig/featurehouse_fstmerge_examples | af1b963537839d13e834f829cf51f8ad5e6ffe76 | 1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad | refs/heads/master | 2016-09-05T10:24:50.974902 | 2013-03-28T16:28:47 | 2013-03-28T16:28:47 | 9,080,611 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 25,240 | py | from processors import *
from opt_processors import *
import wizard_processors as wiz
from dialogs import ShowDialog, MakePropertyPage, ShowWizard
try:
enumerate
except NameError: # enumerate new in 2.3
def enumerate(seq):
return [(i, seq[i]) for i in xrange(len(seq))]
class StatsProcessor(ControlProcessor):
    """Drives the statistics page: a read-only text control with the filter
    statistics, a reset button, and a label showing the last reset date.
    control_ids is (stats text control, reset button, reset-date label).
    """
    def __init__(self, window, control_ids):
        self.button_id = control_ids[1]
        self.reset_date_id = control_ids[2]
        ControlProcessor.__init__(self, window, control_ids)
        self.stats = self.window.manager.stats
    def Init(self):
        # One statistic per line in the text control.
        text = "\n".join(self.stats.GetStats())
        win32gui.SendMessage(self.GetControl(), win32con.WM_SETTEXT, 0, text)
        date_label = self.GetControl(self.reset_date_id)
        if self.stats.from_date:
            from time import localtime, strftime
            reset_date = localtime(self.stats.from_date)
            date_string = strftime("%a, %d %b %Y %I:%M:%S %p", reset_date)
        else:
            date_string = _("Never")
        win32gui.SendMessage(date_label, win32con.WM_SETTEXT, 0, date_string)
    def OnCommand(self, wparam, lparam):
        # Only the reset button's command is of interest here.
        id = win32api.LOWORD(wparam)
        if id == self.button_id:
            self.ResetStatistics()
    def GetPopupHelpText(self, idFrom):
        # Tooltip text for each of the three managed controls.
        if idFrom == self.control_id:
            return _("Displays statistics on mail processed by SpamBayes")
        elif idFrom == self.button_id:
            return _("Resets all SpamBayes statistics to zero")
        elif idFrom == self.reset_date_id:
            return _("The date and time when the SpamBayes statistics were last reset")
    def ResetStatistics(self):
        # Confirm first (default button is 'No'); on Yes, zero both the
        # session stats and the persistent totals, then refresh the display.
        question = _("This will reset all your saved statistics to zero.\r\n\r\n" \
                     "Are you sure you wish to reset the statistics?")
        flags = win32con.MB_ICONQUESTION | win32con.MB_YESNO | win32con.MB_DEFBUTTON2
        if win32gui.MessageBox(self.window.hwnd,
                               question, "SpamBayes", flags) == win32con.IDYES:
            self.stats.Reset()
            self.stats.ResetTotal(True)
            self.Init() # update the statistics display
class VersionStringProcessor(ControlProcessor):
    """Static-text processor that shows the running add-in's version."""
    def Init(self):
        """Write the long version description into the control."""
        import sys
        from spambayes.Version import get_current_version
        version = get_current_version()
        text = version.get_long_version("SpamBayes Outlook Addin")
        # A frozen (py2exe) build sets sys.frozen; otherwise we are
        # running straight from the source tree.
        if not hasattr(sys, "frozen"):
            text = text + _(" from source")
        win32gui.SendMessage(self.GetControl(), win32con.WM_SETTEXT, 0, text)
    def GetPopupHelpText(self, cid):
        """Tooltip for the version control."""
        return _("The version of SpamBayes running")
class TrainingStatusProcessor(ControlProcessor):
    """Fills a static control with a summary of the training database.

    Warns when the ham/spam ratio is badly skewed (SpamBayes works best
    with roughly even numbers) or when there is too little training data.
    """
    def Init(self):
        bayes = self.window.manager.classifier_data.bayes
        nspam = bayes.nspam
        nham = bayes.nham
        # More than 10 of each is treated as a usable training corpus.
        if nspam > 10 and nham > 10:
            db_status = _("Database has %d good and %d spam.") % (nham, nspam)
            db_ratio = nham/float(nspam)
            # (removed unused locals `big = small = None` from the original)
            if db_ratio > 5.0:
                db_status = _("%s\nWarning: you have much more ham than spam - " \
                              "SpamBayes works best with approximately even " \
                              "numbers of ham and spam.") % (db_status, )
            elif db_ratio < (1/5.0):
                db_status = _("%s\nWarning: you have much more spam than ham - " \
                              "SpamBayes works best with approximately even " \
                              "numbers of ham and spam.") % (db_status, )
        elif nspam > 0 or nham > 0:
            db_status = _("Database only has %d good and %d spam - you should " \
                          "consider performing additional training.") % (nham, nspam)
        else:
            db_status = _("Database has no training information. SpamBayes " \
                          "will classify all messages as 'unsure', " \
                          "ready for you to train.")
        win32gui.SendMessage(self.GetControl(), win32con.WM_SETTEXT,
                             0, db_status)
class WizardTrainingStatusProcessor(ControlProcessor):
    """Wizard finish-page text: reports how effective the just-completed
    training is likely to be, based on how many messages were trained."""
    def Init(self):
        bayes = self.window.manager.classifier_data.bayes
        nspam = bayes.nspam
        nham = bayes.nham
        # More than 10 of each counts as a solid corpus.
        if nspam > 10 and nham > 10:
            msg = _("SpamBayes has been successfully trained and configured. " \
                    "You should find the system is immediately effective at " \
                    "filtering spam.")
        else:
            msg = _("SpamBayes has been successfully trained and configured. " \
                    "However, as the number of messages trained is quite small, " \
                    "SpamBayes may take some time to become truly effective.")
        win32gui.SendMessage(self.GetControl(), win32con.WM_SETTEXT,
                             0, msg)
class IntProcessor(OptionControlProcessor):
    """Edit-control processor for a small integer option (valid range 0-10)."""
    def UpdateControl_FromValue(self):
        # Push the option's current value into the edit control as text.
        win32gui.SendMessage(self.GetControl(), win32con.WM_SETTEXT, 0, str(self.option.get()))
    def UpdateValue_FromControl(self):
        # Read the control's text, parse and range-check it; raises
        # ValueError for unparsable text or anything outside 0..10.
        buf_size = 100
        buf = win32gui.PyMakeBuffer(buf_size)
        nchars = win32gui.SendMessage(self.GetControl(), win32con.WM_GETTEXT,
                                      buf_size, buf)
        str_val = buf[:nchars]
        val = int(str_val)
        if val < 0 or val > 10:
            raise ValueError, "Value must be between 0 and 10"
        self.SetOptionValue(val)
    def OnCommand(self, wparam, lparam):
        code = win32api.HIWORD(wparam)
        if code==win32con.EN_CHANGE:
            try:
                self.UpdateValue_FromControl()
            except ValueError:
                # Ignore transient invalid input while the user is typing.
                pass
class FilterEnableProcessor(BoolButtonProcessor):
def OnOptionChanged(self, option):
self.Init()
def Init(self):
BoolButtonProcessor.Init(self)
reason = self.window.manager.GetDisabledReason()
win32gui.EnableWindow(self.GetControl(), reason is None)
def UpdateValue_FromControl(self):
check = win32gui.SendMessage(self.GetControl(), win32con.BM_GETCHECK)
if check:
reason = self.window.manager.GetDisabledReason()
if reason is not None:
win32gui.SendMessage(self.GetControl(), win32con.BM_SETCHECK, 0)
raise ValueError, reason
check = not not check # force bool!
self.SetOptionValue(check)
class FilterStatusProcessor(ControlProcessor):
def OnOptionChanged(self, option):
self.Init()
def Init(self):
manager = self.window.manager
reason = manager.GetDisabledReason()
if reason is not None:
win32gui.SendMessage(self.GetControl(), win32con.WM_SETTEXT,
0, reason)
return
if not manager.config.filter.enabled:
status = _("Filtering is disabled. Select 'Enable SpamBayes' to enable.")
win32gui.SendMessage(self.GetControl(), win32con.WM_SETTEXT,
0, status)
return
config = manager.config.filter
certain_spam_name = manager.FormatFolderNames(
[config.spam_folder_id], False)
if config.unsure_folder_id:
unsure_name = manager.FormatFolderNames(
[config.unsure_folder_id], False)
unsure_text = _("unsure managed in '%s'") % (unsure_name,)
else:
unsure_text = _("unsure messages untouched")
watch_names = manager.FormatFolderNames(
config.watch_folder_ids, config.watch_include_sub)
filter_status = _("Watching '%s'. Spam managed in '%s', %s.") \
% (watch_names,
certain_spam_name,
unsure_text)
win32gui.SendMessage(self.GetControl(), win32con.WM_SETTEXT,
0, filter_status)
class TabProcessor(ControlProcessor):
def __init__(self, window, control_ids, page_ids):
ControlProcessor.__init__(self, window, control_ids)
self.page_ids = page_ids.split()
def Init(self):
self.pages = {}
self.currentPage = None
self.currentPageIndex = -1
self.currentPageHwnd = None
for index, page_id in enumerate(self.page_ids):
template = self.window.manager.dialog_parser.dialogs[page_id]
self.addPage(index, page_id, template[0][0])
self.switchToPage(0)
def Done(self):
if self.currentPageHwnd is not None:
if not self.currentPage.SaveAllControls():
win32gui.SendMessage(self.GetControl(), commctrl.TCM_SETCURSEL, self.currentPageIndex,0)
return False
return True
def OnNotify(self, nmhdr, wparam, lparam):
selChangedCode = 5177342
code = nmhdr[2]
if code==selChangedCode:
index = win32gui.SendMessage(self.GetControl(), commctrl.TCM_GETCURSEL, 0,0)
if index!=self.currentPageIndex:
self.switchToPage(index)
def switchToPage(self, index):
if self.currentPageHwnd is not None:
if not self.currentPage.SaveAllControls():
win32gui.SendMessage(self.GetControl(), commctrl.TCM_SETCURSEL, self.currentPageIndex,0)
return 1
win32gui.DestroyWindow(self.currentPageHwnd)
self.currentPage = MakePropertyPage(self.GetControl(), self.window.manager, self.window.config, self.pages[index])
self.currentPageHwnd = self.currentPage.CreateWindow()
self.currentPageIndex = index
return 0
def addPage(self, item, idName, label):
format = "iiiiiii"
lbuf = win32gui.PyMakeBuffer(len(label)+1)
address,l = win32gui.PyGetBufferAddressAndLen(lbuf)
win32gui.PySetString(address, label)
buf = struct.pack(format,
commctrl.TCIF_TEXT, # mask
0, # state
0, # state mask
address,
0, #unused
0, #image
item
)
item = win32gui.SendMessage(self.GetControl(),
commctrl.TCM_INSERTITEM,
item,
buf)
self.pages[item] = idName
def ShowAbout(window):
"""Opens the SpamBayes documentation in a browser"""
window.manager.ShowHtml("about.html")
def ShowTrainingDoc(window):
"""Opens documentation on SpamBayes training in a browser"""
window.manager.ShowHtml("docs/welcome.html")
def ShowDataFolder(window):
"""Uses Windows Explorer to show where SpamBayes data and configuration
files are stored
"""
import os
import sys
filesystem_encoding = sys.getfilesystemencoding()
os.startfile(window.manager.data_directory.encode(filesystem_encoding))
def ShowLog(window):
"""Opens the log file for the current SpamBayes session
"""
import sys, os, win32api, win32con
if hasattr(sys, "frozen"):
log_name = os.path.join(win32api.GetTempPath(), "spambayes1.log")
if not os.path.exists(log_name):
window.manager.ReportError(_("The log file for this session can not be located"))
else:
cmd = 'notepad.exe "%s"' % log_name
win32api.WinExec(cmd, win32con.SW_SHOW)
else:
question = _("As you are running from source-code, viewing the\n" \
"log means executing a Python program. If you already\n" \
"have a viewer running, the output may appear in either.\n\n"\
"Do you want to execute this viewer?")
if not window.manager.AskQuestion(question):
return
import win32traceutil # will already be imported
py_name = win32traceutil.__file__
if py_name[-1] in 'co': # pyc/pyo
py_name = py_name[:-1]
os.system('start ' + win32api.GetShortPathName(py_name))
def ResetConfig(window):
question = _("This will reset all configuration options to their default values\r\n\r\n" \
"It will not reset the folders you have selected, nor your\r\n" \
"training information, but all other options will be reset\r\n" \
"and SpamBayes will need to be re-enabled before it will\r\n" \
"continue filtering.\r\n\r\n" \
"Are you sure you wish to reset all options?")
flags = win32con.MB_ICONQUESTION | win32con.MB_YESNO | win32con.MB_DEFBUTTON2
if win32gui.MessageBox(window.hwnd,
question, "SpamBayes",flags) == win32con.IDYES:
options = window.config._options
for sect in options.sections():
for opt_name in options.options_in_section(sect):
opt = options.get_option(sect, opt_name)
if not opt.no_restore():
assert opt.is_valid(opt.default_value), \
"Resetting '%s' to invalid default %r" % (opt.display_name(), opt.default_value)
opt.set(opt.default_value)
window.LoadAllControls()
class DialogCommand(ButtonProcessor):
def __init__(self, window, control_ids, idd):
self.idd = idd
ButtonProcessor.__init__(self, window, control_ids)
def OnClicked(self, id):
parent = self.window.hwnd
self.window.SaveAllControls()
ShowDialog(parent, self.window.manager, self.window.config, self.idd)
self.window.LoadAllControls()
def GetPopupHelpText(self, id):
dd = self.window.manager.dialog_parser.dialogs[self.idd]
return _("Displays the %s dialog") % dd.caption
class HiddenDialogCommand(DialogCommand):
def __init__(self, window, control_ids, idd):
DialogCommand.__init__(self, window, control_ids, idd)
def Init(self):
DialogCommand.Init(self)
win32gui.SetWindowText(self.GetControl(), "")
def OnCommand(self, wparam, lparam):
pass
def OnRButtonUp(self, wparam, lparam):
self.OnClicked(0)
def GetPopupHelpText(self, id):
return _("Nothing to see here.")
class ShowWizardCommand(DialogCommand):
def OnClicked(self, id):
import win32con
existing = self.window
manager = self.window.manager
dlg = self.window.hwnd
while dlg:
style = win32api.GetWindowLong(dlg, win32con.GWL_STYLE)
if not style & win32con.WS_CHILD:
break
dlg = win32gui.GetParent(dlg)
else:
assert 0, "no parent!"
try:
parent = win32gui.GetParent(dlg)
except win32gui.error:
parent = 0 # no parent
win32gui.EndDialog(dlg, win32con.IDOK)
ShowWizard(parent, manager, self.idd, use_existing_config = True)
def WizardFinish(mgr, window):
    # Completion callback handed to ConfigureWizardProcessor; just logs.
    # (Python 2 print statement - this module is Python 2 code.)
    print _("Wizard Done!")
def WizardTrainer(mgr, config, progress):
import os, manager, train
bayes_base = os.path.join(mgr.data_directory, "$sbwiz$default_bayes_database")
mdb_base = os.path.join(mgr.data_directory, "$sbwiz$default_message_database")
fnames = []
for ext in ".pck", ".db":
fnames.append(bayes_base+ext)
fnames.append(mdb_base+ext)
config.wizard.temp_training_names = fnames
ManagerClass = manager.GetStorageManagerClass()
db_manager = ManagerClass(bayes_base, mdb_base)
classifier_data = manager.ClassifierData(db_manager, mgr)
classifier_data.InitNew()
rescore = config.training.rescore
if rescore:
stages = (_("Training"), .3), (_("Saving"), .1), (_("Scoring"), .6)
else:
stages = (_("Training"), .9), (_("Saving"), .1)
progress.set_stages(stages)
train.real_trainer(classifier_data, config, mgr.message_store, progress)
orig_classifier_data = mgr.classifier_data
mgr.classifier_data = classifier_data # temporary
try:
progress.tick()
if rescore:
now_config = config.filter_now
now_config.only_unread = False
now_config.only_unseen = False
now_config.action_all = False
now_config.folder_ids = config.training.ham_folder_ids + \
config.training.spam_folder_ids
now_config.include_sub = config.training.ham_include_sub or \
config.training.spam_include_sub
import filter
filter.filterer(mgr, config, progress)
bayes = classifier_data.bayes
progress.set_status(_("Completed training with %d spam and %d good messages") \
% (bayes.nspam, bayes.nham))
finally:
mgr.wizard_classifier_data = classifier_data
mgr.classifier_data = orig_classifier_data
from async_processor import AsyncCommandProcessor
import filter, train
dialog_map = {
"IDD_MANAGER" : (
(CloseButtonProcessor, "IDOK IDCANCEL"),
(TabProcessor, "IDC_TAB",
"""IDD_GENERAL IDD_FILTER IDD_TRAINING
IDD_STATISTICS IDD_ADVANCED"""),
(CommandButtonProcessor, "IDC_ABOUT_BTN", ShowAbout, ()),
),
"IDD_GENERAL": (
(ImageProcessor, "IDC_LOGO_GRAPHIC"),
(VersionStringProcessor, "IDC_VERSION"),
(TrainingStatusProcessor, "IDC_TRAINING_STATUS"),
(FilterEnableProcessor, "IDC_BUT_FILTER_ENABLE", "Filter.enabled"),
(FilterStatusProcessor, "IDC_FILTER_STATUS"),
(ShowWizardCommand, "IDC_BUT_WIZARD", "IDD_WIZARD"),
(CommandButtonProcessor, "IDC_BUT_RESET", ResetConfig, ()),
),
"IDD_FILTER_NOW" : (
(CloseButtonProcessor, "IDCANCEL"),
(BoolButtonProcessor, "IDC_BUT_UNREAD", "Filter_Now.only_unread"),
(BoolButtonProcessor, "IDC_BUT_UNSEEN", "Filter_Now.only_unseen"),
(BoolButtonProcessor, "IDC_BUT_ACT_ALL IDC_BUT_ACT_SCORE",
"Filter_Now.action_all"),
(FolderIDProcessor, "IDC_FOLDER_NAMES IDC_BROWSE",
"Filter_Now.folder_ids",
"Filter_Now.include_sub"),
(AsyncCommandProcessor, "IDC_START IDC_PROGRESS IDC_PROGRESS_TEXT",
filter.filterer,
_("Start Filtering"), _("Stop Filtering"),
"""IDCANCEL IDC_BUT_UNSEEN
IDC_BUT_UNREAD IDC_BROWSE IDC_BUT_ACT_SCORE
IDC_BUT_ACT_ALL"""),
),
"IDD_FILTER" : (
(FolderIDProcessor, "IDC_FOLDER_WATCH IDC_BROWSE_WATCH",
"Filter.watch_folder_ids",
"Filter.watch_include_sub"),
(ComboProcessor, "IDC_ACTION_CERTAIN", "Filter.spam_action"),
(FolderIDProcessor, "IDC_FOLDER_CERTAIN IDC_BROWSE_CERTAIN",
"Filter.spam_folder_id"),
(EditNumberProcessor, "IDC_EDIT_CERTAIN IDC_SLIDER_CERTAIN",
"Filter.spam_threshold"),
(BoolButtonProcessor, "IDC_MARK_SPAM_AS_READ", "Filter.spam_mark_as_read"),
(FolderIDProcessor, "IDC_FOLDER_UNSURE IDC_BROWSE_UNSURE",
"Filter.unsure_folder_id"),
(EditNumberProcessor, "IDC_EDIT_UNSURE IDC_SLIDER_UNSURE",
"Filter.unsure_threshold"),
(ComboProcessor, "IDC_ACTION_UNSURE", "Filter.unsure_action"),
(BoolButtonProcessor, "IDC_MARK_UNSURE_AS_READ", "Filter.unsure_mark_as_read"),
(FolderIDProcessor, "IDC_FOLDER_HAM IDC_BROWSE_HAM",
"Filter.ham_folder_id"),
(ComboProcessor, "IDC_ACTION_HAM", "Filter.ham_action"),
),
"IDD_TRAINING" : (
(FolderIDProcessor, "IDC_STATIC_HAM IDC_BROWSE_HAM",
"Training.ham_folder_ids",
"Training.ham_include_sub"),
(FolderIDProcessor, "IDC_STATIC_SPAM IDC_BROWSE_SPAM",
"Training.spam_folder_ids",
"Training.spam_include_sub"),
(BoolButtonProcessor, "IDC_BUT_RESCORE", "Training.rescore"),
(BoolButtonProcessor, "IDC_BUT_REBUILD", "Training.rebuild"),
(AsyncCommandProcessor, "IDC_START IDC_PROGRESS IDC_PROGRESS_TEXT",
train.trainer, _("Start Training"), _("Stop"),
"IDOK IDCANCEL IDC_BROWSE_HAM IDC_BROWSE_SPAM " \
"IDC_BUT_REBUILD IDC_BUT_RESCORE"),
(BoolButtonProcessor, "IDC_BUT_TRAIN_FROM_SPAM_FOLDER",
"Training.train_recovered_spam"),
(BoolButtonProcessor, "IDC_BUT_TRAIN_TO_SPAM_FOLDER",
"Training.train_manual_spam"),
(ComboProcessor, "IDC_DEL_SPAM_RS", "General.delete_as_spam_message_state",
_("not change the message,mark the message as read,mark the message as unread")),
(ComboProcessor, "IDC_RECOVER_RS", "General.recover_from_spam_message_state",
_("not change the message,mark the message as read,mark the message as unread")),
),
"IDD_STATISTICS" : (
(StatsProcessor, "IDC_STATISTICS IDC_BUT_RESET_STATS " \
"IDC_LAST_RESET_DATE"),
),
"IDD_ADVANCED" : (
(BoolButtonProcessor, "IDC_BUT_TIMER_ENABLED", "Filter.timer_enabled",
"""IDC_DELAY1_TEXT IDC_DELAY1_SLIDER
IDC_DELAY2_TEXT IDC_DELAY2_SLIDER
IDC_INBOX_TIMER_ONLY"""),
(EditNumberProcessor, "IDC_DELAY1_TEXT IDC_DELAY1_SLIDER", "Filter.timer_start_delay", 0.4, 10, 20, 60),
(EditNumberProcessor, "IDC_DELAY2_TEXT IDC_DELAY2_SLIDER", "Filter.timer_interval", 0.4, 10, 20, 60),
(BoolButtonProcessor, "IDC_INBOX_TIMER_ONLY", "Filter.timer_only_receive_folders"),
(CommandButtonProcessor, "IDC_SHOW_DATA_FOLDER", ShowDataFolder, ()),
(DialogCommand, "IDC_BUT_SHOW_DIAGNOSTICS", "IDD_DIAGNOSTIC"),
),
"IDD_DIAGNOSTIC" : (
(BoolButtonProcessor, "IDC_SAVE_SPAM_SCORE", "Filter.save_spam_info"),
(IntProcessor, "IDC_VERBOSE_LOG", "General.verbose"),
(CommandButtonProcessor, "IDC_BUT_VIEW_LOG", ShowLog, ()),
(CloseButtonProcessor, "IDOK IDCANCEL"),
),
"IDD_WIZARD": (
(ImageProcessor, "IDC_WIZ_GRAPHIC"),
(CloseButtonProcessor, "IDCANCEL"),
(wiz.ConfigureWizardProcessor, "IDC_FORWARD_BTN IDC_BACK_BTN IDC_PAGE_PLACEHOLDER",
"""IDD_WIZARD_WELCOME IDD_WIZARD_FOLDERS_WATCH IDD_WIZARD_FOLDERS_REST
IDD_WIZARD_FOLDERS_TRAIN IDD_WIZARD_TRAIN
IDD_WIZARD_TRAINING_IS_IMPORTANT
IDD_WIZARD_FINISHED_UNCONFIGURED IDD_WIZARD_FINISHED_UNTRAINED
IDD_WIZARD_FINISHED_TRAINED IDD_WIZARD_FINISHED_TRAIN_LATER
""",
WizardFinish),
),
"IDD_WIZARD_WELCOME": (
(CommandButtonProcessor, "IDC_BUT_ABOUT", ShowAbout, ()),
(RadioButtonProcessor, "IDC_BUT_PREPARATION", "Wizard.preparation"),
),
"IDD_WIZARD_TRAINING_IS_IMPORTANT" : (
(BoolButtonProcessor, "IDC_BUT_TRAIN IDC_BUT_UNTRAINED", "Wizard.will_train_later"),
(CommandButtonProcessor, "IDC_BUT_ABOUT", ShowTrainingDoc, ()),
),
"IDD_WIZARD_FOLDERS_REST": (
(wiz.EditableFolderIDProcessor,"IDC_FOLDER_CERTAIN IDC_BROWSE_SPAM",
"Filter.spam_folder_id", "Wizard.spam_folder_name",
"Training.spam_folder_ids"),
(wiz.EditableFolderIDProcessor,"IDC_FOLDER_UNSURE IDC_BROWSE_UNSURE",
"Filter.unsure_folder_id", "Wizard.unsure_folder_name"),
),
"IDD_WIZARD_FOLDERS_WATCH": (
(wiz.WatchFolderIDProcessor,"IDC_FOLDER_WATCH IDC_BROWSE_WATCH",
"Filter.watch_folder_ids"),
),
"IDD_WIZARD_FOLDERS_TRAIN": (
(wiz.TrainFolderIDProcessor,"IDC_FOLDER_HAM IDC_BROWSE_HAM",
"Training.ham_folder_ids"),
(wiz.TrainFolderIDProcessor,"IDC_FOLDER_CERTAIN IDC_BROWSE_SPAM",
"Training.spam_folder_ids"),
(BoolButtonProcessor, "IDC_BUT_RESCORE", "Training.rescore"),
),
"IDD_WIZARD_TRAIN" : (
(wiz.WizAsyncProcessor, "IDC_PROGRESS IDC_PROGRESS_TEXT",
WizardTrainer, "", "",
""),
),
"IDD_WIZARD_FINISHED_UNCONFIGURED": (
),
"IDD_WIZARD_FINISHED_UNTRAINED": (
),
"IDD_WIZARD_FINISHED_TRAINED": (
(WizardTrainingStatusProcessor, "IDC_TRAINING_STATUS"),
),
"IDD_WIZARD_FINISHED_TRAIN_LATER" : (
),
}
| [
"[email protected]"
] | |
861c710c6e89b0ffb3546483f63f4f2e2af85838 | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /first_eye/world/old_problem/right_government/eye_or_case.py | e661a8e74622ff9a608787dce1a027c2b2be1f36 | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py |
#! /usr/bin/env python
def government(str_arg):
small_company(str_arg)
print('early_fact')
def small_company(str_arg):
print(str_arg)
if __name__ == '__main__':
government('week')
| [
"[email protected]"
] | |
aa00fe43e3785a75507fb2a0b3aaad991da96511 | 0adb68bbf576340c8ba1d9d3c07320ab3bfdb95e | /regexlib/python_re2_test_file/regexlib_2890.py | bbf1cafd2ceabdcfdc8a3bcc7fb879bc369f6b85 | [
"MIT"
] | permissive | agentjacker/ReDoS-Benchmarks | c7d6633a3b77d9e29e0ee2db98d5dfb60cde91c6 | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | refs/heads/main | 2023-05-10T13:57:48.491045 | 2021-05-21T11:19:39 | 2021-05-21T11:19:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | # 2890
# (([\w|\.]*)\s*={1}\s*(.*?))$
# POLYNOMIAL
# nums:5
# POLYNOMIAL AttackString:""+"1"*10000+"!_1!1 _SLQ_1"
# ReDoS benchmark script: times the re2 engine (linear-time matching)
# against a pattern that exhibits polynomial backtracking in classic
# backtracking regex engines.
import re2 as re
from time import perf_counter
# Pattern from regexlib entry 2890 (see header comments above).
regex = """(([\w|\.]*)\s*={1}\s*(.*?))$"""
REGEX = re.compile(regex)
for i in range(0, 150000):
    # Attack string: an ever-growing run of '1's plus a suffix crafted to
    # force the match to fail late.
    ATTACK = "" + "1" * i * 10000 + "!_1!1 _SLQ_1"
    LEN = len(ATTACK)
    BEGIN = perf_counter()
    m = REGEX.search(ATTACK)
    # m = REGEX.match(ATTACK)
    DURATION = perf_counter() - BEGIN
    print(f"{i *10000}: took {DURATION} seconds!")
"[email protected]"
] | |
e77c02bc990fe5e539985d34b266c7a0618a1fbb | b1ddae0f702f7af4a22ccf8e57eccb6778eaa8a5 | /apps/users/migrations/0006_auto_20180703_0743.py | 5623ef9fa7b291ce5b84c28a1155b44805a9c8f0 | [] | no_license | MoNaiZi/Mxoinline3 | 1cd1effa716bacbe4a7fc83c4687adc1fdbbea03 | 8d8ba1322fbaefcf8767160e1e2d05afc755fe5c | refs/heads/master | 2020-03-17T00:03:41.868735 | 2018-07-11T00:48:09 | 2018-07-11T00:48:09 | 133,101,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2018-07-03 07:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9.8: re-declares EmailVerifyRecord.send_type
    # as a CharField limited to three fixed choices. The choice labels are
    # user-facing display strings (in Chinese) and must not be altered here.
    dependencies = [
        ('users', '0005_auto_20180630_2043'),
    ]
    operations = [
        migrations.AlterField(
            model_name='emailverifyrecord',
            name='send_type',
            field=models.CharField(choices=[('register', '注册'), ('forget', '找回密码'), ('update_email', '修改邮箱')], max_length=20),
        ),
    ]
| [
"[email protected]"
] | |
d6672c1c175bee083914f1102c4307b540023b3e | ef54d37f8a3303013ca7469871a320d303957ed7 | /robo4.2/4.2/lib/python2.7/site-packages/hpe3parclient/http.py | ad5b9816917462ec781103c68601c6260431104e | [] | no_license | richa92/Jenkin_Regression_Testing | d18badfcf16bda682dfe7bcbbd66f54a9a27a58d | 24a74926170cbdfafa47e972644e2fe5b627d8ff | refs/heads/master | 2020-07-12T10:01:59.099137 | 2019-08-27T12:14:53 | 2019-08-27T12:14:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,675 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# (c) Copyright 2012-2015 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" HTTPJSONRESTClient.
.. module: http
:Author: Walter A. Boring IV
:Description: This is the HTTP Client that is used to make the actual calls.
It includes the authentication that knows the cookie name for 3PAR.
"""
import logging
import requests
import time
try:
import json
except ImportError:
import simplejson as json
from hpe3parclient import exceptions
class HTTPJSONRESTClient(object):
"""
An HTTP REST Client that sends and recieves JSON data as the body of the
HTTP request.
:param api_url: The url to the WSAPI service on 3PAR
ie. http://<3par server>:8080
:type api_url: str
:param secure: Validate SSL cert? Default will not validate
:type secure: bool
:param http_log_debug: Turns on http log debugging. Default will not log
:type http_log_debug: bool
:param suppress_ssl_warnings: Suppresses log warning messages if True.
Default will not suppress warnings.
:type suppress_ssl_warnings: bool
"""
USER_AGENT = 'python-3parclient'
SESSION_COOKIE_NAME = 'X-Hp3Par-Wsapi-Sessionkey'
http_log_debug = False
_logger = logging.getLogger(__name__)
# Retry constants
retry_exceptions = (exceptions.HTTPServiceUnavailable,
requests.exceptions.ConnectionError)
tries = 5
delay = 0
backoff = 2
    def __init__(self, api_url, secure=False, http_log_debug=False,
                 suppress_ssl_warnings=False, timeout=None):
        # Optionally silence urllib3's insecure-request warnings when the
        # caller has chosen not to validate the server certificate.
        if suppress_ssl_warnings:
            requests.packages.urllib3.disable_warnings()
        # No WSAPI session until authenticate() succeeds.
        self.session_key = None
        # should be http://<Server:Port>/api/v1
        self.set_url(api_url)
        self.set_debug_flag(http_log_debug)
        self.times = []  # [("item", starttime, endtime), ...]
        self.secure = secure
        self.timeout = timeout
    def set_url(self, api_url):
        """Set the WSAPI base URL (e.g. http://<Server:Port>/api/v1).
        Trailing '/' characters are stripped so request paths can be
        appended directly.
        """
        # should be http://<Server:Port>/api/v1
        self.api_url = api_url.rstrip('/')
def set_debug_flag(self, flag):
"""
This turns on/off http request/response debugging output to console
:param flag: Set to True to enable debugging output
:type flag: bool
"""
if not HTTPJSONRESTClient.http_log_debug and flag:
ch = logging.StreamHandler()
HTTPJSONRESTClient._logger.setLevel(logging.DEBUG)
HTTPJSONRESTClient._logger.addHandler(ch)
HTTPJSONRESTClient.http_log_debug = True
def authenticate(self, user, password, optional=None):
"""
This tries to create an authenticated session with the 3PAR server
:param user: The username
:type user: str
:param password: Password
:type password: str
"""
# this prevens re-auth attempt if auth fails
self.auth_try = 1
self.session_key = None
info = {'user': user, 'password': password}
self._auth_optional = None
if optional:
self._auth_optional = optional
info.update(optional)
resp, body = self.post('/credentials', body=info)
if body and 'key' in body:
self.session_key = body['key']
self.auth_try = 0
self.user = user
self.password = password
def _reauth(self):
self.authenticate(self.user, self.password, self._auth_optional)
def unauthenticate(self):
"""
This clears the authenticated session with the 3PAR server.
"""
# delete the session on the 3Par
self.delete('/credentials/%s' % self.session_key)
self.session_key = None
def get_timings(self):
"""
Ths gives an array of the request timings since last reset_timings call
"""
return self.times
def reset_timings(self):
"""
This resets the request/response timings array
"""
self.times = []
def _http_log_req(self, args, kwargs):
if not self.http_log_debug:
return
string_parts = ['curl -i']
for element in args:
if element in ('GET', 'POST'):
string_parts.append(' -X %s' % element)
else:
string_parts.append(' %s' % element)
for element in kwargs['headers']:
header = ' -H "%s: %s"' % (element, kwargs['headers'][element])
string_parts.append(header)
HTTPJSONRESTClient._logger.debug("\nREQ: %s\n" % "".join(string_parts))
if 'body' in kwargs:
HTTPJSONRESTClient._logger.debug("REQ BODY: %s\n" %
(kwargs['body']))
def _http_log_resp(self, resp, body):
if not self.http_log_debug:
return
# Replace commas with newlines to break the debug into new lines,
# making it easier to read
HTTPJSONRESTClient._logger.debug("RESP:%s\n",
str(resp).replace("',", "'\n"))
HTTPJSONRESTClient._logger.debug("RESP BODY:%s\n", body)
def request(self, *args, **kwargs):
"""
This makes an HTTP Request to the 3Par server.
You should use get, post, delete instead.
"""
if self.session_key and self.auth_try != 1:
kwargs.setdefault('headers', {})[self.SESSION_COOKIE_NAME] = \
self.session_key
kwargs.setdefault('headers', kwargs.get('headers', {}))
kwargs['headers']['User-Agent'] = self.USER_AGENT
kwargs['headers']['Accept'] = 'application/json'
if 'body' in kwargs:
kwargs['headers']['Content-Type'] = 'application/json'
kwargs['body'] = json.dumps(kwargs['body'])
payload = kwargs['body']
else:
payload = None
# args[0] contains the URL, args[1] contains the HTTP verb/method
http_url = args[0]
http_method = args[1]
self._http_log_req(args, kwargs)
r = None
resp = None
body = None
while r is None and self.tries > 0:
try:
# Check to see if the request is being retried. If it is, we
# want to delay.
if self.delay:
time.sleep(self.delay)
if self.timeout:
r = requests.request(http_method, http_url, data=payload,
headers=kwargs['headers'],
verify=self.secure,
timeout=self.timeout)
else:
r = requests.request(http_method, http_url, data=payload,
headers=kwargs['headers'],
verify=self.secure)
resp = r.headers
body = r.text
if isinstance(body, bytes):
body = body.decode('utf-8')
# resp['status'], status['content-location'], and resp.status
# need to be manually set as Python Requests doesn't provide
# them automatically.
resp['status'] = str(r.status_code)
resp.status = r.status_code
if 'location' not in resp:
resp['content-location'] = r.url
r.close()
self._http_log_resp(resp, body)
# Try and convert the body response to an object
# This assumes the body of the reply is JSON
if body:
try:
body = json.loads(body)
except ValueError:
pass
else:
body = None
if resp.status >= 400:
if body and 'message' in body:
body['desc'] = body['message']
raise exceptions.from_response(resp, body)
except requests.exceptions.SSLError as err:
HTTPJSONRESTClient._logger.error(
"SSL certificate verification failed: (%s). You must have "
"a valid SSL certificate or disable SSL "
"verification.", err)
raise exceptions.SSLCertFailed("SSL Certificate Verification "
"Failed.")
except self.retry_exceptions as ex:
# If we catch an exception where we want to retry, we need to
# decrement the retry count prepare to try again.
r = None
self.tries -= 1
self.delay = self.delay * self.backoff + 1
# Raise exception, we have exhausted all retries.
if self.tries is 0:
raise ex
except requests.exceptions.HTTPError as err:
raise exceptions.HTTPError("HTTP Error: %s" % err)
except requests.exceptions.URLRequired as err:
raise exceptions.URLRequired("URL Required: %s" % err)
except requests.exceptions.TooManyRedirects as err:
raise exceptions.TooManyRedirects(
"Too Many Redirects: %s" % err)
except requests.exceptions.Timeout as err:
raise exceptions.Timeout("Timeout: %s" % err)
except requests.exceptions.RequestException as err:
raise exceptions.RequestException(
"Request Exception: %s" % err)
return resp, body
def _time_request(self, url, method, **kwargs):
start_time = time.time()
resp, body = self.request(url, method, **kwargs)
self.times.append(("%s %s" % (method, url),
start_time, time.time()))
return resp, body
def _do_reauth(self, url, method, ex, **kwargs):
# print("_do_reauth called")
try:
if self.auth_try != 1:
self._reauth()
resp, body = self._time_request(self.api_url + url, method,
**kwargs)
return resp, body
else:
raise ex
except exceptions.HTTPUnauthorized:
raise ex
def _cs_request(self, url, method, **kwargs):
# Perform the request once. If we get a 401 back then it
# might be because the auth token expired, so try to
# re-authenticate and try again. If it still fails, bail.
try:
resp, body = self._time_request(self.api_url + url, method,
**kwargs)
return resp, body
except exceptions.HTTPUnauthorized as ex:
# print("_CS_REQUEST HTTPUnauthorized")
resp, body = self._do_reauth(url, method, ex, **kwargs)
return resp, body
except exceptions.HTTPForbidden as ex:
# print("_CS_REQUEST HTTPForbidden")
resp, body = self._do_reauth(url, method, ex, **kwargs)
return resp, body
def get(self, url, **kwargs):
"""
Make an HTTP GET request to the server.
.. code-block:: python
#example call
try {
headers, body = http.get('/volumes')
} except exceptions.HTTPUnauthorized as ex:
print "Not logged in"
}
:param url: The relative url from the 3PAR api_url
:type url: str
:returns: headers - dict of HTTP Response headers
:returns: body - the body of the response. If the body was JSON, it
will be an object
"""
return self._cs_request(url, 'GET', **kwargs)
def post(self, url, **kwargs):
"""
Make an HTTP POST request to the server.
.. code-block:: python
#example call
try {
info = {'name': 'new volume name', 'cpg': 'MyCPG',
'sizeMiB': 300}
headers, body = http.post('/volumes', body=info)
} except exceptions.HTTPUnauthorized as ex:
print "Not logged in"
}
:param url: The relative url from the 3PAR api_url
:type url: str
:returns: headers - dict of HTTP Response headers
:returns: body - the body of the response. If the body was JSON, it
will be an object
"""
return self._cs_request(url, 'POST', **kwargs)
def put(self, url, **kwargs):
"""
Make an HTTP PUT request to the server.
.. code-block:: python
#example call
try {
info = {'name': 'something'}
headers, body = http.put('/volumes', body=info)
} except exceptions.HTTPUnauthorized as ex:
print "Not logged in"
}
:param url: The relative url from the 3PAR api_url
:type url: str
:returns: headers - dict of HTTP Response headers
:returns: body - the body of the response. If the body was JSON,
it will be an object
"""
return self._cs_request(url, 'PUT', **kwargs)
def delete(self, url, **kwargs):
"""
Make an HTTP DELETE request to the server.
.. code-block:: python
#example call
try {
headers, body = http.delete('/volumes/%s' % name)
} except exceptions.HTTPUnauthorized as ex:
print "Not logged in"
}
:param url: The relative url from the 3PAR api_url
:type url: str
:returns: headers - dict of HTTP Response headers
:returns: body - the body of the response. If the body was JSON, it
will be an object
"""
return self._cs_request(url, 'DELETE', **kwargs)
| [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.