| Column | Type | Range / Classes |
| --- | --- | --- |
| blob_id | stringlengths | 40 .. 40 |
| directory_id | stringlengths | 40 .. 40 |
| path | stringlengths | 3 .. 616 |
| content_id | stringlengths | 40 .. 40 |
| detected_licenses | sequencelengths | 0 .. 112 |
| license_type | stringclasses | 2 values |
| repo_name | stringlengths | 5 .. 115 |
| snapshot_id | stringlengths | 40 .. 40 |
| revision_id | stringlengths | 40 .. 40 |
| branch_name | stringclasses | 777 values |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 .. 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 .. 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 .. 2023-09-06 01:08:06 |
| github_id | int64, nullable (⌀) | 4.92k .. 681M |
| star_events_count | int64 | 0 .. 209k |
| fork_events_count | int64 | 0 .. 110k |
| gha_license_id | stringclasses | 22 values |
| gha_event_created_at | timestamp[us], nullable (⌀) | 2012-06-04 01:52:49 .. 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us], nullable (⌀) | 2008-05-22 07:58:19 .. 2023-08-21 12:35:19 |
| gha_language | stringclasses | 149 values |
| src_encoding | stringclasses | 26 values |
| language | stringclasses | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 .. 10.2M |
| extension | stringclasses | 188 values |
| content | stringlengths | 3 .. 10.2M |
| authors | sequencelengths | 1 .. 1 |
| author_id | stringlengths | 1 .. 132 |
a44ec4ba2cddc599611798c656eaece5002be537 | 4da66858f5278af1b1c9e1d0099a9e422400dcfa | /python_spider/test_spider/test_spider.py | f00b98fe900c4783aa078041a49e1ee89314edce | [] | no_license | hi-noikiy/TestProject | 62eb719a113b15be98a099e099afbb20f99d7062 | e37617c9f1bee058c196f776451c1d81d9fd5f86 | refs/heads/master | 2022-01-05T03:00:05.498931 | 2018-09-26T09:28:10 | 2018-09-26T09:28:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | import requests
from lxml import etree
html = etree.parse('hello.html')
print(type(html))
result = html.xpath('//li')
print(result)
print(len(result))
print(type(result))
print(type(result[0]))
result1 = html.xpath('//li/@class')
print(result1)
# res = etree.tostring(html)
# print(res) | [
"[email protected]"
] | |
6ca0aacda5d9852d4c3a3174f5af3c2e3a61a37f | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5686275109552128_0/Python/SozoS/B.py | c5247b1e1c0f2a3ec047c25e3d0c6464b0df70d0 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,305 | py | # Author: Davin Choo
# Email: [email protected]
# Observations:
# Doing all the splits before eating cannot make the solution worse
# Always splitting to an empty plate cannot make the solution worse
# So, we always split first, and split to empty plate
# (Exploit fact that we have infinite customers)
# Approach: Brute force search
# Set a maximum height to allow customers to eat at
# Eat for the lowest stack height given that constraint
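# Worked example (made up, not from the judge data): a single stack of 8 with
# h = 4 costs 1 split (8 -> 4+4) plus 4 minutes of eating = 5 minutes total,
# beating the 8 minutes it takes to eat the tall stack untouched.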
T = int(raw_input())
for i in xrange(0, T):
D = int(raw_input())
temp = raw_input().split()
sizes = {}
largest = 0
for j in xrange(0, D):
pj = int(temp[j])
largest = max(largest, pj)
if pj not in sizes:
sizes[pj] = 0
sizes[pj] += 1
# Sort pancake stack height in descending order
descending = sorted(sizes, reverse = True)
# Brute force check all possibilities
best_time = largest
best_height = largest
for h in xrange(largest-1, 1, -1):
current_time = 0
# Split
for d in descending:
if d <= h:
break
else: # d > h
                if d % h == 0:
                    current_time += sizes[d] * (d / h - 1)  # exactly d/h - 1 splits per stack
                else:
                    # ceil(d/h) - 1 == d // h splits per stack; the parentheses matter,
                    # otherwise sizes[d] * d / h floor-divides the product, not d itself
                    current_time += sizes[d] * (d / h)
# Eat
current_time += h
# Update
if current_time < best_time:
best_time = current_time
best_height = h
# print best_height
print "Case #" + str(i+1) + ": " + str(best_time) | [
"[email protected]"
] | |
362e510aa9dea235099c8a3569ec36a3eaee134d | e05e2d26e38ce80530e3458ce3c8e02f16e5cbe6 | /lineupSolver/lineup_generator_conquest.py | 85b8ef8047583f33771233bb2a188e0860f97836 | [] | no_license | jegutman/hearthstone_decks | 96acca7e040cb9b89253a867217655ce8cdf2756 | 95d4563c46618a9efccc10dbb34094258ec5bce7 | refs/heads/master | 2020-12-25T08:16:29.892068 | 2019-09-12T05:28:16 | 2019-09-12T05:28:16 | 102,289,609 | 3 | 0 | null | 2018-07-06T22:35:17 | 2017-09-03T19:40:44 | Python | UTF-8 | Python | false | false | 17,511 | py | import sys
sys.path.append('../')
from config import basedir
sys.path.append(basedir)
sys.path.append(basedir + '/lineupSolver')
from shared_utils import *
from json_win_rates import *
#from blended_win_rates import *
from conquest_utils import *
import datetime
def print_time():
print(datetime.datetime.now())
if __name__ == '__main__':
level1, level2, level3, level4, level5, level6, level7, level8, level9, level10, level11, level12, level13, level14, level15, level16 = None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None
lineups_to_test = [
"Malygos Druid,Quest Rogue,Deathrattle Hunter,Clone Priest",
"Secret Odd Mage,Even Paladin,Pirate Rogue,Midrange Hunter",
"Malygos Druid,Quest Rogue,Deathrattle Hunter,Clone Priest",
"Even Warlock,Quest Rogue,Deathrattle Hunter,Clone Priest",
]
lineups_to_test = [l.split(',') for l in lineups_to_test]
weights = [1 for l in lineups_to_test]
import sys
args = sys.argv[1:]
custom = {
}
#useCustom = True
useCustom = False
if useCustom:
lineups_to_test = []
weights = []
for i in custom.values():
lineups_to_test.append(i.split(','))
weights.append(1)
inverse = {}
for i,j in custom.items():
inverse[j] = i
if len(args) > 0 and args[0] == 'practice':
win_pcts, num_games, game_count, archetypes, overall_wr = get_win_pcts(min_game_threshold=0, min_game_count=0,limitTop=100)
if False:
filename = 'wr_na.csv'
#win_pcts2, archetypes2 = wr_from_csv(filename, scaling=100)
#win_pcts2, archetypes2 = wr_from_csv(filename, scaling=100)
win_pcts2, archetypes = wr_from_csv(filename, scaling=100)
win_pcts.update(win_pcts2)
overrides = [
]
win_pcts = override_wr(overrides,win_pcts)
my_lineup = [d.strip() for d in args[1].split(',')]
#opp_lineup = [d.strip() for d in deck_2.split(',')]
count, total = 0, 0.0
values = []
bans = {}
line = ['deck1', 'deck2', 'deck3', 'deck4', 'win_pct', 'ban', 'opp_win_pct', 'opp_ban', 'ban_details->']
print(",".join([str(i) for i in line]))
#print archetypes
for opp_lineup, weight in zip(lineups_to_test, weights):
assert all([d in archetypes for d in my_lineup]), ([d in archetypes for d in my_lineup], my_lineup)
assert all([d in archetypes for d in opp_lineup]), ([d in archetypes for d in opp_lineup], opp_lineup)
ban, win_pct = win_rate(my_lineup, opp_lineup, win_pcts)
bans[ban] = bans.get(ban, 0) + 1
#second_ban = sorted(win_rate(my_lineup, opp_lineup, win_pcts).items())[-2][0]
if win_pct > 0:
count += weight
total += win_pct * weight
values.append(win_pct)
opp_ban, opp_win_pct = win_rate(opp_lineup, my_lineup, win_pcts)
#print ",".join([str(i) for i in [win_pct, opp_lineup, ban, win_pct, "weight", weight]])
printCsv = True
if not printCsv:
print "%-80s %-7s %-20s %-7s %-s" % (opp_lineup, win_pct, ban, opp_win_pct, opp_ban)
print " ", win_rate(my_lineup, opp_lineup, win_pcts)
print " ",pre_ban(my_lineup, opp_lineup, win_pcts), "\n"
else:
line = []
#line.append(ban)
#line.append(' ')
for l in opp_lineup:
line.append(l)
#line = ['deck1', 'deck2', 'deck3', 'deck4', 'win_pct', 'ban', 'opp_win_pct', 'opp_ban', 'ban_details->']
line.append(win_pct)
line.append(ban)
line.append(opp_win_pct)
line.append(opp_ban)
for i, j in sorted(pre_ban(my_lineup, opp_lineup, win_pcts).items(), key=lambda x:x[1], reverse=True):
line.append(i)
line.append(j)
print(",".join([str(i) for i in line]))
# BAN STUFF
showBans = False
if showBans:
print my_lineup, "vs", opp_lineup
win_rates_grid(my_lineup, opp_lineup, win_pcts, num_games)
res = pre_ban_old(my_lineup,
opp_lineup,
win_pcts)
print ""
print my_lineup, "vs", opp_lineup
print "bans"
print "%-20s %-20s" % ("p1_ban", "p2_ban", "p1_win_%")
#for i, j in sorted(res.items(), key=lambda x:-x[1]):
for i, j in sorted(res.items(), key=lambda x:(x[0][0], x[1])):
d1, d2 = i
print '%-20s %-20s %s' % (d1, d2, round(j,4))
print "\n\n"
print("average: %s" % (total / count))
print("min: %s" % min(values))
print("bans: %s" % sorted(bans.items(), key=lambda x:x[1], reverse=True))
elif len(args) > 0 and args[0] == 'custom':
win_pcts, num_games, game_count, archetypes, overall_wr = get_win_pcts(min_game_threshold=0, min_game_count=0)
#win_pcts[('Control Warrior', 'Control Warrior')] = 0.5
players = sorted(custom.keys())
for p1 in players:
for p2 in players:
if p1 == p2: continue
deck_1 = custom.get(p1)
deck_2 = custom.get(p2)
my_lineup = [d.strip() for d in deck_1.split(',')]
opp_lineup = [d.strip() for d in deck_2.split(',')]
assert all([d in archetypes for d in my_lineup]), ([d in archetypes for d in my_lineup], my_lineup)
assert all([d in archetypes for d in opp_lineup]), ([d in archetypes for d in opp_lineup], opp_lineup)
ban, win_pct = win_rate(my_lineup, opp_lineup, win_pcts)
print ",".join([str(i) for i in [p1, p2, ban, win_pct]])
elif len(args) > 0 and args[0] == 'simfile':
#win_pcts, num_games, game_count, archetypes, overall_wr = get_win_pcts(min_game_threshold=0, min_game_count=0)
win_pcts, archetypes = wr_from_csv('input_wr.csv', scaling=100)
print sorted(archetypes, key=lambda x:x.split()[-1])
archetypes.append('Unbeatable')
#archetypes.append('Fatigue Warrior')
overrides = [
]
win_pcts = override_wr(overrides,win_pcts)
if args[1] in custom.keys():
args[1] = custom.get(args[1])
if args[2] in custom.keys():
args[2] = custom.get(args[2])
my_lineup = [d.strip() for d in args[1].split(',')]
opp_lineup = [d.strip() for d in args[2].split(',')]
assert all([d in archetypes for d in my_lineup]), ([d in archetypes for d in my_lineup], my_lineup)
#assert all([d in archetypes for d in opp_lineup]), ([d in archetypes for d in opp_lineup], opp_lineup)
print my_lineup, "vs", opp_lineup
win_rates_grid(my_lineup, opp_lineup, win_pcts)
if len(my_lineup) < 4 or len(opp_lineup) < 4:
print round(post_ban(my_lineup, opp_lineup, win_pcts) * 100,2)
else:
print win_rate(my_lineup, opp_lineup, win_pcts)
print pre_ban(my_lineup, opp_lineup, win_pcts)
res = pre_ban_old(my_lineup,
opp_lineup,
win_pcts)
print ""
print my_lineup, "vs", opp_lineup
print "bans"
print "%-20s %-20s" % ("p1_ban", "p2_ban")
#for i, j in sorted(res.items(), key=lambda x:-x[1]):
for i, j in sorted(res.items(), key=lambda x:(x[0][0], x[1])):
d1, d2 = i
print '%-20s %-20s %s' % (d1, d2, round(j,4))
my_lineup, opp_lineup = opp_lineup, my_lineup
print my_lineup, "vs", opp_lineup
win_rates_grid(my_lineup, opp_lineup, win_pcts)
print win_rate(my_lineup, opp_lineup, win_pcts)
print pre_ban(my_lineup, opp_lineup, win_pcts)
res = pre_ban_old(my_lineup,
opp_lineup,
win_pcts)
print ""
print my_lineup, "vs", opp_lineup
print "bans"
print "%-20s %-20s" % ("p1_ban", "p2_ban")
#for i, j in sorted(res.items(), key=lambda x:-x[1]):
for i, j in sorted(res.items(), key=lambda x:(x[0][0], x[1])):
d1, d2 = i
print '%-20s %-20s %s' % (d1, d2, round(j,4))
elif len(args) > 0 and args[0] == 'sim':
win_pcts, num_games, game_count, archetypes, overall_wr = get_win_pcts(min_game_threshold=0, min_game_count=0)
print sorted(archetypes, key=lambda x:x.split()[-1])
if True:
filename = 'wc_wr.csv'
#win_pcts2, archetypes2 = wr_from_csv(filename, scaling=100)
win_pcts2, archetypes2 = wr_from_csv(filename, scaling=100)
win_pcts.update(win_pcts2)
archetypes.append('Unbeatable')
#archetypes.append('Fatigue Warrior')
overrides = [
#('Deathrattle Hunter', 'Odd Paladin', 0.40),
]
win_pcts = override_wr(overrides,win_pcts)
if args[1] in custom.keys():
args[1] = custom.get(args[1])
if args[2] in custom.keys():
args[2] = custom.get(args[2])
my_lineup = [d.strip() for d in args[1].split(',')]
opp_lineup = [d.strip() for d in args[2].split(',')]
assert all([d in archetypes for d in my_lineup]), ([d in archetypes for d in my_lineup], my_lineup)
assert all([d in archetypes for d in opp_lineup]), ([d in archetypes for d in opp_lineup], opp_lineup)
print my_lineup, "vs", opp_lineup
win_rates_grid(my_lineup, opp_lineup, win_pcts,num_games)
if len(my_lineup) < 4 or len(opp_lineup) < 4:
print round(post_ban(my_lineup, opp_lineup, win_pcts) * 100,2)
else:
print win_rate(my_lineup, opp_lineup, win_pcts)
print pre_ban(my_lineup, opp_lineup, win_pcts)
res = pre_ban_old(my_lineup,
opp_lineup,
win_pcts)
print ""
print my_lineup, "vs", opp_lineup
print "bans"
print "%-20s %-20s %s" % ("p1_ban", "p2_ban", "p1_win_%")
#for i, j in sorted(res.items(), key=lambda x:-x[1]):
for i, j in sorted(res.items(), key=lambda x:(x[0][0], x[1])):
d1, d2 = i
print '%-20s %-20s %s' % (d1, d2, round(j,4))
my_lineup, opp_lineup = opp_lineup, my_lineup
print my_lineup, "vs", opp_lineup
win_rates_grid(my_lineup, opp_lineup, win_pcts,num_games)
print win_rate(my_lineup, opp_lineup, win_pcts)
print pre_ban(my_lineup, opp_lineup, win_pcts)
res = pre_ban_old(my_lineup,
opp_lineup,
win_pcts)
print ""
print my_lineup, "vs", opp_lineup
print "bans"
print "%-20s %-20s %s" % ("p1_ban", "p2_ban", "p1_win_%")
#for i, j in sorted(res.items(), key=lambda x:-x[1]):
for i, j in sorted(res.items(), key=lambda x:(x[0][0], x[1])):
d1, d2 = i
print '%-20s %-20s %s' % (d1, d2, round(j,4))
else:
#### ESPORTS ARENA
win_pcts, num_games, game_count, archetypes, overall_wr = get_win_pcts(min_game_threshold=50, min_game_count=20, min_win_pct=0.44,limitTop=30)
#if True:
if False:
filename = 'wr_eu.csv'
#win_pcts2, archetypes2 = wr_from_csv(filename, scaling=100)
win_pcts2, archetypes2 = wr_from_csv(filename, scaling=100)
win_pcts.update(win_pcts2)
if False:
archetypes = [
'Token Druid',
'Taunt Druid',
'Malygos Druid',
'Mill Druid',
'Deathrattle Hunter',
'Secret Hunter',
#'Tempo Mage',
#'Big Spell Mage',
#'Murloc Mage',
'Odd Paladin',
#'Even Paladin',
'Resurrect Priest',
'Deathrattle Rogue',
'Odd Rogue',
'Quest Rogue',
'Kingsbane Rogue',
'Shudderwock Shaman',
'Zoo Warlock',
'Even Warlock',
'Cube Warlock',
"Mecha'thun Warlock",
'Odd Warrior',
"Mecha'thun Warrior",
]
overrides = [
#('Quest Rogue', 'Zoo Warlock', 0.5),
#('Quest Rogue', 'Odd Rogue', 0.5),
]
win_pcts = override_wr(overrides,win_pcts)
print sorted(archetypes, key=lambda x:x.split()[-1])
#archetypes.append('Quest Warrior')
excluded = []
if True:
excluded += []
#excluded += ['Secret Odd Mage','Odd Mage', 'Odd Warrior', 'Odd Quest Warrior', 'Malygos Rogue']
#excluded += ['Turvy OTK Priest', "Mecha'thun Druid"]
#excluded += ["Mecha'thun Priest"]
#excluded += ['Odd Warrior', 'Spell Hunter']
#excluded += ['Tempo Mage', 'Tempo Rogue', 'Murloc Mage', 'Recruit Hunter']
#excluded += ['Spiteful Druid', 'Kingsbane Rogue', 'Quest Mage']
print "\n\nEXCLUDING:", excluded
archetypes = [a for a in archetypes if a not in excluded]
win_rates_against_good = {}
print_time()
additional_archetypes = []
for lu_test in lineups_to_test:
for a in lu_test:
if a not in archetypes:
print("Rare Archetypes: %s" % a)
additional_archetypes.append(a)
lineups, archetype_map = generate_lineups(archetypes, additional_archetypes=additional_archetypes, num_classes=4)
archetypes.append('Unbeatable')
archetype_map[len(archetypes)] = 'Unbeatable'
inverse_map = {}
for i,j in archetype_map.items():
inverse_map[j] = i
win_pcts_int = {}
for i,a in archetype_map.items():
for j,b in archetype_map.items():
if (a,b) in win_pcts:
win_pcts_int[(i,j)] = win_pcts[(a,b)]
print_time()
print "testing %s lineups" % len(lineups)
if len(args) > 0 and args[0] == 'target':
lineups_to_test = []
for x in args[1:]:
tmp = [i.strip() for i in x.split(',')]
lineups_to_test.append(tmp)
weights = [1 for i in lineups_to_test]
print "\n"
print "TESTING vs LINEUPS"
for l in lineups_to_test:
print "%-80s" % (" ".join(l)), '"' + ",".join(l) + '"'
print "\n"
for lineup in lineups:
for lu_test in lineups_to_test:
lu_test = list(get_lineup(lu_test, inverse_map))
win_rates_against_good[lineup] = win_rates_against_good.get(lineup, []) + [win_rate(list(lineup), lu_test, win_pcts_int)]
for lineup_txt, winrates in sorted(win_rates_against_good.items(), key=lambda x: x[1][0][1], reverse=True)[:3]:
print lineup_txt, winrates
lu_strings = []
#for i,j in sorted(win_rates_against_good.items(), key=lambda x:sum([i[1] for i in x[1]]))[-10:]:
#for i,j in sorted(win_rates_against_good.items(), key=lambda x:min([i[1] for i in x[1]]))[-10:]:
#for i,j in sorted(win_rates_against_good.items(), key=lambda x:geometric_mean([i[1] for i in x[1]],weights))[-10:]:
print_time()
#for i,j in sorted(win_rates_against_good.items(), key=lambda x:sumproduct_normalize([i[1] for i in x[1]],weights) * 2 + min([i[1] for i in x[1]]))[-10:]:
for i,j in sorted(win_rates_against_good.items(), key=lambda x:sumproduct_normalize([i[1] for i in x[1]],weights))[-60:]:
i = get_lineup(i, archetype_map)
i_print = " " + "".join(["%-20s" % x for x in i])
#print "%-80s %s %s" % (i_print,j, round(sum([x[1] for x in j])/len(j),3)), '"' + ",".join(i) + '"'
x = []
for _i in j:
x.append((str(archetype_map[_i[0]]), _i[1]))
j = x
print "%-80s %s %s" % (i_print,j, round(sum([x[1] for x in j])/len(j),3))
lineup_string = ",".join(i)
lu_strings.append((lineup_string, round(sum([x[1] for x in j])/len(j),3), round(geometric_mean([i[1] for i in j],weights),3), round(min([x[1] for x in j]),3)))
print ' "' + lineup_string + '"'
for i,j,k,l in lu_strings:
print "".join(["%-20s" % x for x in i.split(',')]), j, k, l, ' "%(i)s"' % locals()
classes = ['Druid', 'Mage', 'Shaman', 'Priest', 'Hunter', 'Paladin', 'Rogue', 'Warrior', 'Warlock']
file = open('tmp_output.csv', 'w')
res = ['win pct'] + classes
file.write(",".join(res) + '\n')
for i,j,k,l in lu_strings:
res = [str(j)]
for c in classes:
res.append(" ".join([d for d in i.split(',') if d.split(' ')[-1] == c]))
#print(",".join(res))
file.write(",".join(res) + '\n')
| [
"[email protected]"
] | |
f865d11508ed722439228027809b46b8c7a88b52 | a25b18e3fd20e0b14f4c852a956b7471c0fb11c9 | /7_14_longest_public_prefix.py | 411f468a7c368a845760dd4a1bcf2458766d644a | [] | no_license | Kiriyamaku/learning_algorithm | efd8bc18b9cce978e30dfd4cd2e48d451a3e8429 | 6a1886477c0cccf24b077d175804866f2c76028a | refs/heads/master | 2022-12-11T11:13:44.736019 | 2020-08-22T03:36:49 | 2020-08-22T03:36:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | class Solution(object):
    def longestCommonPrefix(self, strs):
        """
        :type strs: List[str]
        :rtype: str
        """
        if not strs:
            return ""
        result = []
        for index, letter in enumerate(strs[0]):
            for word in strs[1:]:
                # Stop as soon as another word is too short or disagrees here.
                if index >= len(word) or letter != word[index]:
                    return "".join(result)
            result.append(letter)
        return "".join(result)
if __name__ == "__main__":
a=Solution()
test_case=["flower","flow","flight"]
    print(a.longestCommonPrefix(test_case)) | [
"[email protected]"
] | |
6892bb43785b7888d4248352098f68fab19ad9bd | 3879d1ca43c573c209f962182cd1e7f7fe978fbf | /leetcode/2011. Final Value of Variable After Performing Operations/2011.py | 0af1a0518557b593bab1bfbcfcc10daf5e12eebf | [] | no_license | DoctorLai/ACM | 34a5600a5adf22660c5d81b2d8b7a358be537ecf | aefa170f74c55c1230eb6f352770512b1e3f469e | refs/heads/master | 2023-09-01T02:13:01.604508 | 2023-08-31T15:42:07 | 2023-08-31T15:42:07 | 146,173,024 | 62 | 18 | null | 2020-10-11T13:19:57 | 2018-08-26T11:00:36 | C++ | UTF-8 | Python | false | false | 393 | py | # https://helloacm.com/teaching-kids-programming-final-value-of-variable-after-performing-operations-via-reduce-function/
# https://leetcode.com/problems/final-value-of-variable-after-performing-operations/
# EASY, LAMBDA
class Solution:
def finalValueAfterOperations(self, operations: List[str]) -> int:
return reduce(lambda a, b: a + 1 if b[1] == '+' else a - 1, operations, 0)
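    # e.g. finalValueAfterOperations(["--X", "X++", "X++"]) -> 1:
    # start at 0, decrement once, then increment twice.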
| [
"[email protected]"
] | |
d9ff25c429389600a7664368ae7dfe218df68cf7 | bc02e2c69f425e03b609f466b0a2d52a455765dc | /0524/정수 삼각형_김현성.py | aecf9418b3d76d01db1b968653c3924d5a3df0dd | [] | no_license | gaberani/AlgorithmStudy | d795f449fe185c3993df90173f27b7eb74e02366 | 6d9d20ac29446d22f2e0ef7037f131c4a2f48762 | refs/heads/master | 2023-02-03T03:24:45.039238 | 2020-12-22T12:53:52 | 2020-12-22T12:53:52 | 287,101,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | n = int(input())
tri = [list(map(int, input().split())) for _ in range(n)]
# print(tri)
# The leftmost cell has only the parent directly above it; the rightmost only
# the one above-left; every other cell takes the larger of its two parents.
for i in range(1, n):  # skip the first row, it has nothing above it
for j in range(i+1):
if j == 0:
tri[i][j] += tri[i-1][j]
elif j == i:
tri[i][j] += tri[i-1][j-1]
else:
tri[i][j] += max(tri[i-1][j-1], tri[i-1][j])
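# e.g. (made-up input) rows [7], [3, 8], [8, 1, 0] accumulate to
# [7], [10, 15], [18, 16, 15], so the last line prints 18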
print(max(tri[n-1])) | [
"[email protected]"
] | |
4d73d8011aa6a57fb0f1a14eb63edbfd1dc93fdd | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /5WvxKLK55JvT8NLfR_12.py | 93e7a18be50b5ea02c4e61e23f3ac3de275208a6 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,833 | py | """
A square matrix (same number of rows as columns) is called _row diagonally
dominant_ if "the absolute value of each entry in the main diagonal is
strictly larger than the sum of the absolute values of the other entries in
that row".
To illustrate ...
[
[10, 3, 6],
[2, -9, -6],
[1, -1, 4]
]
The absolute values from top left to bottom right are:
* `10` = First item of first row.
* `9` = Second item of second row.
* `4` = Third item of third row.
... making a _row diagonal dominant_ total of 23.
**In the first row ...**
* The value of the _row diagonal dominant_ is `10`.
* The sum of the other absolute values are `3` and `6` make a total of `9`.
... so far, the matrix is _row diagonally dominant_ , since `10 > 9`.
**In the second row ...**
* The value of the _row diagonal dominant_ is `9`.
* The sum of the other absolute values, `2` and `6`, makes a total of `8`, so this row also passes, since `9 > 8`.
Change that `2` to a `3`, however, and the other absolute values total `9`,
meaning the matrix below is not _row diagonally dominant_, since `9 <= 9`.
[
[10, 3, 6],
[3, -9, -6],
[1, -1, 4]
]
For a square to be _row diagonally dominant_ , all of the rows in the square
have to be like Row 1.
Write a function that determines if a given square matrix is row diagonally
dominant.
### Examples
diag_dom([
[2, -1],
[-1, 2]
]) ➞ True
diag_dom([
[0, 1],
[1, 0]
]) ➞ False
diag_dom([
[10, 3, 6],
[2, -9, -6],
[1, -1, 4]
]) ➞ True
diag_dom([
[10, 3, 6],
[4, -9, -6],
[1, -1, 4]
]) ➞ False
### Notes
As in the examples, the size of the matrices will change, but they will always
be square.
"""
def diag_dom(arr):
	return all(sum(abs(x) for x in row) < 2 * abs(arr[i][i]) for i, row in enumerate(arr))
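
# Sanity check against the docstring examples. Requiring the sum of a row's
# absolute values to be less than twice |diagonal| is the same as requiring
# "others < |diagonal|", because the diagonal entry is counted once on each side.
if __name__ == "__main__":
    assert diag_dom([[2, -1], [-1, 2]])
    assert not diag_dom([[0, 1], [1, 0]])
    assert diag_dom([[10, 3, 6], [2, -9, -6], [1, -1, 4]])
    assert not diag_dom([[10, 3, 6], [4, -9, -6], [1, -1, 4]])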
| [
"[email protected]"
] | |
0427134f48809a05a4591c82d7d171b9583a9c2a | dd72348fc25e5a844e18f4fd6eff326a143f5852 | /Chapter 02/nn_architecture/mnist.py | c7fbf893f7d04056d47b9044f7316181168b8617 | [
"MIT"
] | permissive | bpbpublications/Time-Series-Forecasting-using-Deep-Learning | 3d76a74b0106decefa8a6fb27e3379211416d928 | fd84553d33e912edb4a1400af0f9374e72747457 | refs/heads/main | 2023-09-02T23:42:19.707131 | 2021-10-19T16:45:28 | 2021-10-19T16:45:28 | 409,091,964 | 20 | 13 | null | null | null | null | UTF-8 | Python | false | false | 763 | py | import torch.nn as nn
import torch.nn.functional as F
class MnistModel(nn.Module):
def __init__(self):
super(MnistModel, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size = 5)
self.conv2 = nn.Conv2d(10, 20, kernel_size = 5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = self.conv1(x)
x = F.max_pool2d(x, 2)
x = F.relu(x)
x = self.conv2(x)
x = self.conv2_drop(x)
x = F.max_pool2d(x, 2)
x = F.relu(x)
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training = self.training)
x = self.fc2(x)
return F.log_softmax(x)
| [
"[email protected]"
] | |
ad11f693f8998e904ba57a59f2b2a3e5b9cbdab4 | e82b761f53d6a3ae023ee65a219eea38e66946a0 | /All_In_One/addons/InteractionOps/operators/iops.py | 414d2054a0fa5bc9c908cc9342fe8a541f7004ff | [] | no_license | 2434325680/Learnbgame | f3a050c28df588cbb3b14e1067a58221252e2e40 | 7b796d30dfd22b7706a93e4419ed913d18d29a44 | refs/heads/master | 2023-08-22T23:59:55.711050 | 2021-10-17T07:26:07 | 2021-10-17T07:26:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,756 | py | import bpy
class IOPS(bpy.types.Operator):
bl_idname = "iops.main"
bl_label = "IOPS"
bl_options = {"REGISTER", "UNDO"}
modes_3d = {0: "VERT", 1: "EDGE", 2: "FACE"}
modes_uv = {0: "VERTEX", 1: "EDGE", 2: "FACE", 3: "ISLAND"}
modes_gpen = {0: "EDIT_GPENCIL", 1: "PAINT_GPENCIL", 2: "SCULPT_GPENCIL"}
modes_curve = {0: "EDIT_CURVE"}
modes_text = {0: "EDIT_TEXT"}
modes_meta = {0: "EDIT_META"}
modes_lattice = {0: "EDIT_LATTICE"}
modes_armature = {0: "EDIT", 1: "POSE"}
supported_types = {"MESH", "CURVE", "GPENCIL", "EMPTY", "TEXT", "META", "ARMATURE", "LATTICE"}
# Current mode
_mode_3d = ""
_mode_uv = ""
_mode_gpen = ""
_mode_curve = ""
_mode_text = ""
_mode_meta = ""
_mode_armature = ""
_mode_lattice = ""
@classmethod
def poll(cls, context):
return context.object is not None
def get_mode_3d(self, tool_mesh):
mode = ""
if tool_mesh[0]:
mode = "VERT"
elif tool_mesh[1]:
mode = "EDGE"
elif tool_mesh[2]:
mode = "FACE"
return mode
def execute(self, context):
# Object <-> Mesh
scene = bpy.context.scene
tool = bpy.context.tool_settings
tool_mesh = scene.tool_settings.mesh_select_mode
active_object = bpy.context.view_layer.objects.active
if active_object.type == "MESH":
_mode_3d = self.get_mode_3d(tool_mesh)
if (bpy.context.area.type == "VIEW_3D" or
(bpy.context.area.type == "IMAGE_EDITOR" and
tool.use_uv_select_sync is True)):
# Same modes for active sync in UV
# Go to Edit Mode
if bpy.context.mode == "OBJECT":
bpy.ops.object.mode_set(mode="EDIT")
bpy.ops.mesh.select_mode(type=self._mode_3d)
_mode_3d = self._mode_3d
# self.report({"INFO"}, _mode_3d)
return{"FINISHED"}
# Switch selection modes
# If activated same selection mode again switch to Object Mode
if (bpy.context.mode == "EDIT_MESH" and self._mode_3d != _mode_3d):
bpy.ops.mesh.select_mode(type=self._mode_3d)
_mode_3d = self._mode_3d
# self.report({"INFO"}, _mode_3d)
return{"FINISHED"}
else:
bpy.ops.object.mode_set(mode="OBJECT")
# self.report({"INFO"}, "OBJECT MODE")
return{"FINISHED"}
# UV <-> Mesh
if bpy.context.area.type == "IMAGE_EDITOR":
# Go to Edit Mode and Select All
bpy.ops.object.mode_set(mode="EDIT")
bpy.ops.mesh.select_all(action="SELECT")
tool.uv_select_mode = self._mode_uv
_mode_uv = self._mode_uv
# self.report({"INFO"}, _mode_uv)
return{"FINISHED"}
elif self._mode_uv != _mode_uv:
tool.uv_select_mode = self._mode_uv
_mode_uv = self._mode_uv
# self.report({"INFO"}, _mode_uv)
return{"FINISHED"}
else:
bpy.ops.object.mode_set(mode="OBJECT")
# self.report({"INFO"}, "OBJECT MODE")
return{"FINISHED"}
# Object <-> Curve
if active_object.type == "CURVE":
_mode_curve = "EDIT" if bpy.context.mode != "EDIT_CURVE" else "OBJECT"
bpy.ops.object.mode_set(mode=_mode_curve)
# self.report({"INFO"}, _mode_curve)
return{"FINISHED"}
# Object <-> GPencil
if active_object.type == "GPENCIL":
_mode_gpen = active_object.mode
if (bpy.context.area.type == "VIEW_3D"):
if bpy.context.mode == "OBJECT":
_mode_gpen = self._mode_gpen
bpy.ops.object.mode_set(mode=_mode_gpen)
# self.report({"INFO"}, _mode_gpen)
return{"FINISHED"}
elif self._mode_gpen != _mode_gpen:
bpy.ops.object.mode_set(mode=self._mode_gpen)
_mode_gpen = self._mode_gpen
# self.report({"INFO"}, _mode_gpen)
return{"FINISHED"}
else:
bpy.ops.object.mode_set(mode="OBJECT")
# self.report({"INFO"}, "OBJECT MODE")
return{"FINISHED"}
return{"FINISHED"}
# Object <-> Text
if active_object.type == "FONT":
_mode_text = "EDIT" if bpy.context.mode != "EDIT_TEXT" else "OBJECT"
bpy.ops.object.mode_set(mode=_mode_text)
# self.report({"INFO"}, _mode_text)
return{"FINISHED"}
# Object <-> Meta
if active_object.type == "META":
_mode_meta = "EDIT" if bpy.context.mode != "EDIT_META" else "OBJECT"
# bpy.ops.object.mode_set(mode=_mode_meta)
bpy.ops.object.editmode_toggle()
# self.report({"INFO"}, _mode_meta)
return{"FINISHED"}
# Object <-> Armature
if active_object.type == "ARMATURE":
_mode_armature = active_object.mode
if (bpy.context.area.type == "VIEW_3D"):
if bpy.context.mode == "OBJECT":
_mode_armature = self._mode_armature
bpy.ops.object.mode_set(mode=_mode_armature)
# self.report({"INFO"}, _mode_armature)
return{"FINISHED"}
elif self._mode_armature != _mode_armature:
bpy.ops.object.mode_set(mode=self._mode_armature)
_mode_armature = self._mode_armature
# self.report({"INFO"}, _mode_armature)
return{"FINISHED"}
else:
bpy.ops.object.mode_set(mode="OBJECT")
# self.report({"INFO"}, "OBJECT MODE")
return{"FINISHED"}
return{"FINISHED"}
# Object <-> Lattice
if active_object.type == "LATTICE":
_mode_lattice = "EDIT" if bpy.context.mode != "EDIT_LATTICE" else "OBJECT"
bpy.ops.object.mode_set(mode=_mode_lattice)
# self.report({"INFO"}, _mode_lattice)
return{"FINISHED"}
# Unsupported Types
if active_object.type not in self.supported_types:
self.report({"INFO"}, "Object type " + str(active_object.type) + " not supported by iOps!")
return{"FINISHED"}
return{"FINISHED"}
| [
"[email protected]"
] | |
7ff23331c1eb0d2eebc99527903657b00726812c | 496a63f41fa32e2bb3ecce0d35ff4374f1c02ad5 | /src/scripting/parser/combinator/phrase.py | 5ea68e0c555f1e424e15a43ff0aa9d39dd4eee26 | [
"BSD-3-Clause"
] | permissive | vincent-lg/avenew.one | bbfa8d44e68db943b8825e9d4a32a43e985778fe | fb7f98d331e47e2032ee1e51bf3e4b2592807fdf | refs/heads/main | 2023-02-14T00:28:53.511552 | 2021-01-13T11:13:07 | 2021-01-13T11:13:07 | 330,207,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,428 | py | # Copyright (c) 2020-20201, LE GOFF Vincent
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
"""Phrase combinator."""
from scripting.exceptions import ParseError
from scripting.parser.parser import Parser
class Phrase(Parser):
"""
Parser for a phrase.
A phrase is just a wrapper around a sub-parser. Its only
modification is that, when the sub-parser is done, the phrase
will raise an error if there are non-parsed tokens.
"""
def __init__(self, parser):
self.parser = parser
async def process(self, tokens):
"""Process the given tokens."""
result = await self.parser.process(tokens)
if tokens.empty(check_cursor=True):
return result
tokens.parse_error("incomplete input")
def repr(self, seen=None):
"""Return the parser's representation as a string."""
seen = seen or []
seen.append(self)
return "phrase" + self.parser.repr(seen=seen)
| [
"[email protected]"
] | |
5c9772366bf445c954968e4b507dc52dcb29c60e | 28deae4b6f2ef4c83116d8a7e08061b2ac47bb71 | /Spider/commentbox/spider/encrypt.py | 71a3b95bbdf67d38fd3744475f576aec28cdd9eb | [
"MIT",
"Apache-2.0"
] | permissive | Danceiny/HackGirlfriend | 9cc796c733be7055799efb1c51f1e5ecb3d12d81 | d64f43c5cfb48d30ed812e34fb19bc7b90ba01f8 | refs/heads/master | 2023-01-04T16:09:55.205094 | 2017-07-22T16:48:59 | 2017-07-22T16:48:59 | 93,874,976 | 2 | 1 | Apache-2.0 | 2022-12-26T20:14:57 | 2017-06-09T15:57:34 | HTML | UTF-8 | Python | false | false | 1,611 | py | # coding=utf-8
import os
import base64
import platform
import json
if platform.system() == 'Darwin':
try:
import crypto
import sys
sys.modules['Crypto'] = crypto
except ImportError:
pass
from Crypto.Cipher import AES
# https://github.com/darknessomi/musicbox/wiki/网易云音乐新版WebAPI分析
def aes_encrypt(text, secKey):
pad = 16 - len(text) % 16
text = text + pad * chr(pad)
    encryptor = AES.new(secKey, AES.MODE_CBC, '0102030405060708')  # mode 2 == CBC
ciphertext = encryptor.encrypt(text)
ciphertext = base64.b64encode(ciphertext)
return ciphertext
def rsa_encrypt(text, pubKey, modulus):
text = text[::-1]
rs = int(text.encode('hex'), 16)**int(pubKey, 16) % int(modulus, 16)
return format(rs, 'x').zfill(256)
def create_secretKey(size):
return ''.join(map(lambda xx: (hex(ord(xx))[2:]), os.urandom(size)))[0:16]
def gen_data():
text = {
        'username': 'your e-mail address',  # placeholder credential
        'password': 'your password',  # placeholder credential
'rememberLogin': 'true'
}
modulus = '00e0b509f6259df8642dbc35662901477df22677ec152b5ff68ace615bb7b725152b3ab17a876aea8a5aa76d2e417629ec4ee341f56135fccf695280104e0312ecbda92557c93870114af6c9d05c4f7f0c3685b7a46bee255932575cce10b424d813cfe4875d3e82047b97ddef52741d546b8e289dc6935b3ece0462db0a22b8e7'
nonce = '0CoJUm6Qyw8W8jud'
pubKey = '010001'
text = json.dumps(text)
secKey = create_secretKey(16)
encText = aes_encrypt(aes_encrypt(text, nonce), secKey)
encSecKey = rsa_encrypt(secKey, pubKey, modulus)
data = {
'params': encText,
'encSecKey': encSecKey
}
return data
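

if __name__ == '__main__':
    # Minimal usage sketch: gen_data() yields the two form fields the web
    # client POSTs for login. The endpoint below comes from the wiki linked
    # above and should be treated as an assumption, not a verified constant:
    #   requests.post('http://music.163.com/weapi/login/', data=gen_data())
    payload = gen_data()
    print(payload['params'][:32])
    print(payload['encSecKey'][:32])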
| [
"[email protected]"
] | |
bae8eb019762143945ce74fa7330120d0ad3a8b3 | e7e536df0263ae2a7ac44ef30f19110f891213a9 | /src/tests/api/test_api_reviews.py | 8af79849d889a34659681f05b76196ee11d9e8d8 | [
"Apache-2.0"
] | permissive | pretalx/pretalx | b3b3808266f4810dfc8445dc1ed33ba398e7a9c2 | 269dce90a6fb1ce0064008c40ce5dd4dad61e2e3 | refs/heads/main | 2023-09-05T11:09:23.538325 | 2023-09-04T19:57:47 | 2023-09-04T19:57:47 | 83,081,285 | 563 | 195 | Apache-2.0 | 2023-09-13T19:12:28 | 2017-02-24T20:46:51 | Python | UTF-8 | Python | false | false | 3,279 | py | import json
import pytest
from django_scopes import scope
from pretalx.api.serializers.review import ReviewSerializer
@pytest.mark.django_db
def test_review_serializer(review):
with scope(event=review.event):
data = ReviewSerializer(review).data
assert set(data.keys()) == {
"id",
"answers",
"submission",
"user",
"text",
"score",
"created",
"updated",
}
assert data["submission"] == review.submission.code
assert data["user"] == review.user.name
assert data["answers"] == []
@pytest.mark.django_db
def test_anon_cannot_see_reviews(client, event, review):
response = client.get(event.api_urls.reviews, follow=True)
content = json.loads(response.content.decode())
assert response.status_code == 200
assert len(content["results"]) == 0, content
@pytest.mark.django_db
def test_orga_can_see_reviews(orga_client, event, review):
response = orga_client.get(event.api_urls.reviews, follow=True)
content = json.loads(response.content.decode())
assert response.status_code == 200
assert len(content["results"]) == 1
@pytest.mark.django_db
def test_orga_cannot_see_reviews_of_deleted_submission(orga_client, event, review):
review.submission.state = "deleted"
review.submission.save()
response = orga_client.get(event.api_urls.reviews, follow=True)
content = json.loads(response.content.decode())
assert response.status_code == 200
assert len(content["results"]) == 0
@pytest.mark.django_db
def test_reviewer_can_see_reviews(review_client, event, review, other_review):
response = review_client.get(event.api_urls.reviews, follow=True)
content = json.loads(response.content.decode())
assert response.status_code == 200
assert len(content["results"]) == 2, content
@pytest.mark.django_db
def test_reviewer_can_see_reviews_by_track(
review_client, review_user, event, review, other_review, track, other_track
):
review.submission.track = track
review.submission.save()
other_review.submission.track = other_track
other_review.submission.save()
review_user.teams.filter(is_reviewer=True).first().limit_tracks.add(track)
response = review_client.get(event.api_urls.reviews, follow=True)
content = json.loads(response.content.decode())
assert response.status_code == 200
assert len(content["results"]) == 1, content
@pytest.mark.django_db
def test_reviewer_can_filter_by_submission(review_client, event, review, other_review):
response = review_client.get(
event.api_urls.reviews + f"?submission__code={review.submission.code}",
follow=True,
)
content = json.loads(response.content.decode())
assert response.status_code == 200
assert len(content["results"]) == 1, content
@pytest.mark.django_db
def test_reviewer_cannot_see_review_to_own_talk(
review_user, review_client, event, review, other_review
):
other_review.submission.speakers.add(review_user)
response = review_client.get(event.api_urls.reviews, follow=True)
content = json.loads(response.content.decode())
assert response.status_code == 200
assert len(content["results"]) == 1, content
| [
"[email protected]"
] | |
ae0818619246b3bb7d794ba9dc5038f83db79eed | f89d70fc8bf370ef4e2aa54c7ee0de3b4a053624 | /scripts/patches/codepipeline.py | 20b98f8526ecd6469e0a36ff5f6078f7b847e0da | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | yks0000/troposphere | a7622bff01c31f10dcb296d2ca353144e1d7f793 | 9a94a8fafd8b4da1cd1f4239be0e7aa0681fd8d4 | refs/heads/main | 2022-04-28T03:51:42.770881 | 2022-04-15T15:15:01 | 2022-04-15T15:15:01 | 482,753,190 | 1 | 0 | BSD-2-Clause | 2022-04-18T07:20:42 | 2022-04-18T07:20:42 | null | UTF-8 | Python | false | false | 2,244 | py | patches = [
# backward compatibility
{
"op": "move",
"from": "/PropertyTypes/AWS::CodePipeline::Pipeline.StageTransition",
"path": "/PropertyTypes/AWS::CodePipeline::Pipeline.DisableInboundStageTransitions",
},
{
"op": "replace",
"path": "/ResourceTypes/AWS::CodePipeline::Pipeline/Properties/DisableInboundStageTransitions/ItemType",
"value": "DisableInboundStageTransitions",
},
{
"op": "move",
"from": "/PropertyTypes/AWS::CodePipeline::Pipeline.StageDeclaration",
"path": "/PropertyTypes/AWS::CodePipeline::Pipeline.Stages",
},
{
"op": "replace",
"path": "/ResourceTypes/AWS::CodePipeline::Pipeline/Properties/Stages/ItemType",
"value": "Stages",
},
{
"op": "move",
"from": "/PropertyTypes/AWS::CodePipeline::Pipeline.InputArtifact",
"path": "/PropertyTypes/AWS::CodePipeline::Pipeline.InputArtifacts",
},
{
"op": "replace",
"path": "/PropertyTypes/AWS::CodePipeline::Pipeline.ActionDeclaration/Properties/InputArtifacts/ItemType",
"value": "InputArtifacts",
},
{
"op": "move",
"from": "/PropertyTypes/AWS::CodePipeline::Pipeline.OutputArtifact",
"path": "/PropertyTypes/AWS::CodePipeline::Pipeline.OutputArtifacts",
},
{
"op": "replace",
"path": "/PropertyTypes/AWS::CodePipeline::Pipeline.ActionDeclaration/Properties/OutputArtifacts/ItemType",
"value": "OutputArtifacts",
},
{
"op": "move",
"from": "/PropertyTypes/AWS::CodePipeline::Pipeline.ActionDeclaration",
"path": "/PropertyTypes/AWS::CodePipeline::Pipeline.Actions",
},
{
"op": "replace",
"path": "/PropertyTypes/AWS::CodePipeline::Pipeline.Stages/Properties/Actions/ItemType",
"value": "Actions",
},
{
"op": "move",
"from": "/PropertyTypes/AWS::CodePipeline::Pipeline.BlockerDeclaration",
"path": "/PropertyTypes/AWS::CodePipeline::Pipeline.Blockers",
},
{
"op": "replace",
"path": "/PropertyTypes/AWS::CodePipeline::Pipeline.Stages/Properties/Blockers/ItemType",
"value": "Blockers",
},
]
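
# Each entry above is a JSON-Patch (RFC 6902) style operation applied to the
# resource specification that the code generator consumes. Every "move" that
# renames a property type is paired with a "replace" that repoints the
# referencing ItemType at the new name, preserving backward compatibility.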
| [
"[email protected]"
] | |
2f47b872200e92c1dd739ecfba7b29d356bbc5c9 | ae8590dc2dd0dd6530868ccd52702d06e5d96fa1 | /set.py | db1f8968df305dbd882d4f03ee932777ea1fa60b | [] | no_license | abhisek08/Python-Basics-Part-1- | e3bec8e4d7f9e484c4bcade7763842334c93f4b0 | 3687dd6ebb01f2289b3fa226cea28b564894a68f | refs/heads/master | 2022-09-08T11:42:28.871012 | 2020-05-25T07:58:01 | 2020-05-25T07:58:01 | 266,717,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | '''
Write a Python program to print out a set containing all the colors from color_list_1 which are not present in color_list_2.
Test Data :
color_list_1 = set(["White", "Black", "Red"])
color_list_2 = set(["Red", "Green"])
Expected Output :
{'Black', 'White'}
'''
color_list_1 = set(["White", "Black", "Red"])
color_list_2 = set(["Red", "Green"])
set3=set()
for a in color_list_1:
if a not in color_list_2:
set3.add(a)
print(set3)
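# The loop above is just the built-in set difference:
# print(color_list_1 - color_list_2)  # also prints {'Black', 'White'}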
| [
"[email protected]"
] | |
61795a374265bfd7628a5a4f8567cea6a4871501 | 41de4210af23a8a8a3ca7dd090bb51faecf4a0c8 | /lib/python3.5/site-packages/statsmodels/tsa/statespace/tests/test_pickle.py | e4143eb73bf4dbaa785ecf20c15a8bc067c18aaf | [
"Python-2.0"
] | permissive | randybrown-github/ziplineMacOS | 42a0c2bfca2a54baa03d2803dc41317647811285 | eb5872c0903d653e19f259f0800fb7aecee0ee5c | refs/heads/master | 2022-11-07T15:51:39.808092 | 2020-06-18T20:06:42 | 2020-06-18T20:06:42 | 272,631,387 | 0 | 1 | null | 2022-11-02T03:21:45 | 2020-06-16T06:48:53 | Python | UTF-8 | Python | false | false | 5,445 | py | """
Tests for python wrapper of state space representation and filtering
Author: Chad Fulton
License: Simplified-BSD
References
----------
Kim, Chang-Jin, and Charles R. Nelson. 1999.
"State-Space Models with Regime Switching:
Classical and Gibbs-Sampling Approaches with Applications".
MIT Press Books. The MIT Press.
"""
from __future__ import division, absolute_import, print_function
from statsmodels.compat.testing import SkipTest
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from numpy.testing import assert_equal, assert_allclose
from statsmodels.compat import cPickle
from statsmodels.tsa.statespace import sarimax
from statsmodels.tsa.statespace.kalman_filter import KalmanFilter
from statsmodels.tsa.statespace.representation import Representation
from statsmodels.tsa.statespace.structural import UnobservedComponents
from .results import results_kalman_filter
# Skip copy test on older NumPy since copy does not preserve order
NP_LT_18 = LooseVersion(np.__version__).version[:2] < [1, 8]
if NP_LT_18:
raise SkipTest("Old NumPy doesn't preserve matrix order when copying")
true = results_kalman_filter.uc_uni
data = pd.DataFrame(
true['data'],
index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),
columns=['GDP']
)
data['lgdp'] = np.log(data['GDP'])
def test_pickle_fit_sarimax():
# Fit an ARIMA(1,1,0) to log GDP
mod = sarimax.SARIMAX(data['lgdp'], order=(1, 1, 0))
pkl_mod = cPickle.loads(cPickle.dumps(mod))
res = mod.fit(disp=-1)
pkl_res = pkl_mod.fit(disp=-1)
assert_allclose(res.llf_obs, pkl_res.llf_obs)
assert_allclose(res.tvalues, pkl_res.tvalues)
assert_allclose(res.smoothed_state, pkl_res.smoothed_state)
assert_allclose(res.resid.values, pkl_res.resid.values)
assert_allclose(res.impulse_responses(10), res.impulse_responses(10))
def test_unobserved_components_pickle():
# Tests for missing data
nobs = 20
k_endog = 1
np.random.seed(1208)
endog = np.random.normal(size=(nobs, k_endog))
endog[:4, 0] = np.nan
exog2 = np.random.normal(size=(nobs, 2))
index = pd.date_range('1970-01-01', freq='QS', periods=nobs)
endog_pd = pd.DataFrame(endog, index=index)
exog2_pd = pd.DataFrame(exog2, index=index)
models = [
UnobservedComponents(endog, 'llevel', exog=exog2),
UnobservedComponents(endog_pd, 'llevel', exog=exog2_pd),
]
for mod in models:
# Smoke tests
pkl_mod = cPickle.loads(cPickle.dumps(mod))
assert_equal(mod.start_params, pkl_mod.start_params)
res = mod.fit(disp=False)
pkl_res = pkl_mod.fit(disp=False)
assert_allclose(res.llf_obs, pkl_res.llf_obs)
assert_allclose(res.tvalues, pkl_res.tvalues)
assert_allclose(res.smoothed_state, pkl_res.smoothed_state)
assert_allclose(res.resid, pkl_res.resid)
assert_allclose(res.impulse_responses(10), res.impulse_responses(10))
def test_kalman_filter_pickle():
# Construct the statespace representation
k_states = 4
model = KalmanFilter(k_endog=1, k_states=k_states)
model.bind(data['lgdp'].values)
model.design[:, :, 0] = [1, 1, 0, 0]
model.transition[([0, 0, 1, 1, 2, 3],
[0, 3, 1, 2, 1, 3],
[0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1]
model.selection = np.eye(model.k_states)
# Update matrices with given parameters
(sigma_v, sigma_e, sigma_w, phi_1, phi_2) = np.array(
true['parameters']
)
model.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
model.state_cov[
np.diag_indices(k_states) + (np.zeros(k_states, dtype=int),)] = [
sigma_v ** 2, sigma_e ** 2, 0, sigma_w ** 2
]
# Initialization
initial_state = np.zeros((k_states,))
initial_state_cov = np.eye(k_states) * 100
# Initialization: modification
initial_state_cov = np.dot(
np.dot(model.transition[:, :, 0], initial_state_cov),
model.transition[:, :, 0].T
)
model.initialize_known(initial_state, initial_state_cov)
pkl_mod = cPickle.loads(cPickle.dumps(model))
results = model.filter()
pkl_results = pkl_mod.filter()
assert_allclose(results.llf_obs[true['start']:].sum(),
pkl_results.llf_obs[true['start']:].sum())
assert_allclose(results.filtered_state[0][true['start']:],
pkl_results.filtered_state[0][true['start']:])
assert_allclose(results.filtered_state[1][true['start']:],
pkl_results.filtered_state[1][true['start']:])
assert_allclose(results.filtered_state[3][true['start']:],
pkl_results.filtered_state[3][true['start']:])
def test_representation_pickle():
nobs = 10
k_endog = 2
endog = np.asfortranarray(np.arange(nobs * k_endog).reshape(k_endog, nobs) * 1.)
mod = Representation(endog, k_states=2)
pkl_mod = cPickle.loads(cPickle.dumps(mod))
assert_equal(mod.nobs, pkl_mod.nobs)
assert_equal(mod.k_endog, pkl_mod.k_endog)
mod._initialize_representation()
pkl_mod._initialize_representation()
assert_equal(mod.design, pkl_mod.design)
assert_equal(mod.obs_intercept, pkl_mod.obs_intercept)
assert_equal(mod.initial_variance, pkl_mod.initial_variance)
| [
"[email protected]"
] | |
62c27b0cf0a5f8a1a68a8aedafbea9941629ddf5 | c50e7eb190802d7849c0d0cea02fb4d2f0021777 | /src/azure-firewall/azext_firewall/vendored_sdks/v2020_07_01/v2020_07_01/operations/_available_resource_group_delegations_operations.py | e40effbf3eedfc272c4a8dabfc86149ff292a448 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/azure-cli-extensions | c1615b19930bba7166c282918f166cd40ff6609c | b8c2cf97e991adf0c0a207d810316b8f4686dc29 | refs/heads/main | 2023-08-24T12:40:15.528432 | 2023-08-24T09:17:25 | 2023-08-24T09:17:25 | 106,580,024 | 336 | 1,226 | MIT | 2023-09-14T10:48:57 | 2017-10-11T16:27:31 | Python | UTF-8 | Python | false | false | 5,696 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class AvailableResourceGroupDelegationsOperations(object):
"""AvailableResourceGroupDelegationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
location, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.AvailableDelegationsResult"]
"""Gets all of the available subnet delegations for this resource group in this region.
:param location: The location of the domain name.
:type location: str
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AvailableDelegationsResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_07_01.models.AvailableDelegationsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailableDelegationsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AvailableDelegationsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/locations/{location}/availableDelegations'} # type: ignore
| [
"[email protected]"
] | |
1721d823737b9758f72ff95546378340fdbe225f | b73a66c9593b7aa326c26d4f148606ca100f541e | /corehq/apps/indicators/urls.py | e7409ae140feb38f5a5bb6f54c8f4055d3c30201 | [] | no_license | SEL-Columbia/commcare-hq | c995a921de6d076e777ca2d5d2baed6a8bcd5d7b | 992ee34a679c37f063f86200e6df5a197d5e3ff6 | refs/heads/master | 2021-01-14T14:37:34.391473 | 2014-09-15T21:01:54 | 2014-09-15T21:01:54 | 17,970,223 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | from django.conf.urls.defaults import patterns, url
from corehq import IndicatorAdminInterfaceDispatcher
from corehq.apps.indicators.views import IndicatorAdminCRUDFormView, BulkCopyIndicatorsView
urlpatterns = patterns('corehq.apps.indicators.views',
url(r'^$', 'default_admin', name="default_indicator_admin"),
url(r'^copy/(?P<indicator_type>[\w_]+)/$', BulkCopyIndicatorsView.as_view(), name="indicator_bulk_copy"),
url(r'^form/(?P<form_type>[\w_]+)/(?P<action>[(update)|(new)|(delete)]+)/((?P<item_id>[\w_]+)/)?$',
IndicatorAdminCRUDFormView.as_view(), name="indicator_def_form"),
IndicatorAdminInterfaceDispatcher.url_pattern(),
)
| [
"[email protected]"
] | |
969a750007bbeda78a8b1b964fd7b2f643ca46ca | de3fe8840a7b3eedf4684cb408c859cb0e7a418b | /manage.py | 181b81bc39791fc172dc604cce7ae4470b12eaa0 | [] | no_license | jabykuniyil/fadeeto | e849f4d75516488ddd2ca024af784852935b3afd | 45910ab16fd53891a618810738e542c2dfc2ff98 | refs/heads/main | 2023-03-22T05:00:03.269688 | 2021-03-09T09:36:00 | 2021-03-09T09:36:00 | 337,984,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fadeeto.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
0cbb6f41c16ebe936880049ad757b009d9c9d15c | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/agc004/B/4553322.py | 708d9d250a600dde43d899a86623332b0cf0c4bf | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | import numpy as np
N, x = map(int, input().split())
a = np.array(list(map(int, input().split())))
b = np.copy(a)
ans = float('inf')
for i in range(N):
    # After allowing up to i casts, each position can take the value of any of
    # the i+1 slimes that rotate into it; b holds that element-wise minimum.
    c = np.roll(a, i)
    b = np.minimum(b, c)
    ans = min(ans, sum(b) + i * x)  # cheapest per-position values plus i casts
print(ans) | [
"[email protected]"
] | |
0910059b9001607f6889bee415cd0046879e7eba | 57dccf7b8da26753b66a9eecb9eb6cd1ae5584b5 | /yolov5/backup/yolov5_2.py | 0db2a2eb1f6be065758b4c99caec163f748bed1f | [] | no_license | vbvg2008/benchmarks | 4b743d6b19a4d0b41fa78b8db2a3f3a3f4e86018 | 29e2e445e6701529e048e8ffa283b5b071295566 | refs/heads/master | 2022-12-12T21:50:51.082085 | 2022-12-06T22:09:26 | 2022-12-06T22:09:26 | 187,144,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | import pdb
import numpy as np
import torch
from PIL import Image
# Model
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=False)
# Images
# img1 = Image.open('zidane.jpg')
# inputs = np.array(img1)
# inputs = np.transpose(inputs, [2, 0, 1])
# pdb.set_trace()
# # Inference
# result = model([inputs])
inputs = torch.rand(3, 720, 1280)
pred = model([inputs])
pdb.set_trace()
| [
"[email protected]"
] | |
321fa041bc8aa7599fc821cd44dae64b4deb545b | 5c883c87f337be7ffd52f49f0a4e6c72bbd58932 | /apps/seguimiento/migrations/0012_auto_20161009_1256.py | 53fe40acab70a7d988dc02796c1d764cf8059d45 | [] | no_license | DARKDEYMON/Tesis-2-Vidaurre-J.C. | f1b0d8e8a593a9d4a585bdd14b21d4809d55ce9f | 4299cea2e990ee798b02724849d747bfd558b97d | refs/heads/master | 2021-06-20T09:25:53.273225 | 2017-05-25T22:20:31 | 2017-05-25T22:20:31 | 65,408,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-09 16:56
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('seguimiento', '0011_requerimiento_maq_he_requerimientopersonal'),
]
operations = [
migrations.RenameField(
model_name='proyecto',
old_name='plaso_previsto',
new_name='plazo_previsto',
),
]
| [
"[email protected]"
] | |
8403f2004c7f764c1701a784cd86927f379d97bd | 85373d45a83e4096affafa4f4e5b400787413e57 | /test/programytest/parser/template/node_tests/richmedia_tests/test_list.py | 282b8606c75b00ec4a823c41783ef19aa46ed8ab | [
"MIT"
] | permissive | keiffster/program-y | a02bb9d8278835547cc875f4f9cd668d5b1f44da | fc7b0a3afa4fa6ed683e0c817a9aa89f9543bb20 | refs/heads/master | 2023-08-23T13:55:39.255535 | 2022-12-13T09:51:57 | 2022-12-13T09:51:57 | 74,462,571 | 379 | 173 | NOASSERTION | 2023-05-23T00:51:21 | 2016-11-22T10:43:41 | Python | UTF-8 | Python | false | false | 969 | py | from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.richmedia.list import TemplateListNode
from programy.parser.template.nodes.word import TemplateWordNode
from programytest.parser.base import ParserTestsBaseClass
class TemplateListNodeTests(ParserTestsBaseClass):
def test_list_node(self):
root = TemplateNode()
self.assertIsNotNone(root)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 0)
list = TemplateListNode()
list._items.append(TemplateWordNode("Item1"))
list._items.append(TemplateWordNode("Item2"))
root.append(list)
resolved = root.resolve(self._client_context)
self.assertIsNotNone(resolved)
self.assertEqual("<list><item>Item1</item><item>Item2</item></list>", resolved)
self.assertEqual("<list><item>Item1</item><item>Item2</item></list>", root.to_xml(self._client_context))
| [
"[email protected]"
] | |
e86764ade6955c0e9d01a19dd792a7783ffab002 | 230ccae62e975f7bfde062edd32e5a54db888a04 | /programmers/[Level-4]/fail/스티커모으기.py | e07a5aef7d58ef6b2d9cbd566e5f8fe54890943f | [] | no_license | seung-woo-ryu/AlgorithmTest | 6f56ec762dc2c863218c529299a3874ad9fd6c53 | 2b735535dbd447f873650bfb649616b78de34343 | refs/heads/master | 2023-02-03T08:00:19.929711 | 2020-12-12T10:04:07 | 2020-12-12T10:04:07 | 285,925,867 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,268 | py | vi = []
li2 = []
answer = 0
def s(n,edges):
global answer
global vi
li2 = [[0 for _ in range(n)] for _ in range(n)]
vi = [0] * n
def re(idx,li2):
global vi
if idx != 0:
for i in range(0,idx):
if li2[idx][i] == 1:
vi[i] += 1
break
re(i,li2)
for x,y in edges:
li2[x][y] = 1
li2[y][x] = 1
for x,y in edges:
if x!= 0:
vi[x] += 1
re(x,li2)
queue = []
temp = set()
temp.add(0)
answer= 0
while temp:
for x in list(temp):
for i in range(x+1,n):
if li2[x][i] == 1:
queue.append(i)
max_index = -1
max_value =-1
for x in queue:
max_temp=0
for k in range(x+1,n):
max_temp = max(max_temp,vi[k])
if vi[x] - max_temp > max_value:
max_index = x
max_value = vi[x] - max_temp
temp = set(queue) - set([max_index])
return n - answer
print(s(19, [[0, 1], [0, 2], [0, 3], [1, 4], [1, 5], [2, 6], [3, 7], [3, 8], [3, 9], [4, 10], [4, 11], [5, 12], [5, 13], [6, 14], [6, 15], [6, 16], [8, 17], [8, 18]])) | [
"[email protected]"
] | |
d1325713c07c1c46518100d38aa60e1e84a7af95 | 9bc17bffce835eb8e27422e39438bf7bd1af2282 | /pnc_cli/swagger_client/models/page.py | 2ba471e49073d2f40d51ae98c7e9566888ed8e25 | [
"Apache-2.0"
] | permissive | pgier/pnc-cli | c3e7d61c3bce4c1a48b29e5f980b6b72cded3e31 | 4d29a8a7ec749c8843c6e32adb7c9c969e6cc24a | refs/heads/master | 2021-01-15T23:59:08.874319 | 2016-05-02T20:59:48 | 2016-05-02T20:59:48 | 57,930,193 | 0 | 0 | null | 2016-05-03T00:36:34 | 2016-05-03T00:36:33 | null | UTF-8 | Python | false | false | 4,465 | py | # coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from datetime import datetime
from pprint import pformat
from six import iteritems
class Page(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Page - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'page_index': 'int',
'page_size': 'int',
'total_pages': 'int',
'content': 'list[PageContent]'
}
self.attribute_map = {
'page_index': 'pageIndex',
'page_size': 'pageSize',
'total_pages': 'totalPages',
'content': 'content'
}
self._page_index = None
self._page_size = None
self._total_pages = None
self._content = None
@property
def page_index(self):
"""
Gets the page_index of this Page.
:return: The page_index of this Page.
:rtype: int
"""
return self._page_index
@page_index.setter
def page_index(self, page_index):
"""
Sets the page_index of this Page.
:param page_index: The page_index of this Page.
:type: int
"""
self._page_index = page_index
@property
def page_size(self):
"""
Gets the page_size of this Page.
:return: The page_size of this Page.
:rtype: int
"""
return self._page_size
@page_size.setter
def page_size(self, page_size):
"""
Sets the page_size of this Page.
:param page_size: The page_size of this Page.
:type: int
"""
self._page_size = page_size
@property
def total_pages(self):
"""
Gets the total_pages of this Page.
:return: The total_pages of this Page.
:rtype: int
"""
return self._total_pages
@total_pages.setter
def total_pages(self, total_pages):
"""
Sets the total_pages of this Page.
:param total_pages: The total_pages of this Page.
:type: int
"""
self._total_pages = total_pages
@property
def content(self):
"""
Gets the content of this Page.
:return: The content of this Page.
:rtype: list[PageContent]
"""
return self._content
@content.setter
def content(self, content):
"""
Sets the content of this Page.
:param content: The content of this Page.
:type: list[PageContent]
"""
self._content = content
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, datetime):
result[attr] = str(value.date())
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
| [
"[email protected]"
] | |
39f9ab976f2acb071d2f4cc6d0b3c49a985bcd32 | 2d4af29250dca8c72b74e190e74d92f1467120a0 | /TaobaoSdk/Request/UmpToolsGetRequest.py | 34245bfd22984b2009657c88f859c074ebb7ee59 | [] | no_license | maimiaolmc/TaobaoOpenPythonSDK | 2c671be93c40cf487c0d7d644479ba7e1043004c | d349aa8ed6229ce6d76a09f279a0896a0f8075b3 | refs/heads/master | 2020-04-06T03:52:46.585927 | 2014-06-09T08:58:27 | 2014-06-09T08:58:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,465 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set ts=4 sts=4 sw=4 et:
## @brief Query the tool list
# @author [email protected]
# @date 2013-09-22 16:52:38
# @version: 0.0.0
import os
import sys
import time
def __getCurrentPath():
return os.path.normpath(os.path.join(os.path.realpath(__file__), os.path.pardir))
__modulePath = os.path.join(__getCurrentPath(), os.path.pardir)
__modulePath = os.path.normpath(__modulePath)
if __modulePath not in sys.path:
sys.path.insert(0, __modulePath)
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">查询工具列表</SPAN>
# <UL>
# </UL>
class UmpToolsGetRequest(object):
def __init__(self):
super(self.__class__, self).__init__()
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">获取API名称</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">str</SPAN>
# </LI>
# </UL>
self.method = "taobao.ump.tools.get"
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">时间戳,如果不设置,发送请求时将使用当时的时间</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">int</SPAN>
# </LI>
# </UL>
self.timestamp = int(time.time())
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">工具编码</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">String</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Required</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">optional</SPAN>
# </LI>
# </UL>
self.tool_code = None
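# Minimal usage sketch (illustrative; the executor object below is
# hypothetical and the real SDK entry point may differ):
#   req = UmpToolsGetRequest()
#   req.tool_code = "tool-code-here"
#   # response = client.execute(req)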
| [
"[email protected]"
] | |
cc50b631b320114baf420e8a9698000c87c7eaca | 0b802a3b3572ae4e9be55cb1c116ebcf06cceb4d | /tests/pipupgrade/cli/test_cli__init__.py | 04031a9628d0715815b484f5dbf878b575837d64 | [
"MIT"
] | permissive | todun/pipupgrade | fc8b1315a9b432a75dd78c1783f85cd0147e631b | 2f2e04d77c7e276e4b6172d42b5bdeaae11075fb | refs/heads/master | 2020-06-25T00:43:26.995923 | 2019-06-10T18:46:22 | 2019-06-10T18:46:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 820 | py | # imports - compatibility imports
from pipupgrade._compat import iteritems, iterkeys
# imports - module imports
from pipupgrade import cli
from pipupgrade.cli import get_args
from pipupgrade.util.types import merge_dict
def test_command():
def _assert_command(values, override = dict(), initial = dict()):
@cli.command
def foobar(*args, **kwargs):
args = get_args()
params = merge_dict(args, override)
for k, v in iteritems(values):
assert params[k] == v
if initial:
for k in iterkeys(initial):
assert initial[k] == args[k]
foobar()
_assert_command(dict(yes = False))
_assert_command(dict(latest = True), dict(latest = True), dict(latest = False)) | [
"[email protected]"
] | |
378ba1016f60d57bd7f16d42e2c06e05626ec211 | 42b3c0d4691df8cfe60177abe7c33d01575f2d9a | /multiThreads/多进程拷贝代码.py | 507e05be7f3bbfd237f703ce6c7499b1ad3191d0 | [] | no_license | richard-ql/pythonNotes | 68d592bdf9f81ea1569b1a5f9a12f5897b98f922 | 27919b2c95cf9ca7443d218488a6edefdb846129 | refs/heads/master | 2021-07-13T23:00:29.126607 | 2021-06-27T16:16:26 | 2021-06-27T16:16:26 | 227,252,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | import os
print("hello world")
pid = os.fork()
print("多进程会拷贝os.fork之后的代码")
print(pid)
if pid == 0:
print("son process")
else:
print("father process")
| [
"[email protected]"
] | |
f135315217f58fe37e1088ccf0c094c7fd1d9606 | 09c18cf1d9dc443e43357383030be9b3ce9e2756 | /QUANTAXIS/QAData/__init__.py | 4a2cfb9ae8dfa14a8a5228a58a67d7843913348e | [
"MIT"
] | permissive | zhouji0212/QUANTAXIS | ed47f78be7d78d2888faf01ba5cfe75dca463e06 | 54b2a0c3445d77c7fcd4858100e8bebe6656e940 | refs/heads/master | 2020-04-07T16:56:56.332211 | 2018-12-23T14:44:30 | 2018-12-23T14:44:30 | 141,289,835 | 0 | 0 | MIT | 2018-11-21T12:19:41 | 2018-07-17T12:56:33 | Python | UTF-8 | Python | false | false | 2,386 | py | # coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from QUANTAXIS.QAData.QASeriesStruct import QA_DataStruct_Series
from QUANTAXIS.QAData.data_fq import QA_data_stock_to_fq
from QUANTAXIS.QAData.data_marketvalue import QA_data_calc_marketvalue, QA_data_marketvalue
from QUANTAXIS.QAData.data_resample import QA_data_tick_resample, QA_data_min_resample, QA_data_day_resample
from QUANTAXIS.QAData.QADataStruct import (QA_DataStruct_Index_day,
QA_DataStruct_Index_min,
QA_DataStruct_Stock_day,
QA_DataStruct_Stock_min,
QA_DataStruct_Future_day,
QA_DataStruct_Future_min,
QA_DataStruct_Stock_realtime,
QA_DataStruct_Stock_transaction)
from QUANTAXIS.QAData.QABlockStruct import QA_DataStruct_Stock_block
from QUANTAXIS.QAData.QAFinancialStruct import QA_DataStruct_Financial
from QUANTAXIS.QAData.QAIndicatorStruct import QA_DataStruct_Indicators
from QUANTAXIS.QAData.dsmethods import QDS_StockDayWarpper, QDS_StockMinWarpper, QDS_IndexDayWarpper, QDS_IndexMinWarpper, from_tushare, concat
| [
"[email protected]"
] | |
05fae7a028b9f848821d1fb01887ec4165b34f20 | e2d5b42941f6bd5a5adace442feab1c446f4a997 | /dp-knight-chess-movement.py | 19e4d662b82190f8979d3e4651678f59e7e6ba2b | [] | no_license | yongxuUSTC/challenges | 21601f8f47beed3ef2c733caaf512b39ce00bc69 | 00ece128923511f29c207d42cbf060cae6bafa01 | refs/heads/master | 2021-06-26T01:15:03.436234 | 2020-08-17T19:17:33 | 2020-08-17T19:17:33 | 97,131,133 | 2 | 1 | null | 2017-07-13T14:22:36 | 2017-07-13T14:22:36 | null | UTF-8 | Python | false | false | 2,361 | py | '''
How many different 10-digit numbers can be formed starting from 1?
The constraint is that the movement from 1 digit to the next is similar to the movement of the Knight in a chess game.
Reference: http://stackoverflow.com/questions/2893470/generate-10-digit-number-using-a-phone-keypad
'''
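# (added illustration) The keypad built by initialize() is laid out as:
#   1 2 3
#   4 5 6
#   7 8 9
#   * 0 #     (* and # are stored as None)
# A knight-style move from 1 reaches 6 or 8, so the two-digit numbers
# starting with 1 are 16 and 18.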
def initialize():
table = [[0 for i in range(3)] for j in range(4)]
values = [1,2,3,4,5,6,7,8,9,None,0,None]
rows = len(table)
cols = len(table[0])
count = 0
for i in range(rows):
for j in range(cols):
table[i][j] = values[count]
count += 1
return table
#given value find coordinates
def getCoordinates(value,table):
rows = len(table)
cols = len(table[0])
for i in range(rows):
for j in range(cols):
if table[i][j] == value:
return([i,j])
#Next Knights move from current coordinates
def nextKnightMove(value,table):
i, j = getCoordinates(value,table)
rows = len(table)
cols = len(table[0])
result = []
    # down 1, right 2
    if(i+1 < rows and j+2 < cols and table[i+1][j+2] is not None):
        result.append(table[i+1][j+2])
    # down 1, left 2
    if(i+1 < rows and j-2 >= 0 and table[i+1][j-2] is not None):
        result.append(table[i+1][j-2])
    # up 1, right 2
    if(i-1 >= 0 and j+2 < cols and table[i-1][j+2] is not None):
        result.append(table[i-1][j+2])
    # up 1, left 2
    if(i-1 >= 0 and j-2 >= 0 and table[i-1][j-2] is not None):
        result.append(table[i-1][j-2])
    # down 2, right 1
    if(i+2 < rows and j+1 < cols and table[i+2][j+1] is not None):
        result.append(table[i+2][j+1])
    # down 2, left 1
    if(i+2 < rows and j-1 >= 0 and table[i+2][j-1] is not None):
        result.append(table[i+2][j-1])
    # up 2, right 1
    if(i-2 >= 0 and j+1 < cols and table[i-2][j+1] is not None):
        result.append(table[i-2][j+1])
    # up 2, left 1
    if(i-2 >= 0 and j-1 >= 0 and table[i-2][j-1] is not None):
        result.append(table[i-2][j-1])
return result
#http://stackoverflow.com/questions/2893470/generate-10-digit-number-using-a-phone-keypad
def generateTableM(table,mtable,digits,start):
if digits == 1:
return 1
if (mtable[digits][start] == 0):
for next in nextKnightMove(start,table):
mtable[digits][start] += generateTableM(table,mtable,digits-1,next)
#else:
#print("found ...",digits,start)
return mtable[digits][start]
table = initialize()
#memoization table
mtable = [[0 for i in range(10)] for j in range(11)]
print(generateTableM(table,mtable,10,1)) #mtable[10][1] = 1424
| [
"[email protected]"
] | |
911c126a0f974f911bf5b66ca8c23e2cfd9747a3 | 1bf9f6b0ef85b6ccad8cb029703f89039f74cedc | /src/spring/azext_spring/vendored_sdks/appplatform/v2021_06_01_preview/aio/_app_platform_management_client.py | 27281616bb20c88913df623e391e6632539d2a69 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | VSChina/azure-cli-extensions | a1f4bf2ea4dc1b507618617e299263ad45213add | 10b7bfef62cb080c74b1d59aadc4286bd9406841 | refs/heads/master | 2022-11-14T03:40:26.009692 | 2022-11-09T01:09:53 | 2022-11-09T01:09:53 | 199,810,654 | 4 | 2 | MIT | 2020-07-13T05:51:27 | 2019-07-31T08:10:50 | Python | UTF-8 | Python | false | false | 7,399 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models
from ..._serialization import Deserializer, Serializer
from ._configuration import AppPlatformManagementClientConfiguration
from .operations import (
AppsOperations,
BindingsOperations,
CertificatesOperations,
ConfigServersOperations,
CustomDomainsOperations,
DeploymentsOperations,
MonitoringSettingsOperations,
Operations,
RuntimeVersionsOperations,
ServicesOperations,
SkusOperations,
)
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class AppPlatformManagementClient: # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes
"""REST API for Azure Spring Cloud.
:ivar services: ServicesOperations operations
:vartype services: azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.ServicesOperations
:ivar config_servers: ConfigServersOperations operations
:vartype config_servers:
azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.ConfigServersOperations
:ivar monitoring_settings: MonitoringSettingsOperations operations
:vartype monitoring_settings:
azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.MonitoringSettingsOperations
:ivar apps: AppsOperations operations
:vartype apps: azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.AppsOperations
:ivar bindings: BindingsOperations operations
:vartype bindings: azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.BindingsOperations
:ivar certificates: CertificatesOperations operations
:vartype certificates:
azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.CertificatesOperations
:ivar custom_domains: CustomDomainsOperations operations
:vartype custom_domains:
azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.CustomDomainsOperations
:ivar deployments: DeploymentsOperations operations
:vartype deployments:
azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.DeploymentsOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.Operations
:ivar runtime_versions: RuntimeVersionsOperations operations
:vartype runtime_versions:
azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.RuntimeVersionsOperations
:ivar skus: SkusOperations operations
:vartype skus: azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.SkusOperations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Gets subscription ID which uniquely identify the Microsoft Azure
subscription. The subscription ID forms part of the URI for every service call. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2021-06-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = AppPlatformManagementClientConfiguration(
credential=credential, subscription_id=subscription_id, **kwargs
)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.services = ServicesOperations(self._client, self._config, self._serialize, self._deserialize)
self.config_servers = ConfigServersOperations(self._client, self._config, self._serialize, self._deserialize)
self.monitoring_settings = MonitoringSettingsOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.apps = AppsOperations(self._client, self._config, self._serialize, self._deserialize)
self.bindings = BindingsOperations(self._client, self._config, self._serialize, self._deserialize)
self.certificates = CertificatesOperations(self._client, self._config, self._serialize, self._deserialize)
self.custom_domains = CustomDomainsOperations(self._client, self._config, self._serialize, self._deserialize)
self.deployments = DeploymentsOperations(self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
self.runtime_versions = RuntimeVersionsOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.skus = SkusOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "AppPlatformManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
| [
"[email protected]"
] | |
68b882f21b82ee98e1f7d0034f05ab3e7456ca93 | 2fc4ccffe5c557602302f087ae296fd31c0c1c2e | /apps/backups/serializers.py | 8dc1c7418ea730901f3e574b5a9e84ba57ccd033 | [] | no_license | Duyshg/syncano-platform | 7cfee3f877f761deaa5fb2e70f89deba4f90cb05 | ea645f998edb80d5e1c6eca5ae9f7beb37d4e711 | refs/heads/master | 2020-04-25T20:47:32.717475 | 2019-02-14T17:49:06 | 2019-02-14T17:49:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,496 | py | # coding=UTF8
from django.conf import settings
from rest_framework.relations import PrimaryKeyRelatedField, SlugRelatedField
from rest_framework.serializers import ModelSerializer, ValidationError
from apps.admins.serializers import AdminFullSerializer
from apps.core.exceptions import PermissionDenied
from apps.core.field_serializers import DisplayedChoiceField, JSONField
from apps.core.mixins.serializers import HyperlinkedMixin, MetadataMixin
from .models import Backup, Restore
from .site import default_site
class BackupSerializer(MetadataMixin, ModelSerializer):
instance = SlugRelatedField(slug_field='name',
required=False,
read_only=True,
allow_null=False)
status = DisplayedChoiceField(Backup.STATUSES.as_choices(), read_only=True)
author = AdminFullSerializer(read_only=True, source="owner")
details = JSONField(read_only=True)
class Meta:
model = Backup
read_only_fields = ('id', 'instance', 'created_at', 'updated_at',
'archive', 'size', 'status', 'status_info', 'author', 'details')
fields = read_only_fields + ('description', 'label', 'query_args', 'metadata')
extra_kwargs = {'description': {'required': False}, 'label': {'required': False}}
class FullBackupSerializer(HyperlinkedMixin, BackupSerializer):
hyperlinks = (
('self', 'full_backups-toplevel-detail', ('id',)),
)
class Meta(BackupSerializer.Meta):
fields = ('id', 'instance', 'created_at', 'updated_at', 'size',
'status', 'status_info', 'description', 'label', 'author', 'details', 'metadata')
class PartialBackupSerializer(HyperlinkedMixin, BackupSerializer):
hyperlinks = (
('self', 'partial_backups-toplevel-detail', ('id',)),
)
query_args = JSONField(required=True, validators=[default_site.validate_query_args], write_only=True,
schema=lambda: default_site.jsonschema)
class RestoreSerializer(HyperlinkedMixin, ModelSerializer):
hyperlinks = (
('self', 'restores-detail', ('instance.name', 'id')),
)
backup = PrimaryKeyRelatedField(required=False, allow_null=True,
queryset=Backup.objects.none())
status = DisplayedChoiceField(Backup.STATUSES.as_choices(), read_only=True)
author = AdminFullSerializer(read_only=True, source="owner")
class Meta:
model = Restore
fields = ('id', 'backup', 'created_at', 'updated_at', 'status', 'archive', 'status_info', 'author')
read_only_fields = ('created_at', 'id', 'status', 'status_info', 'author')
def get_fields(self):
fields = super().get_fields()
if 'request' in self.context:
fields['backup'].queryset = Backup.objects.filter(
owner=self.context['view'].request.user,
status=Backup.STATUSES.SUCCESS,
location=settings.LOCATION,
)
return fields
def validate(self, attrs):
has_archive = bool(attrs.get('archive', False))
has_backup = bool(attrs.get('backup', False))
if has_backup and has_archive or (not has_backup and not has_archive):
raise ValidationError('You have to provide either backup or archive.')
if has_archive and not self.context['request'].user.is_staff:
raise PermissionDenied()
return super().validate(attrs)
| [
"[email protected]"
] | |
7c7705efd2928f8d5566e1d078bd5e130c52912c | 5aad0901bba97bdec3e8ad576abdcb780cc7f99e | /experiment/surprise/prediction_algorithms/item_rel_tags.py | 41ae0831820f49417af314ac8db066d57b73a2c7 | [] | no_license | HelloYym/Cross-TTCF | 544f2322d25855586bf517bb769e94ffd112e847 | d4504af02a7d0dcc1b5c59aba33ba9bc897e381d | refs/heads/master | 2021-06-19T01:04:32.401074 | 2017-06-07T05:53:29 | 2017-06-07T05:53:29 | 86,427,595 | 3 | 4 | null | null | null | null | UTF-8 | Python | false | false | 4,272 | py | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from six.moves import range
import copy
from .algo_base import AlgoBase
class ItemRelTags(AlgoBase):
def __init__(self, n_factors=100, n_epochs=20, biased=True, lr_all=.005,
reg_all=.02, lr_bu=None, lr_bi=None, lr_pu=None, lr_qi=None,
reg_bu=None, reg_bi=None, reg_pu=None, reg_qi=None,
confidence = 0.95,
verbose=False):
self.n_factors = n_factors
self.n_epochs = n_epochs
self.biased = biased
self.lr_all = lr_all
self.lr_bu = lr_bu if lr_bu is not None else lr_all
self.lr_bi = lr_bi if lr_bi is not None else lr_all
self.lr_pu = lr_pu if lr_pu is not None else lr_all
self.lr_qi = lr_qi if lr_qi is not None else lr_all
self.reg_all = reg_all
self.reg_bu = reg_bu if reg_bu is not None else reg_all
self.reg_bi = reg_bi if reg_bi is not None else reg_all
self.reg_pu = reg_pu if reg_pu is not None else reg_all
self.reg_qi = reg_qi if reg_qi is not None else reg_all
self.confidence = confidence
self.verbose = verbose
AlgoBase.__init__(self)
self.estimate_with_tags = True
def train(self, trainset):
trainset.rank_sum_test(confidence=self.confidence)
trainset.construct()
AlgoBase.train(self, trainset)
self.sgd(trainset)
def sgd(self, trainset):
# user biases
bu = np.zeros(trainset.n_users, np.double)
# item biases
bi = np.zeros(trainset.n_items, np.double)
# user factors
pu = np.random.random((trainset.n_users, self.n_factors)
) / np.sqrt(self.n_factors)
# item factors
qi = np.random.random((trainset.n_items, self.n_factors)
) / np.sqrt(self.n_factors)
# tag factors
yt = np.zeros((trainset.n_tags,
self.n_factors), np.double)
lr_all = self.lr_all
lr_bu = self.lr_bu
lr_bi = self.lr_bi
lr_pu = self.lr_pu
lr_qi = self.lr_qi
reg_all = self.reg_all
reg_bu = self.reg_bu
reg_bi = self.reg_bi
reg_pu = self.reg_pu
reg_qi = self.reg_qi
global_mean = trainset.global_mean
for current_epoch in range(self.n_epochs):
if self.verbose:
print("Processing epoch {}".format(current_epoch))
for u, i, r in trainset.all_ratings():
item_tags = trainset.get_item_tags(i)
n_tags = max(1, sum(item_tags.values()))
sum_yt = np.sum(
[yt[tid] * freq for tid, freq in item_tags.items()], axis=0) / n_tags
# compute current error
dot = np.dot((qi[i] + sum_yt), pu[u])
err = r - (global_mean + bu[u] + bi[i] + dot)
# update biases
if self.biased:
bu[u] += lr_bu * (err - reg_bu * bu[u])
bi[i] += lr_bi * (err - reg_bi * bi[i])
# update factors
pu[u] += lr_pu * (err * (qi[i] + sum_yt) - reg_pu * pu[u])
qi[i] += lr_qi * (err * pu[u] - reg_qi * qi[i])
for t, freq in item_tags.items():
yt[t] += lr_all * \
(pu[u] * freq * (err / n_tags) - reg_all * yt[t])
self.bu = bu
self.bi = bi
self.pu = pu
self.qi = qi
self.yt = yt
def estimate(self, u, i, tags):
est = self.trainset.global_mean
if self.trainset.knows_user(u):
est += self.bu[u]
if self.trainset.knows_item(i):
est += self.bi[i]
if self.trainset.knows_user(u) and self.trainset.knows_item(i):
item_tags = copy.deepcopy(self.trainset.get_item_tags(i))
yt_cnt = max(sum(item_tags.values()), 1)
yt_sum = np.sum([self.yt[tid] * freq for tid,
freq in item_tags.items()], axis=0) / yt_cnt
est += np.dot((self.qi[i] + yt_sum), self.pu[u])
return est
| [
"[email protected]"
] | |
896785f9a67cae451dd0cc416ffc28e3f1afa9a3 | 456a87fc1d6c6ea29063b542a4ae3d636577a56d | /06_Python_Fonksiyonlar/04_function-demos.py | d9240c2dede075c4739ac482a7520918bb307646 | [] | no_license | dyedefRa/python_bastan_sona_sadik_turan | baca8a8e05321e21bcd9d0c2bd97504d93ae8c33 | a289501b408a26c4036d68968001e2b4a6a57da7 | refs/heads/master | 2021-03-04T12:28:48.481785 | 2020-02-26T12:07:35 | 2020-02-26T12:07:35 | 246,033,399 | 1 | 0 | null | 2020-03-09T12:45:54 | 2020-03-09T12:45:54 | null | UTF-8 | Python | false | false | 2,142 | py | # 1- Gönderilen bir kelimeyi belirtilen kez ekranda gösteren fonksiyonu yazın.
'''
word = input('word : ')
count = int(input('count : '))
def yazdir(word,count):
for n in range(0,count):
print(word)
yazdir(word,count)
def yazdir2(word,count):
print(word*count)
yazdir2(word+'\n',count)
'''
# 2- Write a function that turns an unlimited number of parameters passed to it into a list.
'''
def listeyeCevir(*params):
liste = params
return liste
print(listeyeCevir('meti','oguzhan',1986,'[email protected]'))
def listeyeCevir2(*params):
liste = []
for n in params:
liste.append(n)
return liste
print(listeyeCevir2(10,20,30,40,50,60,'Merhaba'))
'''
# 3- Find all the prime numbers between two given numbers.
'''
def asalSayiBulma(baslangic, bitis):
asalSayiListesi = []
isPrime = True
for n in range(baslangic, bitis+1):
if(baslangic>1):
for bolen in range(2, n):
if n % bolen == 0:
isPrime = False
break
else:
isPrime = True
else:
isPrime = True
if isPrime:
asalSayiListesi.append(n)
return asalSayiListesi
print(asalSayiBulma(2, 19))
def asalSayilariBul(sayi1, sayi2):
for sayi in range(sayi1, sayi2+1):
if(sayi1>1):
for i in range(2,sayi):
if sayi % i == 0:
break
else:
print(sayi)
sayi1 = int(input('sayı 1 : '))
sayi2 = int(input('sayı 2 : '))
asalSayilariBul(sayi1,sayi2)
'''
# 4- Return the exact divisors of a given number as a list.
def tamBolenListesi(sayi):
for n in range(1,sayi + 1):
if(sayi % n == 0):
print(n)
else:
continue
sayi = int(input('sayı : '))
tamBolenListesi(sayi)
def tamBolenleriBul(sayi):
tamBolenler = []
for i in range(2,sayi):
if(sayi % i == 0):
tamBolenler.append(i)
return tamBolenler
print(tamBolenleriBul(90)) | [
"[email protected]"
] | |
bb49652434b8ca4ec5c349ea55e365ace5ea5f8a | d1ad901e1e926d9c92ce4dc7a7ba3c6ee91a65e2 | /spytest/spytest/remote/spytest-helper.py | 1027df441c2a4d722f0faad8c05de818411ff3d8 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | SubhajitPalKeysight/sonic-mgmt | ff59c2c5baf53cc2575aea2d541278fc9cf56977 | e4b308a82572996b531cc09cbc6ba98b9bd283ea | refs/heads/master | 2022-12-31T01:03:47.757864 | 2020-10-15T11:04:37 | 2020-10-15T11:04:37 | 286,815,154 | 1 | 1 | NOASSERTION | 2020-08-11T18:08:34 | 2020-08-11T18:08:33 | null | UTF-8 | Python | false | false | 43,100 | py | #!/usr/bin/python
"""
This file is used to apply the configs on the DUT.
This file will be uploaded to the DUT and executed there.
Please be careful before changing this file.
"""
import os
import re
import glob
import json
import socket
import filecmp
import argparse
import subprocess
g_use_config_replace = False
g_community_build = False
g_breakout_native = False
g_breakout_file = None
g_debug = False
syslog_levels = ['emerg', 'alert', 'crit', 'err', 'warning', 'notice', 'info', 'debug', 'none']
minigraph_file = "/etc/sonic/minigraph.xml"
config_file = "/etc/sonic/config_db.json"
tmp_config_file = "/tmp/config_db.json"
copp_config_file = "/etc/swss/config.d/00-copp.config.json"
tmp_copp_file = "/tmp/copp.json"
frr_config_file = "/etc/sonic/frr/frr.conf"
tmp_frr_file = "/tmp/frr.conf"
syslog_file = "/etc/rsyslog.d/99-default.conf"
tmp_syslog_file = "/tmp/rsyslog-default.conf"
spytest_dir = "/etc/spytest"
init_config_file = spytest_dir + "/init_config_db.json"
base_config_file = spytest_dir + "/base_config_db.json"
module_config_file = spytest_dir + "/module_config_db.json"
init_frr_config_file = spytest_dir + "/init_frr.conf"
base_frr_config_file = spytest_dir + "/base_frr.conf"
module_frr_config_file = spytest_dir + "/module_frr.conf"
init_copp_config_file = spytest_dir + "/init_copp.json"
base_copp_config_file = spytest_dir + "/base_copp.json"
module_copp_config_file = spytest_dir + "/module_copp.json"
init_minigraph_file = spytest_dir + "/init_minigraph.xml"
base_minigraph_file = spytest_dir + "/base_minigraph.xml"
module_minigraph_file = spytest_dir + "/module_minigraph.xml"
tech_support_timestamp = spytest_dir + "/tech_support_timestamp.txt"
port_config_file = "/usr/share/sonic/device"
cores_tar_file_name = "/tmp/allcorefiles.tar.gz"
kdump_tar_file_name = "/tmp/allkdumpfiles.tar.gz"
def trace(msg):
if g_debug:
print(msg)
def read_port_inifile():
"""
This proc is to get the last port number in the file port_config.ini
:return:
"""
(Platform, HwSKU) = get_hw_values()
int_file = port_config_file + '/' + Platform + '/' + HwSKU + '/' + 'port_config.ini'
int_file = 'cat ' + int_file + ' ' + '| ' + 'tail -1'
output = execute_check_cmd(int_file)
port = output.split(" ")[0]
return port
def get_port_status(port):
"""
This proc is used to get the given port status.
:param port:
:return:
"""
output = execute_check_cmd("show interfaces status {}".format(port))
if output != "":
return output
return
def iterdict(d):
new_dict = {}
for k, v in d.items():
if isinstance(v,dict):
v = iterdict(v)
try:
new_dict[k] = int(v)
except:
new_dict[k] = v
return new_dict
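# Illustration (added): iterdict() coerces numeric-looking leaf values so that
# string/int representation differences do not register as config diffs, e.g.
#   iterdict({"PORT": {"Ethernet0": {"mtu": "9100"}}})
#   -> {"PORT": {"Ethernet0": {"mtu": 9100}}}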
def read_lines(file_path, default=None):
try:
with open(file_path, "r") as infile:
return infile.readlines()
except Exception as exp:
if default is None:
raise exp
return default
def read_offset(file_path):
lines = read_lines(file_path, [])
offset = int(lines[0].split()[0]) if lines else 0
return (file_path, offset)
def write_offset(file_path, retval, add=0):
try:
lines = retval.split()
offset = add + int(lines[0].split()[0])
with open(file_path, "w") as infile:
infile.write("{} unused".format(offset))
except: pass
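# Offset handshake (added summary): read_offset() returns the line number
# saved by the previous write_offset() call, read_messages() further below
# tails the log starting from that line, and write_offset() stores the old
# offset plus the new "wc -l" line count, enabling incremental log reads.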
def execute_from_file(file_path):
execute_cmds(read_lines(file_path))
def execute_cmds(cmds):
retval = []
for cmd in cmds:
retval.append(execute_check_cmd(cmd))
return "\n".join(retval)
def execute_check_cmd(cmd, show=True, skip_error=False):
retval = ""
try:
if show:
print("Remote CMD: '{}'".format(cmd))
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = proc.communicate()
proc.wait()
if not skip_error and proc.returncode != 0:
retval = "Error: Failed to execute '{}' ('{}')\n".format(cmd, err.strip())
if out.strip() != "":
retval = retval + out.strip()
except:
retval = "Error: Exception occurred while executing the command '{}'".format(cmd)
if retval.strip() != "":
print(retval)
return retval
def run_as_system_cmd(cmd, show=True):
retcode = None
try:
if show:
print("Remote CMD: '{}'".format(cmd))
retcode = os.system(cmd)
if retcode != 0:
print("Error: Failed to execute '{}'. Return code is '{}'".format(cmd, retcode))
except:
print("Error: Exception occurred while executing the command '{}'".format(cmd))
return retcode
def get_mac_address():
syseeprom = execute_check_cmd("show platform syseeprom").split("\n")
for line in syseeprom:
match = re.match(r"^Base MAC Address\s+0x\d+\s+6\s+(\S+)", line)
if match:
return match.group(1)
return None
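# The regex above expects a "show platform syseeprom" TLV row roughly like
# (illustrative values):
#   Base MAC Address    0x24    6    80:a2:35:26:45:6a
# and returns the final column.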
def get_hw_values():
platform = None
hwsku = None
platform_summ = execute_check_cmd("show platform summary").split("\n")
for line in platform_summ:
if not platform:
match = re.match(r"^Platform:\s+(\S+)", line)
if match:
platform = match.group(1)
if not hwsku:
match = re.match(r"^HwSKU:\s+(\S+)", line)
if match:
hwsku = match.group(1)
return (platform, hwsku)
def read_json(filepath):
return eval(open(filepath, 'rU').read())
def get_file_diff(file1, file2, show_diff=False):
if filecmp.cmp(file1, file2):
# files have same content
return True
# compare the dictionaries
file1_dict = read_json(file1)
file2_dict = read_json(file2)
f1_dict = iterdict(dict((k, v) for k, v in file1_dict.items() if v))
f2_dict = iterdict(dict((k, v) for k, v in file2_dict.items() if v))
if f1_dict == f2_dict:
# dictionaries are same
return True
# the files have different content
if show_diff:
print("Content in the files '{}' '{}' is different".format(file1, file2))
return False
def json_fix(filepath):
data = open(filepath, 'rU').read()
try:
obj = json.loads(data)
except:
print("invalid json - trying to fix")
# remove trailing object comma
regex = re.compile(r'(,)\s*}(?=([^"\\]*(\\.|"([^"\\]*\\.)*[^"\\]*"))*[^"]*$)')
data = regex.sub("}", data)
# remove trailing array comma
regex = re.compile(r'(,)\s*\](?=([^"\\]*(\\.|"([^"\\]*\\.)*[^"\\]*"))*[^"]*$)')
data = regex.sub("]", data)
try:
obj = json.loads(data)
except:
raise ValueError("invalid json data")
dst_file = "{}.new".format(filepath)
with open(dst_file, 'w') as outfile:
json.dump(obj, outfile, indent=4)
return dst_file
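# Illustration (added): json_fix() strips trailing commas that json.loads
# rejects, e.g. '{"a": 1,}' becomes '{"a": 1}', and writes the repaired
# document to "<filepath>.new" for the callers below to consume.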
def backup_file(file_path):
file_name = os.path.basename(file_path)
golden_file = spytest_dir + "/{}.golden".format(file_name)
backup_filepath = spytest_dir + "/{}.backup".format(file_name)
if not os.path.exists(golden_file):
execute_check_cmd("cp {} {}".format(file_path, golden_file))
execute_check_cmd("cp {} {}".format(file_path, backup_filepath))
def backup_swss_docker_file(file_path):
file_name = os.path.basename(file_path)
golden_file = spytest_dir + "/{}.golden".format(file_name)
backup_filepath = spytest_dir + "/{}.backup".format(file_name)
if not os.path.exists(golden_file):
execute_check_cmd("docker cp swss:{} {}".format(file_path, golden_file))
execute_check_cmd("docker cp swss:{} {}".format(file_path, backup_filepath))
return backup_filepath
def apply_file(filepath, method):
commands_to_execute = []
if filepath.endswith('.json'):
filepath = json_fix(filepath)
if method == "full":
commands_to_execute.append("cp {} {}".format(filepath, init_config_file))
else:
commands_to_execute.append("config load -y {}".format(filepath))
commands_to_execute.append("config save -y")
elif filepath.endswith('.copp'):
filepath = json_fix(filepath)
if method == "full":
commands_to_execute.append("cp {} {}".format(filepath, init_copp_config_file))
else:
backup_swss_docker_file(copp_config_file)
commands_to_execute.append("docker cp {} swss:{}".format(filepath, copp_config_file))
elif filepath.endswith('.xml'):
if method == "full":
commands_to_execute.append("cp {} {}".format(filepath, init_minigraph_file))
else:
backup_file(minigraph_file)
commands_to_execute.append("cp {} {}".format(filepath, minigraph_file))
commands_to_execute.append("config load_minigraph -y")
commands_to_execute.append("config save -y")
elif filepath.endswith('.frr'):
if method == "full":
commands_to_execute.append("cp {} {}".format(filepath, init_frr_config_file))
else:
backup_file(frr_config_file)
commands_to_execute.append("cp {} {}".format(filepath, frr_config_file))
elif filepath.endswith('.sh'):
commands_to_execute.append("bash {}".format(filepath))
elif filepath.endswith('.py'):
commands_to_execute.append("python {}".format(filepath))
elif filepath.endswith('.bcm') or filepath.endswith('.ini') or filepath.endswith('.j2'):
# Execute the command "show platform summary" and get the Platform and HwSKU values.
(Platform, HwSKU) = get_hw_values()
        # Construct the path where we can find the Platform and HwSKU details.
device_files_loc = "/usr/share/sonic/device"
dut_file_location = "{}/{}/{}/".format(device_files_loc,Platform,HwSKU)
basename = os.path.basename(filepath)
old_file = os.path.join(dut_file_location, basename)
if not os.path.exists(old_file + ".orig"):
commands_to_execute.append("cp {} {}.orig".format(old_file, old_file))
commands_to_execute.append("cp {} {}".format(filepath, old_file))
if commands_to_execute:
execute_cmds(commands_to_execute)
else:
print("Error: Invalid file format {}.".format(filepath))
def parse_and_apply_files(names, method):
ensure_mac_address(config_file)
if type(names) is str:
apply_file(names, method)
elif type(names) is list:
for filename in names:
parse_and_apply_files(filename, method)
def clean_core_files(flag):
if flag == "YES":
print("remove core files files")
execute_check_cmd("rm -f /var/core/*.core.gz")
def clean_dump_files(flag):
if flag == "YES":
print("remove techsupport dump files")
execute_check_cmd("rm -f /var/dump/*.tar.gz")
def clear_techsupport(flag):
if flag == "YES":
print("remove core dumps and techsupport till now using CLI command.")
execute_check_cmd("sonic-clear techsupport till 'now' -y")
def init_clean(flags):
[core_flag, dump_flag, clear_flag] = flags.split(",")
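    # flags is a comma separated YES/NO triple, e.g. "YES,NO,NO" (illustrative)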
# remove core files
clean_core_files(core_flag)
# remove techsupport dump files
clean_dump_files(dump_flag)
# remove core dumps and techsupport till now using CLI command.
clear_techsupport(clear_flag)
# disable syslog messages
print("disable syslog messages")
enable_disable_debug(False)
# clear syslog messages
execute_check_cmd("rm -f {}/syslog.*".format(spytest_dir))
execute_check_cmd("rm -f {}/sairedis.*".format(spytest_dir))
execute_check_cmd("logrotate -f /etc/logrotate.conf", skip_error=True)
def init_ta_config(flags, profile):
init_clean(flags)
if profile == "na":
create_default_base_config()
elif profile == "l2" or profile == "l3":
create_profile_base_config(profile)
# save current timestamp
run_as_system_cmd("date > {}".format(tech_support_timestamp))
# remove syslogs and sairedis files
print("clear syslog and sairedis files")
execute_check_cmd("logrotate -f /etc/logrotate.conf", skip_error=True)
execute_check_cmd("rm -f /var/log/syslog.*")
execute_check_cmd("rm -f /var/log/swss/sairedis.rec.*")
print("DONE")
def create_default_base_config():
print("default base config")
# Clean the spytest directory - copp files are also saved as json
for extn in ["json", "conf", "xml"]:
execute_check_cmd("rm -f {}/*.{}".format(spytest_dir, extn))
# remove init configs
for filename in [init_copp_config_file, init_minigraph_file, \
init_frr_config_file]:
execute_check_cmd("rm -f {}".format(filename))
# save the config to init file.
execute_check_cmd("config save -y {}".format(init_config_file))
file_dict = read_json(init_config_file)
# remove all the unnecessary sections from init file
print("remove all the unnecessary sections")
retain = ['DEVICE_METADATA', 'PORT', 'FLEX_COUNTER_TABLE', "MGMT_PORT"]
if os.getenv("SPYTEST_NTP_CONFIG_INIT", "0") != "0":
retain.append("NTP_SERVER")
if os.getenv("SPYTEST_CLEAR_MGMT_INTERFACE", "0") == "0":
retain.append("MGMT_INTERFACE")
for key in file_dict.keys():
if key not in retain:
del file_dict[key]
# enable docker_routing_config_mode
print("enable docker_routing_config_mode")
if "DEVICE_METADATA" in file_dict:
if "localhost" in file_dict["DEVICE_METADATA"]:
file_dict["DEVICE_METADATA"]["localhost"]["docker_routing_config_mode"] = "split"
if os.getenv("SPYTEST_CLEAR_DEVICE_METADATA_HOSTNAME", "0") == "0":
file_dict["DEVICE_METADATA"]["localhost"]["hostname"] = "sonic"
# enable all ports
print("enable all ports")
if "PORT" in file_dict:
port_dict = file_dict['PORT']
for k, v in port_dict.items():
v["admin_status"] = "up"
# save the configuration to init file
with open(init_config_file, 'w') as outfile:
json.dump(file_dict, outfile, indent=4)
def create_profile_base_config(profile):
print("{} base config".format(profile))
# save the config to init file.
execute_check_cmd("config save -y {}".format(init_config_file))
print("DONE")
def apply_config_profile(profile):
if profile == "na":
print("Skipping the profile config as it is not required for 'NA'.")
else:
output = execute_check_cmd("show config profiles")
match = re.match(r"Factory Default:\s+(\S+)", output)
if match and profile == match.group(1):
execute_check_cmd("rm -rf {}".format(config_file))
execute_check_cmd("config profile factory {} -y".format(profile))
print("DONE")
def update_reserved_ports(port_list):
# If no init config_db.json return back.
if not os.path.exists(init_config_file):
print("==============================================================================")
print("===================== DEFAULT INIT CONFIG FILE IS MISSING ====================")
print("==============================================================================")
print("NOFILE")
return
file_dict = read_json(init_config_file)
# Change reserved port state
print("Change the reserved port state to down in config-db")
if "PORT" in file_dict:
port_dict = file_dict['PORT']
for k, v in port_dict.items():
if k not in port_list:
continue
v["admin_status"] = "down"
# save the configuration to init file
with open(init_config_file, 'w') as outfile:
json.dump(file_dict, outfile, indent=4)
print("DONE")
def wait_for_ports_2(port_init_wait, poll_for_ports):
# Wait for last port to be available
port_num = read_port_inifile()
output = []
if poll_for_ports == "yes":
for iter in range(0, port_init_wait/2):
retval = execute_check_cmd("grep -r PortInitDone /var/log/syslog", skip_error=True)
port_info = get_port_status(port_num)
if port_info:
output.append(port_info)
if "PortInitDone" in retval:
output.append(retval)
break
if port_info and port_num in port_info:
break
execute_check_cmd("sleep 2", False)
else:
execute_check_cmd("sleep {}".format(port_init_wait))
return "\n".join(output)
def wait_for_ports(port_init_wait, poll_for_ports):
if port_init_wait == 0:
return
# use older mechanism for community build
if g_community_build:
return wait_for_ports_2(port_init_wait, poll_for_ports)
if poll_for_ports == "yes":
# Wait for last port to be available
port_num = read_port_inifile()
for iter in range(0, port_init_wait/2):
port_info = get_port_status(port_num)
retval = execute_check_cmd("show system status")
if "System is ready" in retval:
break
if port_info and port_num in port_info:
break
execute_check_cmd("sleep 2", False)
else:
execute_check_cmd("sleep {}".format(port_init_wait))
# check if the MAC address is present in config_db.json
def ensure_mac_address(filepath):
file_dict = read_json(filepath)
if "DEVICE_METADATA" in file_dict:
if "localhost" in file_dict["DEVICE_METADATA"]:
if "mac" not in file_dict["DEVICE_METADATA"]["localhost"]:
print("============ Recovering MAC address =======")
mac = get_mac_address()
file_dict["DEVICE_METADATA"]["localhost"]["dmac"] = mac
with open(filepath, 'w') as outfile:
json.dump(file_dict, outfile, indent=4)
print("===========================================")
def do_config_reload(filename=""):
if g_use_config_replace:
if filename:
execute_check_cmd("config_replace -f {}".format(filename))
else:
execute_check_cmd("config_replace")
else:
execute_check_cmd("config reload -y {}".format(filename))
def dump_click_cmds():
script = "/etc/spytest/remote/click-helper.py"
execute_check_cmd("python {}".format(script), show=False)
def set_port_defaults(breakout, speed, port_init_wait, poll_for_ports):
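    # breakout and speed are flat [port, value, port, value, ...] pair lists,
    # e.g. breakout=["Ethernet0", "4x10G"], speed=["Ethernet4", "10000"]
    # (example values are illustrative)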
if g_breakout_native:
script = "/usr/local/bin/port_breakout.py"
else:
script = None
if not script or not os.path.exists(script):
script = "/etc/spytest/remote/port_breakout.py"
if g_breakout_file:
script = script + " -c " + g_breakout_file
index = 0
while index < len(breakout):
opt = breakout[index+1]
execute_check_cmd("python {} -p {} -o {}".format(script, breakout[index], opt))
index = index + 2
if breakout:
ensure_mac_address(config_file)
do_config_reload()
index = 0
while index < len(speed):
opt = speed[index+1]
retval = execute_check_cmd("portconfig -p {} -s {}".format(speed[index], opt))
for line in retval.split("\n"):
match = re.match(r"^Port Ethernet\d+ belongs to port group (\d+)", line)
if match:
execute_check_cmd("config portgroup speed {} {}".format(match.group(1), opt))
break
index = index + 2
if speed:
execute_check_cmd("config save -y")
wait_for_ports(port_init_wait, poll_for_ports)
def config_reload(save, port_init_wait, poll_for_ports):
if save == "yes":
execute_check_cmd("config save -y")
ensure_mac_address(config_file)
do_config_reload()
wait_for_ports(port_init_wait, poll_for_ports)
def copy_or_delete(from_file, to_file):
if os.path.exists(from_file):
execute_check_cmd("cp {} {}".format(from_file, to_file))
else:
execute_check_cmd("rm -f {}".format(to_file))
def show_file_content(filename, msg=""):
if g_debug:
print("======================== {} ====================".format(msg))
if os.path.exists(filename):
execute_check_cmd("cat {}".format(filename))
else:
print("File {} does not exist".format(filename))
print("================================================")
def save_base_config():
# Save init_config_db.json and copy to base_config_db.json
execute_check_cmd("cp {} {}".format(init_config_file, base_config_file))
# Copy all init files as base files
copy_or_delete(init_frr_config_file, base_frr_config_file)
copy_or_delete(init_copp_config_file, base_copp_config_file)
copy_or_delete(init_minigraph_file, base_minigraph_file)
print("DONE")
def save_module_config():
# Save current DB configuration to config_db.json and copy it as module config file.
execute_check_cmd("config save -y")
execute_check_cmd("cp {} {}".format(config_file, module_config_file))
# save the FRR configuration applied in module init
execute_check_cmd("touch {}".format(frr_config_file))
execute_check_cmd("vtysh -c write file")
show_file_content(frr_config_file, "save_module_config FRR")
# Copy all the actual files as module files.
copy_or_delete(frr_config_file, module_frr_config_file)
copy_or_delete(minigraph_file, module_minigraph_file)
# Copy copp config file to ta location.
execute_check_cmd("docker cp swss:{} {}".format(copp_config_file, module_copp_config_file))
print("DONE")
def apply_ta_config(method, port_init_wait, poll_for_ports, is_module_cfg):
ta_config_file = module_config_file if is_module_cfg else base_config_file
ta_frr_config_file = module_frr_config_file if is_module_cfg else base_frr_config_file
ta_copp_config_file = module_copp_config_file if is_module_cfg else base_copp_config_file
ta_minigraph_file = module_minigraph_file if is_module_cfg else base_minigraph_file
# If no base/module config_db.json return back. No need to check for other file formats.
if not os.path.exists(ta_config_file):
print("==============================================================================")
print("======================= TA DEFAULT CONFIG FILE IS MISSING ====================")
print("==============================================================================")
print("NOFILE")
return
changed_files = []
    # Save current config in DB to a temp file and compare it with the base/module config_db.json file.
# If there is a change, add config to list.
execute_check_cmd("config save -y {}".format(tmp_config_file))
if not get_file_diff(tmp_config_file, ta_config_file, g_debug):
trace("TA Config File Differs")
changed_files.append("config")
# Compare the minigraph.xml file with base/module minigraph.xml
# If there is a change, add xml to list.
if os.path.exists(minigraph_file) and os.path.exists(ta_minigraph_file):
if not filecmp.cmp(minigraph_file, ta_minigraph_file):
trace("TA Minigraph File Differs")
changed_files.append("minigraph")
    # When the frr.conf file is not present, "write file" creates 3 different config files.
    # Touch the frr.conf first; this creates an empty file if it is not present.
# Write the current running configuration to frr.conf file
# Compare the generated frr.conf with base/module frr.conf file.
# If there is a change or no base/module/actual frr.conf file exists, add frr to list.
show_file_content(frr_config_file, "existing FRR")
execute_check_cmd("touch {}".format(frr_config_file))
execute_check_cmd("vtysh -c write file")
show_file_content(frr_config_file, "generated FRR")
show_file_content(ta_frr_config_file, "TA FRR")
if not os.path.exists(ta_frr_config_file) and not os.path.exists(frr_config_file):
pass
elif not os.path.exists(ta_frr_config_file):
trace("TA FRR File Missing")
changed_files.append("frr")
elif not filecmp.cmp(frr_config_file, ta_frr_config_file):
trace("FRR File Differs")
changed_files.append("frr")
# Save and compare the copp.json file
execute_check_cmd("docker cp swss:{} {}".format(copp_config_file, tmp_copp_file))
if not os.path.exists(tmp_copp_file) and os.path.exists(ta_copp_config_file):
trace("SWSS COPP File Missing")
changed_files.append("copp")
elif os.path.exists(tmp_copp_file) and not os.path.exists(ta_copp_config_file):
trace("TA COPP File Missing")
changed_files.append("copp")
elif os.path.exists(tmp_copp_file) and os.path.exists(ta_copp_config_file):
if not get_file_diff(tmp_copp_file, ta_copp_config_file, g_debug):
trace("COPP File Differs")
changed_files.append("copp")
# If a force method is *NOT* used, check for any entries in changed list
    # If no entries are present (i.e. no change in configs), return back.
if method not in ["force_reload", "force_reboot"]:
if not changed_files:
print("Config, FRR, COPP are same as TA files")
print("DONE")
return
print("The current config differs from TA config, {}".format(changed_files))
# Check for each entry in changed list and copy the base/module files to the actual files.
# Copy base/module config file to actual file if config entry exists.
# If base/module frr file not exists, remove the actual file.
# Copy base/module frr file to actual file if frr entry exists.
# COPP
if "config" in changed_files:
execute_check_cmd("cp {} {}".format(ta_config_file, config_file))
if os.path.exists(ta_minigraph_file) and "minigraph" in changed_files:
execute_check_cmd("cp -f {} {}".format(ta_minigraph_file, minigraph_file))
if not os.path.exists(ta_frr_config_file) and "frr" in changed_files:
execute_check_cmd("rm -f {}".format(frr_config_file))
if os.path.exists(ta_frr_config_file) and "frr" in changed_files:
execute_check_cmd("cp -f {} {}".format(ta_frr_config_file, frr_config_file))
if os.path.exists(ta_copp_config_file) and "copp" in changed_files:
execute_check_cmd("docker cp {} swss:{}".format(ta_copp_config_file, copp_config_file))
method = "force_reboot"
# We copied the changed files to actual files.
# If reboot related method is used, return back asking for reboot required.
if method in ["force_reboot", "reboot"]:
print("REBOOT REQUIRED")
return
# Following code is required for reload related methods.
# Create an empty frr.conf file. This will allow to write the running config to single file.
execute_check_cmd("touch {}".format(frr_config_file))
# Depending on the entries in the changed list, perform the operations.
if "minigraph" in changed_files:
execute_check_cmd("config load_minigraph -y")
execute_check_cmd("config save -y")
    # If the config entry is present, perform a config reload; this takes care of frr too.
    # If the frr entry is present, restart the bgp docker.
if "config" in changed_files or method in ["force_reload"]:
ensure_mac_address(ta_config_file)
#execute_check_cmd("echo before reload;date")
do_config_reload(ta_config_file)
#execute_check_cmd("echo after reload;date")
if "frr" in changed_files or method in ["force_reload"]:
execute_cmds(["systemctl restart bgp"])
execute_cmds(["sleep 10"])
    # Re-write the base/module frr.conf; this allows the hook-level config to get saved in frr.conf.
execute_check_cmd("vtysh -c write file")
if os.path.exists(frr_config_file):
execute_check_cmd("cp -f {} {}".format(frr_config_file, ta_frr_config_file))
show_file_content(ta_frr_config_file, "rewrite TA FRR")
# Wait for last port to be available
wait_for_ports(port_init_wait, poll_for_ports)
def run_test(script_fullpath, proc_args):
    if os.path.exists(script_fullpath):
execute_check_cmd("chmod 755 {}".format(script_fullpath))
args_to_script = " ".join(proc_args)
cmd = "{} {}".format(script_fullpath, args_to_script)
execute_check_cmd(cmd)
return
print("Script '{}' not exists".format(script_fullpath))
def enable_disable_debug(flag):
if not os.path.exists(syslog_file):
print("==============================================================================")
print("============================= SYSLOG FILE IS MISSING =========================")
print("==============================================================================")
print("NOFILE")
return
backup_file(syslog_file)
execute_check_cmd("cp {} {}".format(syslog_file, tmp_syslog_file))
cmd = ""
if flag:
cmd = '''sed -i '$ a :msg, contains, "(core dumped)" /dev/console' {}'''.format(syslog_file)
else:
cmd = '''sed '/core dumped/d' -i {}'''.format(syslog_file)
execute_check_cmd(cmd, False)
if not filecmp.cmp(syslog_file, tmp_syslog_file):
# files have different content
execute_check_cmd("systemctl restart rsyslog")
print("DONE")
return
print("NOCHANGE")
def read_messages(file_path, all_file, var_file, our_file):
(offset_file, offset) = read_offset(file_path)
execute_check_cmd("tail --lines=+{} {} > {}".format(offset, all_file, our_file))
execute_check_cmd("ls -l {}*".format(var_file))
retval = execute_check_cmd("wc -l {}".format(our_file))
write_offset(file_path, retval, offset)
def syslog_read_msgs(lvl, phase):
if phase: execute_check_cmd("sudo echo {}".format(phase))
file_path = "{}/syslog.offset".format(spytest_dir)
var_file = "/var/log/syslog"
our_file = "{}/syslog.txt".format(spytest_dir)
read_messages(file_path, var_file, var_file, our_file)
if lvl != "none" and lvl in syslog_levels:
index = syslog_levels.index(lvl)
needed = "|".join(syslog_levels[:index+1])
cmd = r"""grep -E "^\S+\s+[0-9]+\s+[0-9]+:[0-9]+:[0-9]+(\.[0-9]+){{0,1}}\s+\S+\s+({})" {}"""
execute_check_cmd(cmd.format(needed.upper(), our_file), False, True)
def do_sairedis(op):
if op == "clean":
execute_check_cmd("rm -f {}/sairedis.*".format(spytest_dir))
execute_check_cmd("rm -f /var/log/swss/sairedis.rec.*")
file_path = "{}/sairedis.offset".format(spytest_dir)
var_file = "/var/log/swss/sairedis.rec"
our_file = "{}/sairedis.txt".format(spytest_dir)
all_file = "{}/sairedis.all".format(spytest_dir)
execute_check_cmd("rm -f {0};ls -1tr {1}* | xargs zcat -f >> {0}".format(all_file, var_file))
read_messages(file_path, all_file, var_file, our_file)
if op == "read":
print("SAI-REDIS-FILE: /etc/spytest/sairedis.txt")
def invalid_ip(addr):
try:
socket.inet_aton(addr)
except:
return True
return False
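# Illustrative behavior:
#   invalid_ip("10.0.0.1")  -> False
#   invalid_ip("bad-ip")    -> True
# Note: socket.inet_aton() also accepts shorthand forms such as "10.1",
# so this is a permissive IPv4 check rather than a strict dotted-quad validator.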
def mgmt_ip_setting(mgmt_type, ip_addr_mask, gw_addr):
# Validate the ip/gw for static
if mgmt_type == "static":
if ip_addr_mask and gw_addr:
try:
(ipaddr, mask_str) = ip_addr_mask.split("/")
except:
print("IP and Mask should be provided with '/' delimited.")
return
try:
mask = int(mask_str)
if mask < 0 or mask > 32:
print("Invalid MASK provided.")
return
except:
print("Invalid MASK provided.")
return
if invalid_ip(ipaddr) or invalid_ip(gw_addr):
print("Invalid IP/Gateway provided.")
return
else:
print("IP or Gateway details not provided.")
return
file_dict = read_json(config_file)
if mgmt_type == "dhcp" and 'MGMT_INTERFACE' not in file_dict.keys():
print("DONE-NOCHANGE-DHCP")
return
print("Remove the required ip setting sections")
if 'MGMT_INTERFACE' in file_dict.keys():
del file_dict['MGMT_INTERFACE']
if mgmt_type == "static":
print("Adding new data")
mgmt_key = "eth0|{}".format(ip_addr_mask)
mgmt_dict = {mgmt_key: {"gwaddr": gw_addr}}
file_dict['MGMT_INTERFACE'] = mgmt_dict
# save the configuration
with open(config_file, 'w') as outfile:
json.dump(file_dict, outfile, indent=4)
print("DONE")
def fetch_core_files():
    # Create a tar file from the /var/core/*.core.gz files
core_files_list = glob.glob("/var/core/*.core.gz")
if len(core_files_list) == 0:
print("NO-CORE-FILES")
return
if os.path.exists(cores_tar_file_name):
execute_check_cmd("rm -f {}".format(cores_tar_file_name))
tar_cmd = "cd /var/core/ && tar -cf {} *.core.gz && cd -".format(cores_tar_file_name)
execute_check_cmd(tar_cmd)
if os.path.exists(cores_tar_file_name):
execute_check_cmd("rm -f /var/core/*.core.gz")
print("CORE-FILES: {}".format(cores_tar_file_name))
return
print("NO-CORE-FILES: No tar file is generated for core.gz files")
def get_tech_support():
# read last time stamp
lines = read_lines(tech_support_timestamp, [])
since = "--since='{}'".format(lines[0].strip()) if lines else ""
    # Create a tar file using the 'show techsupport' command
retcode = run_as_system_cmd("show techsupport {} > /tmp/show_tech_support.log 2>&1".format(since))
if retcode != 0:
print("NO-DUMP-FILES: 'show techsupport' command failed")
return
tech_support_tarlist = sorted(glob.glob("/var/dump/*.tar.gz"))
if len(tech_support_tarlist) == 0:
print("NO-DUMP-FILES: No techsupport tar file is generated in /var/dump/")
return
retval = "DUMP-FILES: {}".format(tech_support_tarlist[-1])
print(retval)
# save current time stamp
run_as_system_cmd("date > {}".format(tech_support_timestamp))
def fetch_kdump_files():
    # Create a tar file from the files under /var/crash (dmesg and kdump dumps)
kdump_files_type1 = execute_check_cmd("find /var/crash -name dmesg* | wc -l")
kdump_files_type2 = execute_check_cmd("find /var/crash -name kdump* | wc -l")
if kdump_files_type1 == '0' and kdump_files_type2 == '0':
print("NO-KDUMP-FILES")
return
if os.path.exists(kdump_tar_file_name):
execute_check_cmd("rm -f {}".format(kdump_tar_file_name))
tar_cmd = "cd /var/crash/ && tar -cf {} * && cd -".format(kdump_tar_file_name)
execute_check_cmd(tar_cmd)
if os.path.exists(kdump_tar_file_name):
execute_check_cmd("sudo rm -rf /var/crash/*")
print("KDUMP-FILES: {}".format(kdump_tar_file_name))
return
print("NO-KDUMP-FILES: No tar file is generated for kdump files")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='SpyTest Helper script.')
parser.add_argument("--env", action="append", default=[],
nargs=2, help="environment variables")
parser.add_argument("--apply-configs", action="store", default=None,
nargs="+", help="list of files that need to apply on dut.")
parser.add_argument("--apply-file-method", action="store",
choices=['full', 'incremental'],
help="method to apply files.")
parser.add_argument("--run-test", action="store", default=None, nargs="+",
help="execute the given script with given arguments.")
parser.add_argument("--save-base-config", action="store_true", default=False,
help="save the current config as base config.")
parser.add_argument("--save-module-config", action="store_true", default=False,
help="save the current config as module config.")
parser.add_argument("--init-ta-config", action="store", default=None,
help="save the current config as ta default config.")
parser.add_argument("--port-init-wait", action="store", type=int, default=0,
help="Wait time in seconds for ports to come up -- default: 0")
parser.add_argument("--poll-for-ports", action="store", default="no",
choices=['yes', 'no'],
help="Poll for the ports status after the DUT reload")
parser.add_argument("--apply-base-config", action="store",
choices=['reload', 'replace', 'reboot', 'force_reload', 'force_reboot'],
help="apply base config as current config.")
parser.add_argument("--apply-module-config", action="store",
choices=['reload', 'replace', 'reboot', 'force_reload', 'force_reboot'],
help="apply module config as current config.")
parser.add_argument("--json-diff", action="store", nargs=2, default=None,
help="dump the difference between json files.")
parser.add_argument("--enable-debug", action="store_true", default=False,
help="enable debug messages onto the console.")
parser.add_argument("--disable-debug", action="store_true", default=False,
help="disable debug messages onto the console.")
parser.add_argument("--syslog-check", action="store", default=None,
choices=syslog_levels,
help="read syslog messages of given level and clear all syslog messages.")
parser.add_argument("--phase", action="store", default=None,
help="phase for checks.")
parser.add_argument("--sairedis", action="store", default="none",
choices=['clear', 'read', 'none', 'clean'], help="read sairedis messages.")
parser.add_argument("--execute-from-file", action="store", default=None,
help="execute commands from file.")
parser.add_argument("--set-mgmt-ip", action="store", default=None,
choices=['dhcp', 'static', None], help="Management(eth0) address type.")
parser.add_argument("--ip-addr-mask", action="store", default=None,
help="IP address to set for management port(eth0).")
parser.add_argument("--gw-addr", action="store", default=None,
help="Gateway address to set for management port(eth0).")
parser.add_argument("--fetch-core-files", action="store", default=None,
choices=['collect_kdump', 'none'],
help="Fetch the core files from DUT to logs location.")
parser.add_argument("--get-tech-support", action="store_true", default=False,
help="Get the tech-support information from DUT to logs location.")
parser.add_argument("--init-clean", action="store", default=None,
help="Clear the core files, dump files, syslog data etc.")
parser.add_argument("--update-reserved-ports", action="store", default=None,
nargs="+", help="list of reserved ports that need to be shutdown on dut.")
parser.add_argument("--breakout", action="store", default=[],
nargs="+", help="breakout operations to be performed.")
parser.add_argument("--speed", action="store", default=[],
nargs="+", help="speed operations to be performed.")
parser.add_argument("--port-defaults", action="store_true", default=None,
help="apply breakout/speed defaults.")
parser.add_argument("--dump-click-cmds", action="store_true", default=None,
help="dump all click commnds.")
parser.add_argument("--config-reload", action="store", default=None,
choices=['yes', 'no'],
help="perform config reload operation: yes=save+reload no=reload")
parser.add_argument("--wait-for-ports", action="store_true", default=None,
help="wait for ports to comeup.")
parser.add_argument("--config-profile", action="store", default="na",
choices=['l2', 'l3', 'na'], help="Profile name to load.")
parser.add_argument("--community-build", action="store_true", default=False,
help="use community build options.")
parser.add_argument("--breakout-native", action="store_true", default=False,
help="Use port breakout script from device.")
parser.add_argument("--breakout-file", action="store", default=None,
help="Use port breakout options from file.")
parser.add_argument("--use-config-replace", action="store_true", default=False,
help="use config replace where ever config reload is needed.")
parser.add_argument("--debug", action="store_true", default=False)
args, unknown = parser.parse_known_args()
if unknown:
print("IGNORING unknown arguments", unknown)
#g_debug = args.debug
g_community_build = args.community_build
g_breakout_native = args.breakout_native
g_breakout_file = args.breakout_file
g_use_config_replace = args.use_config_replace
for name, value in args.env:
os.environ[name] = value
if args.apply_configs:
parse_and_apply_files(args.apply_configs, args.apply_file_method)
elif args.run_test:
script_fullname = args.run_test[0]
script_arguments = args.run_test[1:]
run_test(script_fullname, script_arguments)
elif args.init_ta_config:
init_ta_config(args.init_ta_config, args.config_profile)
elif args.save_base_config:
save_base_config()
elif args.save_module_config:
save_module_config()
elif args.apply_base_config:
apply_ta_config(args.apply_base_config, args.port_init_wait, args.poll_for_ports, False)
elif args.apply_module_config:
apply_ta_config(args.apply_module_config, args.port_init_wait, args.poll_for_ports, True)
elif args.json_diff:
retval = get_file_diff(args.json_diff[0], args.json_diff[1], True)
print(retval)
elif args.enable_debug:
enable_disable_debug(True)
elif args.disable_debug:
enable_disable_debug(False)
elif args.syslog_check:
syslog_read_msgs(args.syslog_check, args.phase)
elif args.sairedis != "none":
do_sairedis(args.sairedis)
elif args.execute_from_file:
execute_from_file(args.execute_from_file)
elif args.set_mgmt_ip:
mgmt_ip_setting(args.set_mgmt_ip, args.ip_addr_mask, args.gw_addr)
elif args.fetch_core_files:
fetch_core_files()
if args.fetch_core_files == "collect_kdump":
fetch_kdump_files()
elif args.get_tech_support:
get_tech_support()
elif args.init_clean:
init_clean(args.init_clean)
elif args.update_reserved_ports:
update_reserved_ports(args.update_reserved_ports)
elif args.port_defaults:
set_port_defaults(args.breakout, args.speed, args.port_init_wait, args.poll_for_ports)
elif args.dump_click_cmds:
dump_click_cmds()
elif args.config_reload:
config_reload(args.config_reload, args.port_init_wait, args.poll_for_ports)
elif args.wait_for_ports:
wait_for_ports(args.port_init_wait, args.poll_for_ports)
elif args.config_profile:
apply_config_profile(args.config_profile)
else:
print("Error: Invalid/Unknown arguments provided for the script.")
| [
"[email protected]"
] | |
3ae8ac2cf1fd31a817682334a42b0a5be16ee6b1 | d267ec32822b24092f617e88da919d1709549394 | /wproject1m/ecommerce2/One_GB_Mobiles/models.py | da5e919880576434f6e016c4f8a762046b6dfaa9 | [] | no_license | sam-student/Evaluation | 42fcccae54358fbb6a8bef8c5f9d80a7bc075864 | 3ba7842a15e431d30618c28819ea9b64c618ef2a | refs/heads/master | 2020-05-17T04:23:08.997952 | 2019-04-25T21:05:46 | 2019-04-25T21:05:46 | 183,507,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,768 | py | import random
import os
from django.db import models
from django.urls import reverse
# Create your models here.
from django.db.models.signals import pre_save, post_save
from ecommerce.utils import unique_slug_generator
def get_filename_ext(filename):
base_name=os.path.basename(filename)
name, ext = os.path.splitext(filename)
return name,ext
def upload_image_path(instance,filename):
print(instance)
print(filename)
new_filename=random.randint(1,39321457854)
name,ext = get_filename_ext(filename)
    final_filename = '{new_filename}{ext}'.format(new_filename=new_filename, ext=ext)
    # Use the original file's base name (extension stripped) as the subfolder
    return "One_GB_Mobiles/{name}/{final_filename}".format(name=name, final_filename=final_filename)
class ProductQuerySet(models.query.QuerySet):
def active(self):
return self.filter()
def featured(self):
return self.filter()
class ProductManager(models.Manager):
def get_queryset(self):
return ProductQuerySet(self.model, using=self._db)
# def all(self):
# return self.get_queryset().active()
#
# def featured(self):
# return self.get_queryset().featured()
def get_by_id(self, id):
qs = self.get_queryset().filter(id = id)
if qs.count() == 1:
return qs.first()
return None
class One_GB_Mobile(models.Model):
title = models.CharField(max_length=120)
slug = models.SlugField(blank=True, unique=True)
price = models.DecimalField(decimal_places=2, max_digits=20, default=39.99)
Charging = models.TextField(default="good speakers")
Torch = models.TextField(default="Yes")
Games = models.TextField(default="built-in + downloadable")
Messaging = models.TextField(default=", SMS (threaded view), MMS, Email, Push Email")
Browser = models.TextField(default="HTML5")
Audio = models.TextField(default="3.5mm audio jack, MP4/WMV/H.264 player")
Data = models.TextField(default="GPRS, Edge, 3G (HSPA 42.2/5.76 Mbps), 4G (LTE-A (2CA) Cat6 300/50 Mbps")
NFC = models.TextField(default="Yes")
USB = models.TextField(default="microUSB 2.0")
GPS = models.TextField(default="Yes + A-GPS support & Glonass, BDS, GALILEO")
Bluetooth = models.TextField(default="None")
Wifi = models.TextField(default="Wi-Fi 802.11 a/b/g/n/ac, dual-band, hotspot")
Front = models.TextField(default="13 MP, f/1.9, LED flash")
Main = models.TextField(default="8MP")
card = models.TextField(default="Yes")
BuiltIn = models.TextField(default="16GB Built-in")
Features = models.TextField(default="None")
Protection = models.TextField(default="Yes")
Resolution = models.TextField(default="720 x 1280 Pixels (~282 PPI) ")
Size = models.TextField(default="5.5 inches")
Technology = models.TextField(default="None")
GPU = models.TextField(default="Mali-T830MP2 ")
Chipset = models.TextField(default="None")
CPU = models.TextField(default="None")
FourGBand = models.TextField(default="LTE")
ThreeGBand = models.TextField(default="HSDPA 850 / 900 / 1700(AWS) / 1900 / 2100 ")
TwoGBand = models.TextField(default="SIM1: GSM 850 / 900 / 1800 / 1900 SIM2: GSM 850 / 900 / 1800 / 1900 ")
Color = models.TextField(default="Silver, Space Gray, Gold")
SIM = models.TextField(default="Single SIM (Nano-SIM) ")
Weight = models.TextField(default="148g")
Dimension = models.TextField(default="146.2 x 71.3 x 8 mm")
UIBuild = models.TextField(default="TouchWiz UI")
OperatingSystem = models.TextField(default="Android v7.1 Nougat")
image = models.ImageField(upload_to=upload_image_path, null=True, blank=True)
image1 = models.ImageField(upload_to=upload_image_path, null=True, blank=True)
image2 = models.ImageField(upload_to=upload_image_path, null=True, blank=True)
Review_count = models.TextField(default="90")
Average_Rating = models.TextField(default=" 4")
Reviews = models.TextField(default="None")
Ram = models.TextField(default="2GB")
# description = models.TextField()
# featured = models.BooleanField(default=False)
# active = models.BooleanField(default=True)
# timestamp = models.DateTimeField(auto_now_add=True)
objects = ProductManager()
def get_absolute_url(self):
#return "/products/{slug}".format(slug=self.slug)
return reverse("One_GB_Mobiles:detail", kwargs={"slug": self.slug})
def __str__(self):
return self.title
def __unicode__(self):
return self.title
def name(self):
return self.title
def product_pre_save_receiver(sender, instance , *args,**kwargs):
if not instance.slug:
instance.slug = unique_slug_generator(instance)
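# Illustrative usage (e.g. from a Django shell; values are examples only); the
# pre_save hook connected below auto-fills the slug on save:
#   phone = One_GB_Mobile.objects.create(title="Acme A1", price=99.99)
#   phone.slug                 # e.g. "acme-a1", set by product_pre_save_receiver
#   phone.get_absolute_url()   # reverses "One_GB_Mobiles:detail" with that slug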
pre_save.connect(product_pre_save_receiver, sender=One_GB_Mobile) | [
"[email protected]"
] | |
2469c0eba172dd50239c61a100a2e4db476432c2 | 5733fb1a6746146889ac0941258ef5716ea17e7e | /snippets/migrations/0003_auto_20171127_0352.py | fa74dc12b146cc8cb2888b9c339cc997394a825c | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | teraoka-hiroshi/django-auth-example | b401df8877c3fc9ca61cf1cdb7d7541ef8e19820 | 675492aeb5f42dc04f9ba5de7f8f528120ddceea | refs/heads/master | 2022-01-13T08:23:25.879459 | 2018-05-21T17:06:22 | 2018-05-21T17:06:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | # Generated by Django 2.0rc1 on 2017-11-26 18:52
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('snippets', '0002_auto_20171127_0329'),
]
operations = [
migrations.RenameField(
model_name='snippet',
old_name='posted_by',
new_name='created_by',
),
migrations.RemoveField(
model_name='snippet',
name='created_date',
),
migrations.AddField(
model_name='snippet',
name='created_at',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='投稿日'),
preserve_default=False,
),
migrations.AddField(
model_name='snippet',
name='updated_at',
field=models.DateTimeField(auto_now=True, verbose_name='更新日'),
),
]
| [
"[email protected]"
] | |
680ef4bff1f4d131a6765303e1123e2525fa7bb0 | d7f2df4896898b9c30ce58507ecc72d83c34f07c | /classification.py | a079ad32e31b4e0e1fbb10af9cea8d01280bd65b | [] | no_license | candlewill/Vecamend | a12e6f74f22325cd7993c41661816780d2f3e868 | 7b73678cd4eb4aba926d4cbe752c91c7fa10ebc3 | refs/heads/master | 2021-01-10T13:27:10.847329 | 2016-01-18T16:09:00 | 2016-01-18T16:09:00 | 46,402,087 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,176 | py | import numpy as np
from load_data import load_pickle
from sklearn.cross_validation import ShuffleSplit
from sklearn.linear_model import LogisticRegression
def build_data():
positive_data = load_pickle('./tmp/amended_pos.p')
negative_data = load_pickle('./tmp/amended_neg.p')
X, Y = [], []
for pos in positive_data.keys():
X.append(positive_data[pos])
Y.append(1)
for neg in negative_data.keys():
X.append(negative_data[neg])
Y.append(0)
return np.array(X), np.array(Y)
def train_model(X, Y):
    num_iter = 20
    rs = ShuffleSplit(n=len(X), n_iter=num_iter, test_size=0.2, indices=True, random_state=0)
accuracy = []
for train_index, test_index in rs:
X_test, Y_test = X[test_index], Y[test_index]
X_train, Y_train = X[train_index], Y[train_index]
classifier = LogisticRegression()
classifier.fit(X_train, Y_train)
acc = classifier.score(X_test, Y_test)
accuracy.append(acc)
        print('Accuracy: %s.' % acc)
    print('Mean accuracy: %s.' % np.mean(np.array(accuracy)))
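# Note: ShuffleSplit(n=..., n_iter=..., indices=...) is the scikit-learn <= 0.17 API.
# A sketch of the equivalent loop with the modern API (assumes sklearn >= 0.18):
#   from sklearn.model_selection import ShuffleSplit
#   rs = ShuffleSplit(n_splits=20, test_size=0.2, random_state=0)
#   for train_index, test_index in rs.split(X):
#       ...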
if __name__ == '__main__':
X, Y = build_data()
train_model(X, Y)
| [
"[email protected]"
] | |
ba451ddd52423d13c07f5377076fc5316f56263b | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/web/v20190801/list_static_site_function_app_settings.py | 6388c3e6d80456c0f9b063f72414997bc774ce73 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,459 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'ListStaticSiteFunctionAppSettingsResult',
'AwaitableListStaticSiteFunctionAppSettingsResult',
'list_static_site_function_app_settings',
'list_static_site_function_app_settings_output',
]
@pulumi.output_type
class ListStaticSiteFunctionAppSettingsResult:
"""
String dictionary resource.
"""
def __init__(__self__, id=None, kind=None, name=None, properties=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> Mapping[str, str]:
"""
Settings.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableListStaticSiteFunctionAppSettingsResult(ListStaticSiteFunctionAppSettingsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListStaticSiteFunctionAppSettingsResult(
id=self.id,
kind=self.kind,
name=self.name,
properties=self.properties,
type=self.type)
def list_static_site_function_app_settings(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListStaticSiteFunctionAppSettingsResult:
"""
String dictionary resource.
:param str name: Name of the static site.
:param str resource_group_name: Name of the resource group to which the resource belongs.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:web/v20190801:listStaticSiteFunctionAppSettings', __args__, opts=opts, typ=ListStaticSiteFunctionAppSettingsResult).value
return AwaitableListStaticSiteFunctionAppSettingsResult(
id=__ret__.id,
kind=__ret__.kind,
name=__ret__.name,
properties=__ret__.properties,
type=__ret__.type)
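# Illustrative usage (resource names are examples only):
#   settings = list_static_site_function_app_settings(
#       name="my-static-site",
#       resource_group_name="my-resource-group")
#   app_settings = settings.properties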
@_utilities.lift_output_func(list_static_site_function_app_settings)
def list_static_site_function_app_settings_output(name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListStaticSiteFunctionAppSettingsResult]:
"""
String dictionary resource.
:param str name: Name of the static site.
:param str resource_group_name: Name of the resource group to which the resource belongs.
"""
...
| [
"[email protected]"
] | |
2e7c3ca2251c8b4024a5b4bf215a578d51f2c361 | 847273de4b1d814fab8b19dc651c651c2d342ede | /.history/Sudoku_upgr2_20180607105940.py | e76935186c51e8439262854cc75ecf84734205af | [] | no_license | Los4U/sudoku_in_python | 0ba55850afcffeac4170321651620f3c89448b45 | 7d470604962a43da3fc3e5edce6f718076197d32 | refs/heads/master | 2020-03-22T08:10:13.939424 | 2018-07-04T17:21:13 | 2018-07-04T17:21:13 | 139,749,483 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,220 | py | #row1 = [0,0,0,0,0,0,0,0,0]
#row2 = [0,0,0,5,0,6,0,0,0]
#row3 = [0,0,1,0,0,0,0,3,0]
#row4 = [0,9,5,0,0,0,2,0,0]
#row5 = [0,0,0,0,0,1,6,0,7]
#row6 = [1,0,6,0,0,9,0,0,5]
#row7 = [7,0,0,8,0,3,9,0,0]
#row8 = [0,3,8,9,0,0,0,2,0]
#row9 = [0,5,0,0,2,0,7,0,0]
columns = [1,2,3,4,5,6,7,8,9]
row1 = [9,8,7,4,3,2,5,6,1]
row2 = [2,4,3,5,1,6,8,7,9]
row3 = [5,6,1,7,9,8,4,3,2]
row4 = [3,9,5,6,4,7,2,1,8]
row5 = [8,2,4,3,5,1,6,9,7]
row6 = [1,7,6,2,8,9,3,4,5]
row7 = [7,1,2,8,6,3,9,5,4]
row8 = [4,3,8,9,7,5,1,2,6]
row9 = [0,5,0,0,2,0,7,0,0]
def print_sudoku():
print(' ', columns[0],columns[1],columns[2], sep=' ', end=" ")
print(columns[3],columns[4],columns[5], sep=' ', end=" ")
print(columns[6],columns[7],columns[8], sep=' ')
print(" -------------------------------------" )
print('1 |', row1[0],row1[1],row1[2], sep=' ', end=" | ")
print(row1[3],row1[4],row1[5], sep=' ', end=" | ")
print(row1[6],row1[7],row1[8], "|", sep=' ')
print(" | | | |")
print('2 |', row2[0],row2[1],row2[2], sep=' ', end=" | ")
print(row2[3],row2[4],row2[5], sep=' ', end=" | ")
print(row2[6],row2[7],row2[8], "|", sep=' ')
print(" | | | |")
print('3 |', row3[0],row3[1],row3[2], sep=' ', end=" | ")
print(row3[3],row3[4],row3[5], sep=' ', end=" | ")
print(row3[6],row3[7],row3[8], "|", sep=' ')
print(" |-----------------------------------|" )
print('4 |', row4[0],row4[1],row4[2], sep=' ', end=" | ")
print(row4[3],row4[4],row4[5], sep=' ', end=" | ")
print(row4[6],row4[7],row4[8], "|", sep=' ')
print(" | | | |")
print('5 |', row5[0],row5[1],row5[2], sep=' ', end=" | ")
print(row5[3],row5[4],row5[5], sep=' ', end=" | ")
print(row5[6],row5[7],row5[8], "|", sep=' ')
print(" | | | |")
print('6 |', row6[0],row6[1],row6[2], sep=' ', end=" | ")
print(row6[3],row6[4],row6[5], sep=' ', end=" | ")
print(row6[6],row6[7],row6[8], "|", sep=' ')
print(" |-----------------------------------|" )
print('7 |', row7[0],row7[1],row7[2], sep=' ', end=" | ")
print(row7[3],row7[4],row7[5], sep=' ', end=" | ")
print(row7[6],row7[7],row7[8], "|", sep=' ')
print(" | | | |")
print('8 |', row8[0],row8[1],row8[2], sep=' ', end=" | ")
print(row8[3],row8[4],row8[5], sep=' ', end=" | ")
print(row8[6],row8[7],row8[8], "|", sep=' ')
print(" | | | |")
print('9 |', row9[0],row9[1],row9[2], sep=' ', end=" | ")
print(row9[3],row9[4],row9[5], sep=' ', end=" | ")
print(row9[6],row9[7],row9[8], "|", sep=' ')
print(" |-----------------------------------|" )
print("Your sudoku to solve:")
print_sudoku()
while True:
print("Input 3 numbers in format a b c, np. 4 5 8")
print("a - row number")
print("b - column number ")
print("c - value \n ")
x = input("Input a b c: ")
print("")
    positions = "123456789"
    values = "0123456789"
    if len(x) != 5:
        print("ERROR - invalid format!\n ")
        continue
    if (str(x[0]) not in positions) or (str(x[2]) not in positions) or (str(x[4]) not in values) or (str(x[1]) != " ") or (str(x[3]) != " "):
        print("ERROR - invalid format!\n ")
        continue
if int(x[0])==1:
row1[int(x[2])-1]=int(x[4])
elif int(x[0])==2:
row2[int(x[2])-1]=int(x[4])
elif int(x[0])==3:
row3[int(x[2])-1]=int(x[4])
elif int(x[0])==4:
row4[int(x[2])-1]=int(x[4])
elif int(x[0])==5:
row5[int(x[2])-1]=int(x[4])
elif int(x[0])==6:
row6[int(x[2])-1]=int(x[4])
elif int(x[0])==7:
row7[int(x[2])-1]=int(x[4])
elif int(x[0])==8:
row8[int(x[2])-1]=int(x[4])
elif int(x[0])==9:
row9[int(x[2])-1]=int(x[4])
print_sudoku()
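    # Note: sum(row) == 45 is necessary but not sufficient for a solved board
    # (duplicates such as 9+9+... can also reach 45). A stricter per-row check
    # would be (sketch; columns and 3x3 boxes would need the same treatment):
    #   all(sorted(r) == list(range(1, 10))
    #       for r in (row1, row2, row3, row4, row5, row6, row7, row8, row9))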
if sum(row1) == 45 and sum(row2) == 45 and sum(row3) == 45 and sum(row4) == 45 and sum(row5) == 45 and sum(row6) == 45 and sum(row7) == 45 and sum(row8) == 45 and sum(row9) == 45:
print("YOU WIN !! Master teach me!")
break | [
"[email protected]"
] | |
40f2eac079d40bc274d3a0b07534b141a26c2887 | 6d9ebbee5dd515ff8d1e039b28ebcdbe185f6275 | /info/modules/uic/ex_loaduitype.py | 9d512b9e7c59a341e0e9d153b8620f8823240bcd | [] | no_license | volitilov/PyQt5_learn | 50bc378798609d98db2bd7fabe4b13ad1257e308 | f5270173d62bb61b374593cb22c4f9905a61d404 | refs/heads/master | 2021-09-08T14:12:58.387721 | 2018-03-10T10:03:06 | 2018-03-10T10:03:06 | 115,354,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | from PyQt5 import QtWidgets, uic
import sys
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# class MyWidget(QtWidgets.QWidget):
# def __init__(self, parent=None):
# QtWidgets.QWidget.__init__(self, parent)
# Form, _ = uic.loadUiType('MyForm.ui')
# self.ui = Form()
# self.ui.setupUi(self)
# self.ui.button.clicked.connect(QtWidgets.qApp.quit)
Form, _ = uic.loadUiType('MyForm.ui')
class MyWidget(QtWidgets.QWidget, Form):
def __init__(self, parent=None):
QtWidgets.QWidget.__init__(self, parent)
self.setupUi(self)
self.button.clicked.connect(QtWidgets.qApp.quit)
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
window = MyWidget()
window.show()
sys.exit(app.exec_()) | [
"[email protected]"
] | |
5a2acdcac93d580ba844a41f6be3e5618201d90b | a5b8dc5566567a8d23fc061b98ea2aa55e8f6361 | /tests/test_endpoints_sync_methods.py | d2411b2d1594f498fcd609e6a5d0fea369d5b703 | [
"MIT"
] | permissive | vltr/sanic-jwt | 5f512f91e89121c55498c88669c44dce441fdac8 | 19df69f78db121404325417f71d7bef2d1d4738d | refs/heads/master | 2021-05-11T06:31:34.497767 | 2018-02-06T19:00:50 | 2018-02-06T19:01:42 | 117,989,778 | 0 | 0 | null | 2018-01-18T13:57:27 | 2018-01-18T13:57:27 | null | UTF-8 | Python | false | false | 5,020 | py | import binascii
import os
from sanic import Sanic
from sanic.response import json
import pytest
from sanic_jwt import initialize
from sanic_jwt import exceptions
from sanic_jwt.decorators import protected
@pytest.yield_fixture
def app_with_sync_methods(users):
cache = {}
def authenticate(request, *args, **kwargs):
username = request.json.get('username', None)
password = request.json.get('password', None)
if not username or not password:
raise exceptions.AuthenticationFailed(
"Missing username or password.")
user = None
for u in users:
if u.username == username:
user = u
break
if user is None:
raise exceptions.AuthenticationFailed("User not found.")
if password != user.password:
raise exceptions.AuthenticationFailed("Password is incorrect.")
return user
def store_refresh_token(user_id, refresh_token, *args, **kwargs):
key = 'refresh_token_{user_id}'.format(user_id=user_id)
cache[key] = refresh_token
def retrieve_refresh_token(user_id, *args, **kwargs):
key = 'refresh_token_{user_id}'.format(user_id=user_id)
return cache.get(key, None)
def retrieve_user(request, payload, *args, **kwargs):
if payload:
user_id = payload.get('user_id', None)
if user_id is not None:
for u in users:
if u.user_id == user_id:
return u
else:
return None
sanic_app = Sanic()
initialize(
sanic_app,
authenticate=authenticate,
store_refresh_token=store_refresh_token,
retrieve_refresh_token=retrieve_refresh_token,
retrieve_user=retrieve_user)
sanic_app.config.SANIC_JWT_REFRESH_TOKEN_ENABLED = True
sanic_app.config.SANIC_JWT_SECRET = str(
binascii.hexlify(os.urandom(32)), 'utf-8')
@sanic_app.route("/")
async def helloworld(request):
return json({"hello": "world"})
@sanic_app.route("/protected")
@protected()
async def protected_request(request):
return json({"protected": True})
yield sanic_app
class TestEndpointsSync(object):
@pytest.yield_fixture
def authenticated_response(self, app_with_sync_methods):
_, response = app_with_sync_methods.test_client.post(
'/auth', json={
'username': 'user1',
'password': 'abcxyz'
})
assert response.status == 200
yield response
def test_root_endpoint(self, app_with_sync_methods):
_, response = app_with_sync_methods.test_client.get('/')
assert response.status == 200
assert response.json.get('hello') == 'world'
def test_protected_endpoint(self, app_with_sync_methods,
authenticated_response):
access_token = authenticated_response.json.get(
app_with_sync_methods.config.SANIC_JWT_ACCESS_TOKEN_NAME, None)
_, response = app_with_sync_methods.test_client.get(
'/protected',
headers={
'Authorization': 'Bearer {}'.format(access_token)
})
assert response.status == 200
assert response.json.get('protected') is True
def test_me_endpoint(self, app_with_sync_methods,
authenticated_response):
access_token = authenticated_response.json.get(
app_with_sync_methods.config.SANIC_JWT_ACCESS_TOKEN_NAME, None)
_, response = app_with_sync_methods.test_client.get(
'/auth/me',
headers={
'Authorization': 'Bearer {}'.format(access_token)
})
assert response.status == 200
    def test_refresh_token_sync(self, app_with_sync_methods,
authenticated_response):
access_token = authenticated_response.json.get(
app_with_sync_methods.config.SANIC_JWT_ACCESS_TOKEN_NAME, None)
refresh_token = authenticated_response.json.get(
app_with_sync_methods.config.SANIC_JWT_REFRESH_TOKEN_NAME, None)
_, response = app_with_sync_methods.test_client.post(
'/auth/refresh',
headers={'Authorization': 'Bearer {}'.format(access_token)},
json={
app_with_sync_methods.config.
SANIC_JWT_REFRESH_TOKEN_NAME:
refresh_token
})
new_access_token = response.json.get(
app_with_sync_methods.config.SANIC_JWT_ACCESS_TOKEN_NAME, None)
assert response.status == 200
assert new_access_token is not None
assert response.json.get(
app_with_sync_methods.config.SANIC_JWT_REFRESH_TOKEN_NAME,
None) is None # there is no new refresh token
assert \
app_with_sync_methods.config.SANIC_JWT_REFRESH_TOKEN_NAME \
not in response.json
| [
"[email protected]"
] | |
01af2a04a0141f45f66ac9df7ebd501817afba5f | 03de685efae7d8f6de0e98c3008cb89f87825fb4 | /robot/transform.py | 8115f3c0cf94ebbe7bebfc48cec53dfc3436e341 | [] | no_license | gedeschaines/robotics-toolbox-python | 161f7af8be91c51e1902021ba9f9dc3f6fc5b766 | 22eb2394172e60b1dbca03d4be9bb0ecaf49b183 | refs/heads/master | 2021-06-14T00:42:24.468518 | 2021-02-17T22:20:36 | 2021-02-17T22:20:36 | 140,235,483 | 13 | 3 | null | 2019-01-06T13:30:23 | 2018-07-09T05:29:19 | Python | UTF-8 | Python | false | false | 17,961 | py | """
Primitive operations for 3x3 orthonormal and 4x4 homogeneous matrices.
Python implementation by: Luis Fernando Lara Tobar and Peter Corke.
Based on original Robotics Toolbox for Matlab code by Peter Corke.
Permission to use and copy is granted provided that acknowledgement of
the authors is made.
@author: Luis Fernando Lara Tobar and Peter Corke
"""
from numpy import *
from robot.utility import *
from numpy.linalg import norm
import robot.Quaternion as Q
def rotx(theta):
"""
Rotation about X-axis
@type theta: number
@param theta: the rotation angle
@rtype: 3x3 orthonormal matrix
@return: rotation about X-axis
@see: L{roty}, L{rotz}, L{rotvec}
"""
ct = cos(theta)
st = sin(theta)
return mat([[1, 0, 0],
[0, ct, -st],
[0, st, ct]])
def roty(theta):
"""
Rotation about Y-axis
@type theta: number
@param theta: the rotation angle
@rtype: 3x3 orthonormal matrix
@return: rotation about Y-axis
@see: L{rotx}, L{rotz}, L{rotvec}
"""
ct = cos(theta)
st = sin(theta)
return mat([[ct, 0, st],
[0, 1, 0],
[-st, 0, ct]])
def rotz(theta):
"""
Rotation about Z-axis
@type theta: number
@param theta: the rotation angle
@rtype: 3x3 orthonormal matrix
@return: rotation about Z-axis
@see: L{rotx}, L{roty}, L{rotvec}
"""
ct = cos(theta)
st = sin(theta)
return mat([[ct, -st, 0],
[st, ct, 0],
[ 0, 0, 1]])
def trotx(theta):
"""
Rotation about X-axis
@type theta: number
@param theta: the rotation angle
@rtype: 4x4 homogeneous matrix
@return: rotation about X-axis
@see: L{troty}, L{trotz}, L{rotx}
"""
return r2t(rotx(theta))
def troty(theta):
"""
Rotation about Y-axis
@type theta: number
@param theta: the rotation angle
@rtype: 4x4 homogeneous matrix
@return: rotation about Y-axis
@see: L{troty}, L{trotz}, L{roty}
"""
return r2t(roty(theta))
def trotz(theta):
"""
Rotation about Z-axis
@type theta: number
@param theta: the rotation angle
@rtype: 4x4 homogeneous matrix
@return: rotation about Z-axis
@see: L{trotx}, L{troty}, L{rotz}
"""
return r2t(rotz(theta))
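# Illustrative relation between the 3x3 and 4x4 forms:
#   t2r(trotx(0.1))    -> rotx(0.1)   (rotation submatrix is preserved)
#   transl(trotx(0.1)) -> 3x1 zero translation column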
##################### Euler angles
def tr2eul(m):
"""
Extract Euler angles.
Returns a vector of Euler angles corresponding to the rotational part of
the homogeneous transform. The 3 angles correspond to rotations about
the Z, Y and Z axes respectively.
@type m: 3x3 or 4x4 matrix
@param m: the rotation matrix
@rtype: 1x3 matrix
@return: Euler angles [S{theta} S{phi} S{psi}]
@see: L{eul2tr}, L{tr2rpy}
"""
try:
m = mat(m)
if ishomog(m):
euler = mat(zeros((1,3)))
if norm(m[0,2])<finfo(float).eps and norm(m[1,2])<finfo(float).eps:
# singularity
euler[0,0] = 0
sp = 0
cp = 1
euler[0,1] = arctan2(cp*m[0,2] + sp*m[1,2], m[2,2])
euler[0,2] = arctan2(-sp*m[0,0] + cp*m[1,0], -sp*m[0,1] + cp*m[1,1])
return euler
else:
euler[0,0] = arctan2(m[1,2],m[0,2])
sp = sin(euler[0,0])
cp = cos(euler[0,0])
euler[0,1] = arctan2(cp*m[0,2] + sp*m[1,2], m[2,2])
euler[0,2] = arctan2(-sp*m[0,0] + cp*m[1,0], -sp*m[0,1] + cp*m[1,1])
return euler
except ValueError:
euler = []
for i in range(0,len(m)):
euler.append(tr2eul(m[i]))
return euler
def eul2r(phi, theta=None, psi=None):
"""
Rotation from Euler angles.
Two call forms:
- R = eul2r(S{theta}, S{phi}, S{psi})
- R = eul2r([S{theta}, S{phi}, S{psi}])
These correspond to rotations about the Z, Y, Z axes respectively.
@type phi: number or list/array/matrix of angles
@param phi: the first Euler angle, or a list/array/matrix of angles
@type theta: number
@param theta: the second Euler angle
@type psi: number
@param psi: the third Euler angle
@rtype: 3x3 orthonormal matrix
@return: R([S{theta} S{phi} S{psi}])
@see: L{tr2eul}, L{eul2tr}, L{tr2rpy}
"""
n = 1
if theta == None and psi==None:
# list/array/matrix argument
phi = mat(phi)
if numcols(phi) != 3:
error('bad arguments')
else:
n = numrows(phi)
psi = phi[:,2]
theta = phi[:,1]
phi = phi[:,0]
elif (theta!=None and psi==None) or (theta==None and psi!=None):
error('bad arguments')
elif not isinstance(phi,(int,int32,float,float64)):
# all args are vectors
phi = mat(phi)
n = numrows(phi)
theta = mat(theta)
psi = mat(psi)
if n>1:
R = []
for i in range(0,n):
r = rotz(phi[i,0]) * roty(theta[i,0]) * rotz(psi[i,0])
R.append(r)
return R
try:
r = rotz(phi[0,0]) * roty(theta[0,0]) * rotz(psi[0,0])
return r
except:
r = rotz(phi) * roty(theta) * rotz(psi)
return r
def eul2tr(phi,theta=None,psi=None):
"""
Rotation from Euler angles.
Two call forms:
- R = eul2tr(S{theta}, S{phi}, S{psi})
- R = eul2tr([S{theta}, S{phi}, S{psi}])
These correspond to rotations about the Z, Y, Z axes respectively.
@type phi: number or list/array/matrix of angles
@param phi: the first Euler angle, or a list/array/matrix of angles
@type theta: number
@param theta: the second Euler angle
@type psi: number
@param psi: the third Euler angle
@rtype: 4x4 homogenous matrix
@return: R([S{theta} S{phi} S{psi}])
@see: L{tr2eul}, L{eul2r}, L{tr2rpy}
"""
return r2t( eul2r(phi, theta, psi) )
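# Illustrative round trip (angles in radians, away from the singularity
# handled in tr2eul):
#   tr2eul(eul2tr(0.1, 0.2, 0.3)) -> matrix([[0.1, 0.2, 0.3]])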
################################## RPY angles
def tr2rpy(m):
"""
Extract RPY angles.
Returns a vector of RPY angles corresponding to the rotational part of
the homogeneous transform. The 3 angles correspond to rotations about
the Z, Y and X axes respectively.
@type m: 3x3 or 4x4 matrix
@param m: the rotation matrix
@rtype: 1x3 matrix
@return: RPY angles [S{theta} S{phi} S{psi}]
@see: L{rpy2tr}, L{tr2eul}
"""
try:
m = mat(m)
if ishomog(m):
rpy = mat(zeros((1,3)))
if norm(m[0,0])<finfo(float).eps and norm(m[1,0])<finfo(float).eps:
# singularity
rpy[0,0] = 0
rpy[0,1] = arctan2(-m[2,0], m[0,0])
rpy[0,2] = arctan2(-m[1,2], m[1,1])
return rpy
else:
rpy[0,0] = arctan2(m[1,0],m[0,0])
sp = sin(rpy[0,0])
cp = cos(rpy[0,0])
rpy[0,1] = arctan2(-m[2,0], cp*m[0,0] + sp*m[1,0])
rpy[0,2] = arctan2(sp*m[0,2] - cp*m[1,2], cp*m[1,1] - sp*m[0,1])
return rpy
except ValueError:
rpy = []
for i in range(0,len(m)):
rpy.append(tr2rpy(m[i]))
return rpy
def rpy2r(roll, pitch=None,yaw=None):
"""
Rotation from RPY angles.
Two call forms:
- R = rpy2r(S{theta}, S{phi}, S{psi})
- R = rpy2r([S{theta}, S{phi}, S{psi}])
These correspond to rotations about the Z, Y, X axes respectively.
@type roll: number or list/array/matrix of angles
@param roll: roll angle, or a list/array/matrix of angles
@type pitch: number
@param pitch: pitch angle
@type yaw: number
@param yaw: yaw angle
@rtype: 4x4 homogenous matrix
@return: R([S{theta} S{phi} S{psi}])
@see: L{tr2rpy}, L{rpy2r}, L{tr2eul}
"""
n=1
if pitch==None and yaw==None:
roll= mat(roll)
if numcols(roll) != 3:
error('bad arguments')
n = numrows(roll)
pitch = roll[:,1]
yaw = roll[:,2]
roll = roll[:,0]
if n>1:
R = []
for i in range(0,n):
r = rotz(roll[i,0]) * roty(pitch[i,0]) * rotx(yaw[i,0])
R.append(r)
return R
try:
r = rotz(roll[0,0]) * roty(pitch[0,0]) * rotx(yaw[0,0])
return r
except:
r = rotz(roll) * roty(pitch) * rotx(yaw)
return r
def rpy2tr(roll, pitch=None, yaw=None):
"""
Rotation from RPY angles.
Two call forms:
- R = rpy2tr(r, p, y)
- R = rpy2tr([r, p, y])
These correspond to rotations about the Z, Y, X axes respectively.
@type roll: number or list/array/matrix of angles
@param roll: roll angle, or a list/array/matrix of angles
@type pitch: number
@param pitch: pitch angle
@type yaw: number
@param yaw: yaw angle
@rtype: 4x4 homogenous matrix
@return: R([S{theta} S{phi} S{psi}])
@see: L{tr2rpy}, L{rpy2r}, L{tr2eul}
"""
return r2t( rpy2r(roll, pitch, yaw) )
###################################### OA vector form
def oa2r(o,a):
"""Rotation from 2 vectors.
The matrix is formed from 3 vectors such that::
R = [N O A] and N = O x A.
In robotics A is the approach vector, along the direction of the robot's
gripper, and O is the orientation vector in the direction between the
fingertips.
The submatrix is guaranteed to be orthonormal so long as O and A are
not parallel.
@type o: 3-vector
@param o: The orientation vector.
@type a: 3-vector
@param a: The approach vector
@rtype: 3x3 orthonormal rotation matrix
@return: Rotatation matrix
@see: L{rpy2r}, L{eul2r}
"""
n = crossp(o, a)
n = unit(n)
o = crossp(a, n);
o = unit(o).reshape(3,1)
a = unit(a).reshape(3,1)
return bmat('n o a')
def oa2tr(o,a):
"""otation from 2 vectors.
The rotation submatrix is formed from 3 vectors such that::
R = [N O A] and N = O x A.
In robotics A is the approach vector, along the direction of the robot's
gripper, and O is the orientation vector in the direction between the
fingertips.
The submatrix is guaranteed to be orthonormal so long as O and A are
not parallel.
@type o: 3-vector
@param o: The orientation vector.
@type a: 3-vector
@param a: The approach vector
@rtype: 4x4 homogeneous transformation matrix
@return: Transformation matrix
@see: L{rpy2tr}, L{eul2tr}
"""
return r2t(oa2r(o,a))
###################################### angle/vector form
def rotvec2r(theta, v):
"""
Rotation about arbitrary axis. Compute a rotation matrix representing
a rotation of C{theta} about the vector C{v}.
@type v: 3-vector
@param v: rotation vector
@type theta: number
@param theta: the rotation angle
@rtype: 3x3 orthonormal matrix
@return: rotation
@see: L{rotx}, L{roty}, L{rotz}
"""
v = arg2array(v);
ct = cos(theta)
st = sin(theta)
vt = 1-ct
r = mat([[ct, -v[2]*st, v[1]*st],\
[v[2]*st, ct, -v[0]*st],\
[-v[1]*st, v[0]*st, ct]])
return v*v.T*vt+r
def rotvec2tr(theta, v):
"""
Rotation about arbitrary axis. Compute a rotation matrix representing
a rotation of C{theta} about the vector C{v}.
@type v: 3-vector
@param v: rotation vector
@type theta: number
@param theta: the rotation angle
@rtype: 4x4 homogeneous matrix
@return: rotation
@see: L{trotx}, L{troty}, L{trotz}
"""
return r2t(rotvec2r(theta, v))
###################################### translational transform
def transl(x, y=None, z=None):
"""
Create or decompose translational homogeneous transformations.
Create a homogeneous transformation
===================================
- T = transl(v)
- T = transl(vx, vy, vz)
The transformation is created with a unit rotation submatrix.
The translational elements are set from elements of v which is
a list, array or matrix, or from separate passed elements.
Decompose a homogeneous transformation
======================================
- v = transl(T)
Return the translation vector
"""
if y==None and z==None:
x=mat(x)
try:
if ishomog(x):
return x[0:3,3].reshape(3,1)
else:
return concatenate((concatenate((eye(3),x.reshape(3,1)),1),mat([0,0,0,1])))
except AttributeError:
n=len(x)
r = [[],[],[]]
for i in range(n):
r = concatenate((r,x[i][0:3,3]),1)
return r
elif y!=None and z!=None:
return concatenate((concatenate((eye(3),mat([x,y,z]).T),1),mat([0,0,0,1])))
###################################### Skew symmetric transform
def skew(*args):
"""
Convert to/from skew-symmetric form. A skew symmetric matrix is a matrix
such that M = -M'
Two call forms
-ss = skew(v)
-v = skew(ss)
The first form builds a 3x3 skew-symmetric from a 3-element vector v.
The second form takes a 3x3 skew-symmetric matrix and returns the 3 unique
elements that it contains.
"""
def ss(b):
return matrix([
[0, -b[2], b[1]],
[b[2], 0, -b[0]],
[-b[1], b[0], 0]]);
if len(args) == 1:
# convert matrix to skew vector
b = args[0];
if isrot(b):
return 0.5*matrix( [b[2,1]-b[1,2], b[0,2]-b[2,0], b[1,0]-b[0,1]] );
elif ishomog(b):
return vstack( (b[0:3,3], 0.5*matrix( [b[2,1]-b[1,2], b[0,2]-b[2,0], b[1,0]-b[0,1]] ).T) );
# build skew-symmetric matrix
b = arg2array(b);
if len(b) == 3:
return ss(b);
elif len(b) == 6:
r = hstack( (ss(b[3:6]), mat(b[0:3]).T) );
r = vstack( (r, mat([0, 0, 0, 1])) );
return r;
elif len(args) == 3:
return ss(args);
elif len(args) == 6:
r = hstack( (ss(args[3:6]), mat(args[0:3]).T) );
r = vstack( (r, mat([0, 0, 0, 1])) );
return r;
else:
raise ValueError;
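# Illustrative round trip for the 3-vector form (assumes isrot() only checks
# the 3x3 shape of its argument):
#   skew([1, 2, 3])        -> 3x3 skew-symmetric matrix M (M == -M.T)
#   skew(skew([1, 2, 3]))  -> matrix([[1., 2., 3.]])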
def tr2diff(t1, t2):
"""
Convert a transform difference to differential representation.
Returns the 6-element differential motion required to move
from T1 to T2 in base coordinates.
@type t1: 4x4 homogeneous transform
@param t1: Initial value
@type t2: 4x4 homogeneous transform
@param t2: Final value
@rtype: 6-vector
@return: Differential motion [dx dy dz drx dry drz]
@see: L{skew}
"""
t1 = mat(t1)
t2 = mat(t2)
d = concatenate(
(t2[0:3,3]-t1[0:3,3],
0.5*( crossp(t1[0:3,0], t2[0:3,0]) +
crossp(t1[0:3,1], t2[0:3,1]) +
crossp(t1[0:3,2], t2[0:3,2]) )
))
return d
################################## Utility
def trinterp(T0, T1, r):
"""
Interpolate homogeneous transformations.
Compute a homogeneous transform interpolation between C{T0} and C{T1} as
C{r} varies from 0 to 1 such that::
trinterp(T0, T1, 0) = T0
trinterp(T0, T1, 1) = T1
Rotation is interpolated using quaternion spherical linear interpolation.
@type T0: 4x4 homogeneous transform
@param T0: Initial value
@type T1: 4x4 homogeneous transform
@param T1: Final value
@type r: number
@param r: Interpolation index, in the range 0 to 1 inclusive
@rtype: 4x4 homogeneous transform
@return: Interpolated value
@see: L{quaternion}, L{ctraj}
"""
q0 = Q.quaternion(T0)
q1 = Q.quaternion(T1)
p0 = transl(T0)
p1 = transl(T1)
qr = q0.interp(q1, r)
pr = p0*(1-r) + r*p1
return vstack( (concatenate((qr.r(),pr),1), mat([0,0,0,1])) )
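# Illustrative endpoints (T0 and T1 are 4x4 homogeneous transforms):
#   trinterp(T0, T1, 0.0) -> T0
#   trinterp(T0, T1, 1.0) -> T1
#   trinterp(T0, T1, 0.5) -> halfway pose (slerp on rotation, lerp on translation)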
def trnorm(t):
"""
Normalize a homogeneous transformation.
Finite word length arithmetic can cause transforms to become `unnormalized',
that is the rotation submatrix is no longer orthonormal (det(R) != 1).
The rotation submatrix is re-orthogonalized such that the approach vector
(third column) is unchanged in direction::
N = O x A
O = A x N
@type t: 4x4 homogeneous transformation
@param t: the transform matrix to convert
@rtype: 3x3 orthonormal rotation matrix
@return: rotation submatrix
@see: L{oa2tr}
@bug: Should work for 3x3 matrix as well.
"""
t = mat(t) # N O A
n = crossp(t[0:3,1],t[0:3,2]) # N = O X A
o = crossp(t[0:3,2],t[0:3,0]) # O = A x N
return concatenate(( concatenate((unit(n),unit(t[0:3,1]),unit(t[0:3,2]),t[0:3,3]),1),
mat([0,0,0,1])))
def t2r(T):
"""
Return rotational submatrix of a homogeneous transformation.
@type T: 4x4 homogeneous transformation
@param T: the transform matrix to convert
@rtype: 3x3 orthonormal rotation matrix
@return: rotation submatrix
"""
    if not ishomog(T):
        error('input must be a homogeneous transform')
return T[0:3,0:3]
def r2t(R):
"""
Convert a 3x3 orthonormal rotation matrix to a 4x4 homogeneous transformation::
T = | R 0 |
| 0 1 |
@type R: 3x3 orthonormal rotation matrix
@param R: the rotation matrix to convert
@rtype: 4x4 homogeneous matrix
@return: homogeneous equivalent
"""
return concatenate( (concatenate( (R, zeros((3,1))),1), mat([0,0,0,1])) )
| [
"[email protected]"
] | |
5244256cfaf82bd7735b6e8a555dc572ce428f38 | d8d8fce19c88edc68f295c3ea0756ffe8576f982 | /bin/reportPatches.py | 2e8e58a6693daec37fd9b438f3cc2d819371309a | [] | no_license | mjuric/lsst-pipe_tasks | 4f178efd11b930d4c6bf3ed4ebce896ad8402537 | 585fa1b78ea99306edc9f89f98f0ce6618400240 | refs/heads/master | 2021-01-01T05:31:53.037714 | 2013-02-13T18:05:35 | 2013-02-13T18:05:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,206 | py | #!/usr/bin/env python
#
# LSST Data Management System
# Copyright 2008, 2009, 2010, 2011, 2012 LSST Corporation.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program. If not,
# see <http://www.lsstcorp.org/LegalNotices/>.
#
"""Select images and report which tracts and patches they are in
@warning: this is a very basic start. Misfeatures include:
- Only reports the best tract and patch containing the center of each image;
a proper implementation will report all tracts and patches that overlap each image
- One must specify a patch and tract even though those arguments are ignored.
"""
import numpy
import lsst.pex.config as pexConfig
import lsst.afw.coord as afwCoord
import lsst.afw.geom as afwGeom
import lsst.pipe.base as pipeBase
from lsst.pipe.tasks.makeSkyMap import MakeSkyMapTask
__all__ = ["ReportPatchesTask", "ReportPatchesArgumentParser"]
class ReportPatchesConfig(pexConfig.Config):
"""Config for ReportPatchesTask
"""
coaddName = pexConfig.Field(
doc = "coadd name: one of deep or goodSeeing",
dtype = str,
default = "deep",
)
raDecRange = pexConfig.ListField(
doc = "min RA, min Dec, max RA, max Dec (ICRS, deg)",
dtype = float,
length = 4,
)
class ReportPatchesTask(pipeBase.CmdLineTask):
"""Report which tracts and patches are needed for coaddition
"""
ConfigClass = ReportPatchesConfig
_DefaultName = "reportPatches"
def __init__(self, *args, **kwargs):
pipeBase.CmdLineTask.__init__(self, *args, **kwargs)
@pipeBase.timeMethod
def run(self, dataRef):
"""Report tracts and patches that are within a given region of a skymap
@param dataRef: data reference for sky map.
@return: a pipeBase.Struct with fields:
- ccdInfoSetDict: a dict of (tractId, patchIndex): set of CcdExposureInfo
"""
skyMap = dataRef.get(self.config.coaddName + "Coadd_skyMap")
# make coords in the correct order to form an enclosed space
raRange = (self.config.raDecRange[0], self.config.raDecRange[2])
decRange = (self.config.raDecRange[1], self.config.raDecRange[3])
raDecList = [
(raRange[0], decRange[0]),
(raRange[1], decRange[0]),
(raRange[1], decRange[1]),
(raRange[0], decRange[1]),
]
coordList = [
afwCoord.IcrsCoord(afwGeom.Angle(ra, afwGeom.degrees), afwGeom.Angle(dec, afwGeom.degrees))
for ra, dec in raDecList]
tractPatchList = skyMap.findTractPatchList(coordList)
for tractInfo, patchInfoList in tractPatchList:
for patchInfo in patchInfoList:
patchIndex = patchInfo.getIndex()
print "tract=%d patch=%d,%d" % (tractInfo.getId(), patchIndex[0], patchIndex[1])
@classmethod
def _makeArgumentParser(cls):
"""Create an argument parser
Use datasetType="deepCoadd" to get the right keys (even chi-squared coadds
need filter information for this particular task).
"""
return ReportPatchesArgumentParser(name=cls._DefaultName, datasetType="deepCoadd")
def _getConfigName(self):
"""Don't persist config, so return None
"""
return None
def _getMetadataName(self):
"""Don't persist metadata, so return None
"""
return None
class ReportPatchesArgumentParser(pipeBase.ArgumentParser):
"""A version of lsst.pipe.base.ArgumentParser specialized for reporting images.
    Required because there is no dataset type that has exactly the right keys for this task.
datasetType = namespace.config.coaddName + "Coadd" comes closest, but includes "patch" and "tract",
which are irrelevant to the task, but required to make a data reference of this dataset type.
Also required because butler.subset cannot handle this dataset type.
"""
def _makeDataRefList(self, namespace):
"""Make namespace.dataRefList from namespace.dataIdList
"""
datasetType = namespace.config.coaddName + "Coadd"
namespace.dataRefList = []
for dataId in namespace.dataIdList:
expandedDataId = dict(patch=0, tract=(0,0))
expandedDataId.update(dataId)
dataRef = namespace.butler.dataRef(
datasetType = datasetType,
dataId = expandedDataId,
)
namespace.dataRefList.append(dataRef)
if __name__ == "__main__":
ReportPatchesTask.parseAndRun()
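# Illustrative invocation (repo path and values are examples; tract/patch must be
# supplied even though they are ignored -- see the module docstring; the exact
# --config syntax for the list field depends on the pipe_base version in use):
#   reportPatches.py /path/to/repo --id tract=0 patch=0,0 filter=r \
#       --config "raDecRange=[10.0, -5.0, 11.0, -4.0]"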
| [
"[email protected]"
] | |
902e6ddb8c5ff647d175b814fc0a296e4e136f3e | 1315e1c8357f1bae712db6e3ebd3e76902173959 | /src/app/agents/authorize.py | ddd0fa9a459158d404a713525161a2f8c71b4033 | [] | no_license | jldupont/musync | e2e68d85db40c9eb4f0369c25a4b73426b1d54c0 | b52908b263ec7e18d1433dc27fa75e092fa415aa | refs/heads/master | 2021-01-23T21:37:51.762597 | 2010-08-27T01:29:12 | 2010-08-27T01:29:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,442 | py | """
Authorization Agent
Responsible for handling the authorization process
with the user
Messages Processed:
- "start_authorize"
- "start_verify"
- "oauth_error"
- "oauth?"
Messages Generated:
- "error_requesttoken"
- "error_webbrowser"
- "error_accesstoken"
Created on 2010-08-15
@author: jldupont
"""
__all__=["AuthorizeAgent"]
import oauth.oauth as oauth
import httplib
import webbrowser
from app.system.base import AgentThreadedBase
from app.system.state import StateManager
class OauthClient(object):
gREQUEST_TOKEN_URL = 'OAuthGetRequestToken'
gACCESS_TOKEN_URL = 'OAuthGetAccessToken'
gAUTHORIZATION_URL = 'OAuthAuthorizeToken'
def __init__(self, server, port, base):
self.server=server
self.port=port
self.base=base
self.request_token_url=self.base+self.gREQUEST_TOKEN_URL
self.access_token_url=self.base+self.gACCESS_TOKEN_URL
self.authorize_token_url=self.base+self.gAUTHORIZATION_URL
self.connection = httplib.HTTPConnection("%s:%d" % (self.server, self.port))
def fetch_request_token(self, oauth_request):
self.connection.request(oauth_request.http_method, self.request_token_url, headers=oauth_request.to_header())
response = self.connection.getresponse()
return oauth.OAuthToken.from_string(response.read())
def fetch_access_token(self, oauth_request):
self.connection.request(oauth_request.http_method, self.access_token_url, headers=oauth_request.to_header())
response = self.connection.getresponse()
return oauth.OAuthToken.from_string(response.read())
def authorize_token(self, oauth_request):
self.connection.request(oauth_request.http_method, oauth_request.to_url())
response = self.connection.getresponse()
return response.read()
class AuthorizeAgent(AgentThreadedBase):
CALLBACK_URL = "oob"
REQUEST_TOKEN="oauth_request_token"
ACCESS_TOKEN_KEY="oauth_access_token_key"
ACCESS_TOKEN_SECRET="oauth_access_token_secret"
VERIFICATION_CODE="oauth_verification_code"
def __init__(self, app_name, server, port, consumer_key, consumer_secret, base):
"""
@param interval: interval in seconds
"""
AgentThreadedBase.__init__(self)
self.server=server
self.port=port
self.base=base
self.consumer_key=consumer_key
self.consumer_secret=consumer_secret
self.app_name=app_name
self.client=OauthClient(server, port, base)
self.consumer=None
self.signature_method_plaintext = oauth.OAuthSignatureMethod_PLAINTEXT()
self.signature_method_hmac_sha1 = oauth.OAuthSignatureMethod_HMAC_SHA1()
self.token=None
self.sm=StateManager(self.app_name)
def h_start_authorize(self, *_):
try:
self.token=None
self.consumer = oauth.OAuthConsumer(self.consumer_key, self.consumer_secret)
oauth_request = oauth.OAuthRequest.from_consumer_and_token(self.consumer,
callback=self.CALLBACK_URL,
http_url=self.client.request_token_url)
oauth_request.sign_request(self.signature_method_hmac_sha1, self.consumer, None)
self.token = self.client.fetch_request_token(oauth_request)
oauth_request = oauth.OAuthRequest.from_token_and_callback(token=self.token,
http_url=self.client.authorize_token_url)
url= oauth_request.to_url()
self.sm.save(self.REQUEST_TOKEN, self.token)
except Exception,e:
self.pub("error_requesttoken", e)
self.pub("log", "warning", "Authorization: 'RequestToken' failed: "+str(e))
return
self.pub("log", "getting authorization from url: "+url)
try:
webbrowser.open(url)
print url
        except Exception,e:
            self.pub("error_webbrowser", e)
            self.pub("log", "error", "Opening url(%s): %s" % (url, e))
def h_start_verify(self, verificationCode):
"""
Got verification code from user
Attempting to retrieve "access token"
"""
try:
self.consumer = oauth.OAuthConsumer(self.consumer_key, self.consumer_secret)
oauth_request = oauth.OAuthRequest.from_consumer_and_token(self.consumer, token=self.token,
verifier=verificationCode,
http_url=self.client.access_token_url)
oauth_request.sign_request(self.signature_method_hmac_sha1, self.consumer, self.token)
self.atoken = self.client.fetch_access_token(oauth_request)
except Exception,e:
self.atoken=None
self.sm.save(self.ACCESS_TOKEN_KEY, "")
self.sm.save(self.ACCESS_TOKEN_SECRET, "")
self.pub("oauth", None, None)
self.pub("error_accesstoken", e)
self.pub("log", "warning", "Verification: 'AccessToken' failed: "+str(e))
return
finally:
self.sm.save(self.VERIFICATION_CODE, verificationCode)
try:
key=self.atoken.key
secret=self.atoken.secret
self.pub("oauth", key, secret)
self.pub("log", "oauth: key: %s secret: %s" % (key, secret))
self.sm.save(self.ACCESS_TOKEN_KEY, key)
self.sm.save(self.ACCESS_TOKEN_SECRET, secret)
        except Exception,e:
            self.sm.save(self.ACCESS_TOKEN_KEY, "")
            self.sm.save(self.ACCESS_TOKEN_SECRET, "")
            self.pub("log", "warning", "Verification: 'AccessToken' failed: "+str(e))
    def h_oauth_error(self, *_):
        """
        An oauth level error occurred - reset access token
        """
        self.sm.save(self.ACCESS_TOKEN_KEY, "")
        self.sm.save(self.ACCESS_TOKEN_SECRET, "")
        self.sm.save(self.VERIFICATION_CODE, "")
def hq_oauth(self):
key=self.sm.retrieve(self.ACCESS_TOKEN_KEY)
secret=self.sm.retrieve(self.ACCESS_TOKEN_SECRET)
self.pub("oauth", key, secret)
"""
_=AuthorizeAgent()
_.start()
""" | [
"[email protected]"
] | |
2c2198547b61fdbeb366057c6b3ffc9759df27f8 | 5963c12367490ffc01c9905c028d1d5480078dec | /tests/components/met/test_init.py | 64323af56ce222c79f5d0d50a696796b676ae555 | [
"Apache-2.0"
] | permissive | BenWoodford/home-assistant | eb03f73165d11935e8d6a9756272014267d7d66a | 2fee32fce03bc49e86cf2e7b741a15621a97cce5 | refs/heads/dev | 2023-03-05T06:13:30.354545 | 2021-07-18T09:51:53 | 2021-07-18T09:51:53 | 117,122,037 | 11 | 6 | Apache-2.0 | 2023-02-22T06:16:51 | 2018-01-11T16:10:19 | Python | UTF-8 | Python | false | false | 1,381 | py | """Test the Met integration init."""
from homeassistant.components.met.const import (
DEFAULT_HOME_LATITUDE,
DEFAULT_HOME_LONGITUDE,
DOMAIN,
)
from homeassistant.config import async_process_ha_core_config
from homeassistant.config_entries import ConfigEntryState
from . import init_integration
async def test_unload_entry(hass):
"""Test successful unload of entry."""
entry = await init_integration(hass)
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
assert entry.state is ConfigEntryState.LOADED
assert await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert entry.state is ConfigEntryState.NOT_LOADED
assert not hass.data.get(DOMAIN)
async def test_fail_default_home_entry(hass, caplog):
"""Test abort setup of default home location."""
await async_process_ha_core_config(
hass,
{"latitude": 52.3731339, "longitude": 4.8903147},
)
assert hass.config.latitude == DEFAULT_HOME_LATITUDE
assert hass.config.longitude == DEFAULT_HOME_LONGITUDE
entry = await init_integration(hass, track_home=True)
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
assert entry.state is ConfigEntryState.SETUP_ERROR
assert (
"Skip setting up met.no integration; No Home location has been set"
in caplog.text
)
| [
"[email protected]"
] | |
7a0f7b03eaaf6ee2ded1913f70ceb02941f42851 | ad080bd1612b980490ef2d1b61647cbc6beddf5d | /my_game/diplomacy/send_mail.py | 89f7c3c35034326843b74ea6aeb2f25eeefc6298 | [] | no_license | rokealva83/my_game | 8f915076986144234950aa4443e8bc51ad019664 | 76ecc1dbf60c7f93621ddca66d62d5fea2826d0e | refs/heads/master | 2020-12-24T17:54:59.491881 | 2016-05-10T20:06:53 | 2016-05-10T20:06:53 | 29,264,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,021 | py | # -*- coding: utf-8 -*-
from datetime import datetime
from django.shortcuts import render
from my_game.models import MyUser, UserCity, Warehouse
from my_game import function
from my_game.models import Mail
def send_mail(request):
if "live" not in request.session:
return render(request, "index.html", {})
else:
session_user = MyUser.objects.filter(id=int(request.session['user'])).first()
session_user_city = UserCity.objects.filter(id=int(request.session['user_city'])).first()
function.check_all_queues(session_user)
target = request.POST.get('message_target')
target_name = MyUser.objects.filter(user_name=target).first()
message = ''
if target_name is None:
            message = 'No such user'
else:
title = request.POST.get('title')
mail = request.POST.get('message')
user = MyUser.objects.filter(user_id=session_user).first()
user_name = user.user_name
new_mail = Mail(
user=target_name.user_id,
recipient=session_user,
time=datetime.now(),
status=1,
category=1,
login_recipient=user_name,
title=title,
message=mail
)
new_mail.save()
mails = Mail.objects.filter(user=session_user).order_by('category', '-time')
warehouses = Warehouse.objects.filter(user=session_user, user_city=session_user_city).order_by('resource_id')
user_citys = UserCity.objects.filter(user=session_user)
request.session['user'] = session_user.id
request.session['user_city'] = session_user_city.id
request.session['live'] = True
output = {'user': session_user, 'warehouses': warehouses, 'user_city': session_user_city,
'user_citys': user_citys, 'mails': mails, 'message': message}
return render(request, "diplomacy.html", output)
| [
"[email protected]"
] | |
a97607aa70412fb502d24b6319285ac72592a6b5 | f662bd04d2f29ef25bbfd7e768b1e57dfbba4d9f | /apps/plmejoras/migrations/0002_plan_mejoras_activo.py | 2d3ce174039be499a21756157156df72a31334f2 | [] | no_license | DARKDEYMON/sisevadoc | f59b193688f7eca7c140a03ee414f5d20ada78c7 | 9fc0943200986824a2aab2134fdba5c9f3315798 | refs/heads/master | 2020-03-19T03:27:07.907125 | 2019-12-11T13:30:43 | 2019-12-11T13:30:43 | 135,729,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | # Generated by Django 2.0.8 on 2019-02-13 15:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('plmejoras', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='plan_mejoras',
name='activo',
field=models.BooleanField(default=True),
),
]
| [
"[email protected]"
] | |
4f2e66526c5ab51faf1f6d381c56f55f00e4bf5d | fa76868608739eb514c7bf9cb3be6ca1a0283409 | /l3-patterns+descriptors+metaclasses/lesson/abstract_classes.py | f4ac7bd0eb21302ca449c521709e5df6c6f295a9 | [] | no_license | k1r91/course2 | efa4b200f19798275251d1b737613cf4560e3f47 | a4b0413030e17d37406feb8f58314356e3ab15e3 | refs/heads/master | 2021-08-16T04:04:26.796036 | 2018-10-23T17:04:54 | 2018-10-23T17:04:54 | 135,111,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | from abc import ABCMeta, abstractmethod, abstractproperty
class Foo(metaclass=ABCMeta):
@abstractmethod
def spam(self, a, b):
pass
@property
@abstractmethod
    def name(self):
pass
class Grok:
pass
Foo.register(Grok)
g = Grok()
print(isinstance(g, Foo)) | [
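# A minimal follow-on sketch (not part of the original lesson): a concrete
# subclass must implement every abstract member before it can be instantiated.
class Bar(Foo):
    def spam(self, a, b):
        return a + b
    @property
    def name(self):
        return "bar"
b = Bar()
print(b.spam(1, 2))  # 3
print(b.name)        # bar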
"[email protected]"
] | |
a351715c2f009f811b5f12fe749143736ea6a79e | 9269bbcf34563ba16602b693858cae2908c8505c | /Python/racy/plugins/libext/sconsbuilders/mkdir.py | 10013540ef0215f405a79ce0f9b3c4578f6cb368 | [
"BSD-3-Clause"
] | permissive | cfobel/sconspiracy | 4bfe4066731ecbfb781d17d3014c5b4bdb201396 | 478876b2b033d313085a33ac0f7647da18a8439a | refs/heads/master | 2021-01-04T22:32:52.809083 | 2012-02-21T15:11:35 | 2012-02-21T15:11:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 737 | py | # -*- coding: UTF8 -*-
import os
import SCons.Node
import utils
def MkdirArgs(target, source, env):
args = []
args.extend(env.get('ARGS',[]))
args = map(env.subst, args)
return args
@utils.marker_decorator
def Mkdir(target, source, env):
for d in MkdirArgs(target, source, env):
env.Execute(SCons.Script.Mkdir(env.Dir(d)))
return None
def MkdirString(target, source, env):
""" Information string for Mkdir """
args = MkdirArgs(target, source, env)
return env.subst('[${CURRENT_PROJECT}]: mkdir ') + ' '.join(args)
def generate(env):
action = SCons.Action.Action(Mkdir, MkdirString)
builder = env.Builder( action = action )
env.Append(BUILDERS = {'Mkdir' : builder})
| [
"none@none"
] | none@none |
91da0924f0be6bd28259ad79770de110838e7057 | 8f5aa55a8860a33290692a3455b75bc512a369bb | /controller/report.py | d485cbb30f9896976f82b80d90c8db5039b42b09 | [] | no_license | Trafire/PurchaseReports | c683072712988f50154f6bf301e0e82b8ef92d4e | 71f2ae13b366d186fef9c524cd443b78c46cdb6f | refs/heads/master | 2023-02-20T12:16:32.207351 | 2021-01-17T17:57:32 | 2021-01-17T17:57:32 | 330,453,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,294 | py | from autof2.dailytasks import purchaselist
from openpyxl import Workbook
from openpyxl.worksheet.table import Table
from openpyxl.worksheet.table import TableStyleInfo
from openpyxl import load_workbook
import openpyxl
import datetime, time
from datetime import date
from autof2.interface import send_data
from autof2.navigation import navigation
import os
import os.path
def get_date_sunday(year, week):
week -= 1
d = str(year) + '-W' + str(week)
return datetime.datetime.strptime(d + '-1', "%Y-W%W-%w") - datetime.timedelta(days=1)
def get_today():
year = datetime.date.today().strftime("%Y")
week = datetime.date.today().strftime("%W")
day = datetime.date.today().strftime("%w")
d = str(year) + '-W' + str(week)
return datetime.datetime.strptime(d + '-' + day, "%Y-W%W-%w")
def get_current_week(add=0):
date = (datetime.datetime.now() + datetime.timedelta(days=add * 7)).isocalendar()
if date[2] == 7:
        d = datetime.datetime.now() + datetime.timedelta(days=add * 7 + 7)
date = d.isocalendar()
week = date[1]
year = date[0]
return (year, week)
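# Illustrative values (assuming a Wednesday in mid-January 2021):
#   get_current_week()      -> (2021, 2)   (year, week) containing today
#   get_current_week(add=1) -> (2021, 3)   one week ahead
# A Sunday is rolled forward so it counts toward the following week.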
def get_order_week(year, week):
current = get_date_sunday(year, week)
product = []
print("\nstarting Week %i:" % week)
for i in range(7):
str_date = current.strftime('%d/%m/%y')
if current >= get_today():
print("\tprocessing day - %s" % current.strftime('%d/%m/%y'), end=" ")
try:
new_product = purchaselist.run_all_purchase_list_report(str_date, str_date)
except:
new_product = purchaselist.run_all_purchase_list_report(str_date, str_date)
for p in new_product:
p.date = str_date
print(" lines found = %i" % len(new_product))
product.extend(new_product)
## print(current.strftime('%d/%m/%y'))
send = send_data.SendData()
send.send('{LEFT}')
current += datetime.timedelta(days=1)
print("Week %i total lines = %i" % (week, len(product)))
return product
def make_order_sheet(wb, product, year, week):
ws = wb.active
rows = 1
ws.append(product[0].excel_heading())
for line in product:
ws.append(line.excel_data())
rows += 1
right_corner = chr(64 + len(product[0].excel_heading())) + str(rows)
# define a table style
mediumStyle = openpyxl.worksheet.table.TableStyleInfo(name='TableStyleMedium2',
showRowStripes=True)
# create a table
table = openpyxl.worksheet.table.Table(ref='A1:' + right_corner,
displayName='orders',
tableStyleInfo=mediumStyle)
# add the table to the worksheet
ws.title = "orders"
ws.add_table(table)
def go_to_purchase_list():
for i in range(10):
if navigation.to_purchase_list():
return True
return False
def get_filename(directory, year, week):
directory += '\\%s\\week %s' % (year, week)
if not os.path.exists(directory):
os.makedirs(directory)
# save the workbook file
return directory + '\\week ' + str(week) + ' orders' + '.xlsx'
def create_report(year, week, directory = os.getcwd()):
    if go_to_purchase_list():
print(year,week)
product = get_order_week(year, week)
product.sort()
if product:
wb = Workbook()
make_order_sheet(wb, product, year, week)
# create directory
filename = get_filename(directory, year, week)
## filename = "test2.xlsx"
bought = {}
if os.path.isfile(filename):
wb2 = load_workbook(filename)
            a = wb2['purchases']
index = 0
for row in a.rows:
if index == 0:
index += 1
categories_order = row
else:
p = {}
for i in range(len(categories_order)):
p[categories_order[i].value] = row[i].value
if p['PurchaseID'] in bought:
bought[p['PurchaseID']]['Confirmed'] += p['Confirmed']
else:
bought[p['PurchaseID']] = p
bought[p['PurchaseID']]['Ordered'] = 0
for p in product:
if p.key not in bought:
bought[p.key] = p.excel_order_dict_vers()
else:
bought[p.key]['Ordered'] += p.quantity
product_list = []
for b in bought:
product_list.append(bought[b])
## wb = Workbook()
ws2 = wb.create_sheet()
rows = 1
headings = (
"PurchaseID", "f2_supplier", "Category", "Variety", "Colour", "Grade", "Supplier", "Price", "Ordered",
"Confirmed")
ws2.append(headings + ("Total",))
for line in bought:
l = []
for h in headings:
l.append(bought[line][h])
l.append("=J%s - I%s" % (rows + 1, rows + 1))
ws2.append(l)
rows += 1
right_corner = chr(64 + 1 + len(product[0].excel_order_headings())) + str(rows)
# define a table style
mediumStyle = openpyxl.worksheet.table.TableStyleInfo(name='TableStyleMedium2',
showRowStripes=True)
# create a table
table = openpyxl.worksheet.table.Table(ref='A1:' + right_corner,
displayName='purchases',
tableStyleInfo=mediumStyle)
# add the table to the worksheet
ws2.title = "purchases"
ws2.add_table(table)
# save the workbook file
## wb.save('test_1'.replace(':','-').replace('.','-') + '.xlsx')
##
try:
wb.save(filename)
except:
print("did not save")
| [
"[email protected]"
] | |
6ee75371d97b8eceea349d31764cc8281dbc0158 | 325fde42058b2b82f8a4020048ff910cfdf737d7 | /src/azure-firewall/azext_firewall/vendored_sdks/v2020_07_01/v2020_07_01/operations/_route_tables_operations.py | 95eaeaf558739622adfc86655864fb7d3ec3ccbc | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | ebencarek/azure-cli-extensions | 46b0d18fe536fe5884b00d7ffa30f54c7d6887d1 | 42491b284e38f8853712a5af01836f83b04a1aa8 | refs/heads/master | 2023-04-12T00:28:44.828652 | 2021-03-30T22:34:13 | 2021-03-30T22:34:13 | 261,621,934 | 2 | 5 | MIT | 2020-10-09T18:21:52 | 2020-05-06T01:25:58 | Python | UTF-8 | Python | false | false | 22,755 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class RouteTablesOperations(object):
"""RouteTablesOperations operations.
You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2020-07-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2020-07-01"
self.config = config
def _delete_initial(
self, resource_group_name, route_table_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, route_table_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}
def get(
self, resource_group_name, route_table_name, expand=None, custom_headers=None, raw=False, **operation_config):
"""Gets the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param expand: Expands referenced resources.
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: RouteTable or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2020_07_01.models.RouteTable or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RouteTable', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}
def _create_or_update_initial(
self, resource_group_name, route_table_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'RouteTable')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RouteTable', response)
if response.status_code == 201:
deserialized = self._deserialize('RouteTable', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, route_table_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Create or updates a route table in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param parameters: Parameters supplied to the create or update route
table operation.
:type parameters: ~azure.mgmt.network.v2020_07_01.models.RouteTable
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns RouteTable or
ClientRawResponse<RouteTable> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2020_07_01.models.RouteTable]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2020_07_01.models.RouteTable]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('RouteTable', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}
def update_tags(
self, resource_group_name, route_table_name, tags=None, custom_headers=None, raw=False, **operation_config):
"""Updates a route table tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: RouteTable or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2020_07_01.models.RouteTable or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.TagsObject(tags=tags)
# Construct URL
url = self.update_tags.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'TagsObject')
# Construct and send request
request = self._client.patch(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RouteTable', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all route tables in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of RouteTable
:rtype:
~azure.mgmt.network.v2020_07_01.models.RouteTablePaged[~azure.mgmt.network.v2020_07_01.models.RouteTable]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.RouteTablePaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables'}
def list_all(
self, custom_headers=None, raw=False, **operation_config):
"""Gets all route tables in a subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of RouteTable
:rtype:
~azure.mgmt.network.v2020_07_01.models.RouteTablePaged[~azure.mgmt.network.v2020_07_01.models.RouteTable]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_all.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.RouteTablePaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeTables'}
| [
"[email protected]"
] | |
be877c7774e1bb701cf61046bdf12f27d5bf2d0f | 5b3090dece7d3d276922f53bfba18fdff3a5ba12 | /app/base/config.py | 4fa810801a20b3d033452130877cc6a43e3b5644 | [
"MIT"
] | permissive | HsOjo/PyJSONEditor | 338978b36a545982bec7285ba1de9aa5704f39b0 | c2cf5398fa569ba0575048f3deebbf23028a61a1 | refs/heads/master | 2020-06-30T00:35:40.215143 | 2019-10-15T11:27:01 | 2019-10-15T11:27:01 | 200,668,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,775 | py | import base64
import json
import os
import platform
import sys
from app.res.const import Const
from app.util import object_convert
from app.util.log import Log
sys_type = platform.system()
if sys_type == 'Darwin':
CONFIG_NAME = ('com.%s.%s' % (Const.author, Const.app_name)).lower()
CONFIG_PATH = os.path.expanduser('~/Library/Application Support/%s' % CONFIG_NAME)
else:
CONFIG_NAME = '%s.cfg' % Const.app_name
CONFIG_PATH = '%s/%s' % (os.path.dirname(sys.executable), CONFIG_NAME)
class ConfigBase:
_protect_fields = []
_config_path = CONFIG_PATH
language = 'en'
def load(self):
try:
if os.path.exists(self._config_path):
with open(self._config_path, 'r') as io:
config = json.load(io)
for f in self._protect_fields:
config[f] = base64.b64decode(config[f][::-1].encode()).decode()
object_convert.dict_to_object(config, self, new_fields=False)
replaces = dict([(getattr(self, f), Const.protector) for f in self._protect_fields])
Log.set_replaces(replaces)
Log.append('config_load', 'Info', object_convert.object_to_dict(self))
except:
self.save()
def save(self):
with open(self._config_path, 'w') as io:
config = object_convert.object_to_dict(self)
for f in self._protect_fields:
config[f] = base64.b64encode(config[f].encode()).decode()[::-1]
json.dump(config, io, indent=' ')
Log.append('config_save', 'Info', object_convert.object_to_dict(self))
def clear(self):
if os.path.exists(self._config_path):
os.unlink(self._config_path)
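# Hypothetical subclass sketch: persisted fields are plain class attributes and
# names listed in _protect_fields are base64-obfuscated on disk.
#   class Config(ConfigBase):
#       _protect_fields = ['token']
#       token = ''
#   config = Config()
#   config.load()
#   config.token = 'secret'
#   config.save()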
| [
"[email protected]"
] | |
d60f887276a626cc23bd15b52d1b2af930c4090c | badf813b23670f38233a2f66031df33b12d6685c | /tests/test_plotting.py | 28ec689c4c5b8ed7e4ea260c1a73f340c0a70458 | [
"MIT"
] | permissive | healthonrails/annolid | 6ef2de72bc666e247ae51ae1a5df3d75337fc28c | 730f7dff2239ef716841390311b5b9250149acaf | refs/heads/main | 2023-09-01T20:52:14.857248 | 2023-09-01T14:34:34 | 2023-09-01T14:34:34 | 290,017,987 | 25 | 8 | MIT | 2022-05-03T14:36:21 | 2020-08-24T19:14:07 | Jupyter Notebook | UTF-8 | Python | false | false | 843 | py | import os
import numpy as np
import pandas as pd
from annolid.postprocessing.plotting import plot_trajactory
def test_plot_trajactory():
tracking_csv = '/tmp/tracking.csv'
cx = np.random.randint(0, 100, size=100)
cy = np.random.randint(0, 100, size=100)
instance_name = ['mouse'] * 100
df = pd.DataFrame({'cx': cx,
'cy': cy,
'instance_name': instance_name})
df.to_csv(tracking_csv, index=False)
plot_trajactory(tracking_csv, instance_name="mouse",
title="Trajectory",
xlabel="X position for instance centroid",
ylabel="Y position for instance centroid",
save_path='/tmp/trajectory.png',
trajactory_color_style='b-')
assert os.path.isfile('/tmp/trajectory.png')
| [
"[email protected]"
] | |
23e636f36c7413ef55ccef2d4ace1aa86d27543e | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Games/Python wow/models/items/loot_table.py | c59db565ea77b475438d958eda7505fb8b440c3c | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:71adc2c7aa6f6a777843ec5491e3b43f745ce5167f6810246ac3946d4ec95a0b
size 6943
| [
"[email protected]"
] | |
6f94a4b8651fca9e5f6dfe42b57d55cebbf1eaab | 3851a5f2233aa68ae98aa4cd813e0a6dcbda464e | /spider/jiaoben/anjvkexzl (1).py | 62d19c7f400f3ca1116d5840a905ace18a722522 | [] | no_license | scmsqhn/yunying | 976a2c9fff98613361d4b28719080d9e4d8112dc | 3c30b6985ac974bc75d50e8abe0b69174fb46700 | refs/heads/master | 2021-01-19T21:06:21.778902 | 2017-04-25T09:14:00 | 2017-04-25T09:14:00 | 88,607,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,631 | py | # -*- coding: utf-8 -*-
import scrapy
import re
import time
from lxml import etree
from scrapy import log
import random
import requests
from myscrapy.items import AnjvkexzlItem  # item class used in detail_url below
import logging
'''
1. logging.CRITICAL - for critical errors (highest severity)
2. logging.ERROR - for regular errors
3. logging.WARNING - for warnings and errors
4. logging.INFO - for informational messages, warnings and errors
5. logging.DEBUG - for debugging messages (lowest severity)
'''
logging.warning("This is a warning")
logging.log(logging.WARNING,"This is a warning")
from myscrapy.middlewares import agents
from myscrapy.middlewares import proxys
class AnjvkexzlSpider(scrapy.Spider):
name = "anjvkexzl"
allowed_domains = ["cd.xzl.anjuke.com"]
start_urls = 'http://cd.xzl.anjuke.com/'
handle_httpstatus_list = [111, 404, 500]
def dum(self):
time.sleep(random.randint(1, 3))
def start_requests(self):
user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.22 \
Safari/537.36 SE 2.X MetaSr 1.0'
headers = {'User-Agent': user_agent}
yield scrapy.Request(url=self.start_urls, headers=headers, method='GET', callback=self.parse)
def parse(self, response):
if response.status in self.handle_httpstatus_list:
proxy = random.choice(proxys)
agent = random.choice(agents)
            headers = {'User-Agent': agent}
            metas = "http://%s" % proxy['ip_port']
            self.logger.info('=================START_parse_404===================')
            self.logger.info('Spider opened: %s, proxy: %s' % (self.name, metas))
            self.logger.info('retrying request: %s' % response.url)
            self.logger.info('=================END_parse_404===================')
            yield scrapy.Request(url=response.url, headers=headers, callback=self.parse, meta={"proxy": metas}, method='GET', dont_filter=True)
else:
            lists = response.body.decode('utf-8')
            selector = etree.HTML(lists)
            area_list = selector.xpath('/html/body/div[5]/div[2]/div/div[1]/div/a')
            # skip the first two anchors (assumed to be "all"/"metro" filter links)
            for area in area_list[2:]:
                area_url = response.urljoin(area.get('href'))
                print(area_url)
                self.log('Parse function called on %s' % area_url, level=log.INFO)
                yield scrapy.Request(url=area_url, callback=self.detail_url, dont_filter=True)
#'http://cd.lianjia.com/ershoufang/dongcheng/pg2/'
    def detail_url(self, response):
        self.dum()
        contents = etree.HTML(response.body.decode('utf-8'))
        # The detail page lays its attributes out over two <ul> columns inside
        # #fy_info; the (ul, li) positions below are assumptions about that layout.
        def cell(col, row):
            values = contents.xpath('//*[@id="fy_info"]/ul[%d]/li[%d]/span[2]/text()' % (col, row))
            return values.pop() if values else ''
        item = AnjvkexzlItem()
        item['zizujin'] = cell(1, 1)
        item['yuezujin'] = cell(1, 2)
        item['loupan'] = cell(1, 3)
        item['dizhi'] = cell(1, 4)
        item['ditie'] = cell(1, 5)
        item['jzmianji'] = cell(2, 1)
        item['louceng'] = cell(2, 2)
        item['gongweishu'] = cell(2, 3)
        item['wuye'] = cell(2, 4)
        item['pingjia'] = contents.xpath('//*[@id="xzl_desc"]/div/text()')
        self.logger.info('item is %s' % item)
        # the coordinates are embedded in an inline <script> block
        latitude = contents.xpath('/html/body/script[11]/text()').pop()
        j = re.search(r'lat: "([^"]+)"', latitude)
        w = re.search(r'lng: "([^"]+)"', latitude)
        if j and w:
            item['jwd'] = [j.group(1), w.group(1)]
        yield item
| [
"[email protected]"
] | |
5cf0ddbe1552569d850eeea5f21edb458b930f1b | 7f73b32886f69e34dcef53b6593727effdc2fdf5 | /sentence_transformers/models/WordEmbeddings.py | a235b3af1000f3ad8951d66c310cefcb3f74575c | [
"Apache-2.0"
] | permissive | gabbage/sentence-transformers | bac116f35b5ba61bc64f35149a1963db851e5552 | 4a5308479bbb0bac7c0f60a3b2f6a01ebdfa2aa0 | refs/heads/master | 2020-07-07T01:46:56.442696 | 2019-08-19T16:24:56 | 2019-08-19T16:24:56 | 203,205,790 | 0 | 0 | null | 2019-08-19T16:03:58 | 2019-08-19T16:03:58 | null | UTF-8 | Python | false | false | 5,836 | py | import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
import logging
import gzip
from tqdm import tqdm
import numpy as np
import os
import json
from ..util import import_from_string, fullname, http_get
from .tokenizer import WordTokenizer, WhitespaceTokenizer
class WordEmbeddings(nn.Module):
def __init__(self, tokenizer: WordTokenizer, embedding_weights, update_embeddings: bool = False, max_seq_length: int = 1000000):
nn.Module.__init__(self)
if isinstance(embedding_weights, list):
embedding_weights = np.asarray(embedding_weights)
if isinstance(embedding_weights, np.ndarray):
embedding_weights = torch.from_numpy(embedding_weights)
num_embeddings, embeddings_dimension = embedding_weights.size()
self.embeddings_dimension = embeddings_dimension
self.emb_layer = nn.Embedding(num_embeddings, embeddings_dimension)
self.emb_layer.load_state_dict({'weight': embedding_weights})
self.emb_layer.weight.requires_grad = update_embeddings
self.tokenizer = tokenizer
self.update_embeddings = update_embeddings
self.max_seq_length = max_seq_length
def forward(self, features):
token_embeddings = self.emb_layer(features['input_ids'])
cls_tokens = None
features.update({'token_embeddings': token_embeddings, 'cls_token_embeddings': cls_tokens, 'input_mask': features['input_mask']})
return features
def get_sentence_features(self, tokens: List[str], pad_seq_length: int):
pad_seq_length = min(pad_seq_length, self.max_seq_length)
tokens = tokens[0:pad_seq_length] #Truncate tokens if needed
input_ids = tokens
sentence_length = len(input_ids)
input_mask = [1] * len(input_ids)
padding = [0] * (pad_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
assert len(input_ids) == pad_seq_length
assert len(input_mask) == pad_seq_length
        return {'input_ids': input_ids, 'input_mask': input_mask, 'sentence_lengths': sentence_length}
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str) -> List[str]:
return self.tokenizer.tokenize(text)
def save(self, output_path: str):
with open(os.path.join(output_path, 'wordembedding_config.json'), 'w') as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
torch.save(self.state_dict(), os.path.join(output_path, 'pytorch_model.bin'))
self.tokenizer.save(output_path)
def get_config_dict(self):
return {'tokenizer_class': fullname(self.tokenizer), 'update_embeddings': self.update_embeddings, 'max_seq_length': self.max_seq_length}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, 'wordembedding_config.json'), 'r') as fIn:
config = json.load(fIn)
tokenizer_class = import_from_string(config['tokenizer_class'])
tokenizer = tokenizer_class.load(input_path)
weights = torch.load(os.path.join(input_path, 'pytorch_model.bin'))
embedding_weights = weights['emb_layer.weight']
model = WordEmbeddings(tokenizer=tokenizer, embedding_weights=embedding_weights, update_embeddings=config['update_embeddings'])
return model
@staticmethod
def from_text_file(embeddings_file_path: str, update_embeddings: bool = False, item_separator: str = " ", tokenizer=WhitespaceTokenizer(), max_vocab_size: int = None):
logging.info("Read in embeddings file {}".format(embeddings_file_path))
if not os.path.exists(embeddings_file_path):
logging.info("{} does not exist, try to download from server".format(embeddings_file_path))
if '/' in embeddings_file_path or '\\' in embeddings_file_path:
raise ValueError("Embeddings file not found: ".format(embeddings_file_path))
url = "https://public.ukp.informatik.tu-darmstadt.de/reimers/embeddings/"+embeddings_file_path
http_get(url, embeddings_file_path)
embeddings_dimension = None
vocab = []
embeddings = []
with gzip.open(embeddings_file_path, "rt", encoding="utf8") if embeddings_file_path.endswith('.gz') else open(embeddings_file_path, encoding="utf8") as fIn:
iterator = tqdm(fIn, desc="Load Word Embeddings", unit="Embeddings")
for line in iterator:
split = line.rstrip().split(item_separator)
word = split[0]
                if embeddings_dimension is None:
embeddings_dimension = len(split) - 1
vocab.append("PADDING_TOKEN")
embeddings.append(np.zeros(embeddings_dimension))
if (len(split) - 1) != embeddings_dimension: # Assure that all lines in the embeddings file are of the same length
logging.error("ERROR: A line in the embeddings file had more or less dimensions than expected. Skip token.")
continue
vector = np.array([float(num) for num in split[1:]])
embeddings.append(vector)
vocab.append(word)
if max_vocab_size is not None and max_vocab_size > 0 and len(vocab) > max_vocab_size:
break
embeddings = np.asarray(embeddings)
tokenizer.set_vocab(vocab)
return WordEmbeddings(tokenizer=tokenizer, embedding_weights=embeddings, update_embeddings=update_embeddings)
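# Minimal usage sketch (the embeddings file name is an assumption):
#   emb = WordEmbeddings.from_text_file('glove.6B.300d.txt.gz')
#   tokens = emb.tokenize('hello world')
#   feats = emb.get_sentence_features(tokens, pad_seq_length=16)
# forward() then adds 'token_embeddings' for the downstream pooling layer.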
| [
"[email protected]"
] | |
882cb593206aad566ecbacb6fa9144344bd399b9 | 4111ca5a73a22174f189361bef654c3f91c3b7ed | /Lintcode/Ladder_11_15_A/134. LRU Cache.py | abb130048a64c5c6df4397b67d81b496d32268fd | [
"MIT"
] | permissive | ctc316/algorithm-python | 58b541b654509ecf4e9eb8deebfcbdf785699cc4 | ac4580d55e05e93e407c6156c9bb801808027d60 | refs/heads/master | 2020-03-16T06:09:50.130146 | 2019-08-02T02:50:49 | 2019-08-02T02:50:49 | 132,548,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,788 | py | class Node:
def __init__(self, key="", val=-1, prev=None, next=None):
self.key = key
self.val = val
self.prev = prev
self.next = next
class LRUCache:
"""
@param: capacity: An integer
"""
def __init__(self, capacity):
self.capacity = capacity
self.mapping = {}
self.head = None
self.tail = None
"""
@param: key: An integer
@return: An integer
"""
def get(self, key):
if key not in self.mapping:
return -1
node = self.mapping[key]
self.__moveToHead(node)
return node.val
"""
@param: key: An integer
@param: value: An integer
@return: nothing
"""
def set(self, key, value):
if key not in self.mapping:
if len(self.mapping) >= self.capacity:
self.removeTail()
new_node = Node(key, value, None, self.head)
self.mapping[key] = new_node
if self.head:
self.head.prev = new_node
self.head = new_node
if self.tail is None:
self.tail = self.head
else:
node = self.mapping[key]
node.val = value
self.__moveToHead(node)
def __moveToHead(self, node):
if node is self.head:
return
if node.prev:
node.prev.next = node.next
if node.next:
node.next.prev = node.prev
if node is self.tail:
self.tail = node.prev
self.head.prev = node
node.next = self.head
self.head = node
def removeTail(self):
if self.tail.prev:
self.tail.prev.next = None
del self.mapping[self.tail.key]
self.tail = self.tail.prev
| [
"[email protected]"
] | |
1743aa0b591c3eb8da10ea9d4d5551356ad61da9 | 4510bbf54e2ca619c3a863f5ca03df6584585402 | /tfx/examples/custom_components/container_components/download_grep_print_pipeline.py | 2ce8d69992f2a165d0a9b87bd887d7450af33c60 | [
"Apache-2.0"
] | permissive | Mdlglobal-atlassian-net/tfx | e55f38336d1989ac970b5069c7128097ed86b422 | 37cbbb95c65e1a891045dd13232a7f2a293a7b70 | refs/heads/master | 2022-10-02T07:44:41.180873 | 2020-06-01T18:49:15 | 2020-06-01T18:49:53 | 268,607,840 | 0 | 1 | Apache-2.0 | 2020-06-01T19:01:51 | 2020-06-01T19:01:50 | null | UTF-8 | Python | false | false | 4,376 | py | # Lint as: python2, python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Container-based pipeline sample."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Text
from tfx.dsl.component.experimental import container_component
from tfx.dsl.component.experimental import placeholders
from tfx.types import standard_artifacts
downloader_component = container_component.create_container_component(
name='DownloadFromHttp',
outputs={
'data': standard_artifacts.ExternalArtifact,
},
parameters={
'url': str,
},
# The component code uses gsutil to upload the data to GCS, so the
# container image needs to have gsutil installed and configured.
# Fixing b/150670779 by merging cl/294536017 will lift this limitation.
image='google/cloud-sdk:278.0.0',
command=[
'sh', '-exc',
'''
url="$0"
output_data_uri="$1"
output_data_path=$(mktemp)
# Running the main code
wget "$0" -O "$output_data_path" || curl "$0" > "$output_data_path"
# Getting data out of the container
gsutil cp "$output_data_path" "$output_data_uri"
''',
placeholders.InputValuePlaceholder('url'),
placeholders.OutputUriPlaceholder('data'),
],
)
grep_component = container_component.create_container_component(
name='FilterWithGrep',
inputs={
'text': standard_artifacts.ExternalArtifact,
},
outputs={
'filtered_text': standard_artifacts.ExternalArtifact,
},
parameters={
'pattern': str,
},
# The component code uses gsutil to upload the data to GCS, so the
# container image needs to have gsutil installed and configured.
# Fixing b/150670779 by merging cl/294536017 will lift this limitation.
image='google/cloud-sdk:278.0.0',
command=[
'sh', '-exc',
'''
pattern="$0"
text_uri="$1"
text_path=$(mktemp)
filtered_text_uri="$2"
filtered_text_path=$(mktemp)
# Getting data into the container
gsutil cp "$text_uri" "$text_path"
# Running the main code
grep "$pattern" "$text_path" >"$filtered_text_path"
# Getting data out of the container
gsutil cp "$filtered_text_path" "$filtered_text_uri"
''',
placeholders.InputValuePlaceholder('pattern'),
placeholders.InputUriPlaceholder('text'),
placeholders.OutputUriPlaceholder('filtered_text'),
],
)
print_component = container_component.create_container_component(
name='Print',
inputs={
'text': standard_artifacts.ExternalArtifact,
},
# The component code uses gsutil to upload the data to GCS, so the
# container image needs to have gsutil installed and configured.
# Fixing b/150670779 by merging cl/294536017 will lift this limitation.
image='google/cloud-sdk:278.0.0',
command=[
'sh', '-exc',
'''
text_uri="$0"
text_path=$(mktemp)
# Getting data into the container
gsutil cp "$text_uri" "$text_path"
# Running the main code
cat "$text_path"
''',
placeholders.InputUriPlaceholder('text'),
],
)
def create_pipeline_component_instances(text_url: Text, pattern: Text):
"""Creates tasks for the download_grep_print pipeline."""
downloader_task = downloader_component(url=text_url)
grep_task = grep_component(
text=downloader_task.outputs['data'],
pattern=pattern,
)
print_task = print_component(
text=grep_task.outputs['filtered_text'],
)
component_instances = [
downloader_task,
grep_task,
print_task,
]
return component_instances
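# Hypothetical wiring (pipeline name and root are assumptions): the instances
# can be handed to a TFX pipeline object, e.g.
#   from tfx.orchestration import pipeline
#   p = pipeline.Pipeline(
#       pipeline_name='download-grep-print',
#       pipeline_root='/tmp/pipeline_root',
#       components=create_pipeline_component_instances(
#           text_url='https://example.com/data.txt', pattern='foo'))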
| [
"[email protected]"
] | |
327e3bd7f32ec9065be455843c7a3ed5b6283fed | 4703856e735a81b43232bf47c8e1b0e7c29cc714 | /charities/serializers.py | 9a4a0bd37d128ac5bcd4edd3d9b128283e9a1c87 | [
"MIT"
] | permissive | salmanAndroidDev/charity-app | 6a367e8e16b55db20f3624559547c33299155285 | f2ea53c91c9cf46a63af6d3bef211c75dd5219bc | refs/heads/main | 2023-03-17T04:31:55.291455 | 2021-03-04T19:45:07 | 2021-03-04T19:45:07 | 344,589,781 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | from rest_framework import serializers
from .models import Benefactor
from .models import Charity, Task
class BenefactorSerializer(serializers.ModelSerializer):
"""Serializer class for benefactor object"""
class Meta:
model = Benefactor
fields = ('experience', 'free_time_per_week')
class CharitySerializer(serializers.ModelSerializer):
"""Serializer class for charity object"""
class Meta:
model = Charity
fields = ('name', 'reg_number')
class TaskSerializer(serializers.ModelSerializer):
"""Serializer class for Task object"""
class Meta:
model = Task
        fields = '__all__'
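# Usage sketch (field values are assumptions):
#   s = CharitySerializer(data={'name': 'Helping Hands', 'reg_number': '12345'})
#   s.is_valid()        # -> True when both fields validate
#   s.validated_data    # -> {'name': 'Helping Hands', 'reg_number': '12345'}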
| [
"[email protected]"
] | |
1d324d45ec8ac267e4dac7c06c7c9077ccda5aef | 3861d9f9c68eb0b09c46b9a10b92fca8fa608a23 | /Pygame/Snake/snake.py | e75117e333e90732a4c9f093dbd85a426892d47e | [] | no_license | vuquangtam/Apps | 3bbd8125dda67210862b114e3961f3d78676a06b | 94ba79e87b914595937efc95d60d8531172c87fa | refs/heads/master | 2021-01-22T13:08:22.922382 | 2016-02-01T02:51:49 | 2016-02-01T02:51:49 | 32,475,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,763 | py | import pygame, random, os
BLOCK_WIDTH = 40
BLOCK_HEIGHT = 40
WINDOW_WIDTH = 1280
WINDOW_HEIGHT = 600
BLOCK_X_MAX = int(WINDOW_WIDTH / BLOCK_WIDTH)
BLOCK_Y_MAX = int(WINDOW_HEIGHT / BLOCK_HEIGHT)
LENGHT_OF_SNAKE = 5
START_POSITION_X = 10
START_POSITION_Y = 10
SNAKE_SPEED = 1
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
pygame.init()
screen = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
folder = "data"
head_sprite = pygame.image.load(os.path.join(folder, "head_sprite.png")).convert_alpha()
head_sprite = pygame.transform.scale(head_sprite, (BLOCK_WIDTH, BLOCK_HEIGHT))
apple_sprite = pygame.image.load(os.path.join(folder, "apple.png")).convert_alpha()
apple_sprite = pygame.transform.scale(apple_sprite, (BLOCK_WIDTH, BLOCK_HEIGHT))
background = pygame.image.load(os.path.join(folder, "background.jpg")).convert()
background = pygame.transform.scale(background, (WINDOW_WIDTH, WINDOW_HEIGHT))
class Block(pygame.sprite.Sprite):
previous_part = None
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface((int(BLOCK_WIDTH), int(BLOCK_HEIGHT))).convert()
self.image.set_colorkey(BLACK)
pygame.draw.circle(self.image, YELLOW, (BLOCK_WIDTH // 2, BLOCK_HEIGHT // 2), BLOCK_WIDTH // 2, 0)
self.rect = self.image.get_rect()
self.oldx = self.rect.x
self.oldy = self.rect.y
def update(self):
self.oldx = self.rect.x
self.oldy = self.rect.y
self.rect.x, self.rect.y = self.previous_part.oldpos()
def oldpos(self):
return self.oldx, self.oldy
class Head(Block):
def __init__(self):
Block.__init__(self)
self.image = pygame.Surface((int(BLOCK_WIDTH), int(BLOCK_HEIGHT)))
self.image.blit(head_sprite, (0, 0))
self.image.set_colorkey(WHITE)
self.rect = self.image.get_rect()
self.dx = SNAKE_SPEED
self.dy = 0
def update(self):
self.oldx = self.rect.x
self.oldy = self.rect.y
key = pygame.key.get_pressed()
if key[pygame.K_UP]:
self.dx = 0
if self.dy != SNAKE_SPEED:
self.dy = -SNAKE_SPEED
elif key[pygame.K_DOWN]:
self.dx = 0
if self.dy != -SNAKE_SPEED:
self.dy = SNAKE_SPEED
elif key[pygame.K_LEFT]:
self.dy = 0
if self.dx != SNAKE_SPEED:
self.dx = -SNAKE_SPEED
elif key[pygame.K_RIGHT]:
self.dy = 0
if self.dx != -SNAKE_SPEED:
self.dx = SNAKE_SPEED
self.rect.x += int(self.dx) * BLOCK_WIDTH
self.rect.y += int(self.dy) * BLOCK_HEIGHT
class Apple(pygame.sprite.Sprite):
def __init__(self, headOfSnake):
pygame.sprite.Sprite.__init__(self)
self.headOfSnake = headOfSnake
self.image = pygame.Surface((int(BLOCK_WIDTH), int(BLOCK_HEIGHT))).convert()
self.image.blit(apple_sprite, (0, 0))
self.image.set_colorkey(WHITE)
self.rect = self.image.get_rect()
self.reset()
def reset(self):
self.rect.x, self.rect.y = random.randint(1, BLOCK_X_MAX - 1), random.randint(1, BLOCK_Y_MAX - 1)
self.rect.x *= BLOCK_WIDTH
self.rect.y *= BLOCK_HEIGHT
def update(self):
return self.rect.x == self.headOfSnake.rect.x and self.rect.y == self.headOfSnake.rect.y
def drawLine(screen):
for x in range(0, WINDOW_WIDTH, BLOCK_WIDTH):
pygame.draw.line(screen, WHITE,(x, 0), (x, WINDOW_HEIGHT))
for y in range(0, WINDOW_HEIGHT, BLOCK_HEIGHT):
pygame.draw.line(screen, WHITE,(0, y), (WINDOW_WIDTH, y))
def getText(text, color):
font = pygame.font.SysFont(None, 50)
textSurf = font.render(text, 1, color)
return textSurf
clock = pygame.time.Clock()
level = 8
snake = pygame.sprite.Group()
all_sprite = pygame.sprite.Group()
snake_list = []
head = Head()
head.rect.x = (START_POSITION_X + LENGHT_OF_SNAKE) * BLOCK_WIDTH
head.rect.y = START_POSITION_Y * BLOCK_HEIGHT
all_sprite.add(head)
previous = head
snake_list.append(head)
apple = Apple(head)
for x in range(START_POSITION_X + LENGHT_OF_SNAKE - 1, START_POSITION_X, -1):
block = Block()
block.rect.x = x * BLOCK_WIDTH
block.rect.y = START_POSITION_Y * BLOCK_HEIGHT
block.previous_part = previous
previous = block
snake.add(block)
all_sprite.add(block)
snake_list.append(block)
all_sprite.add(apple)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
            pygame.quit()
            raise SystemExit
for block in snake_list:
block.update()
if apple.update():
apple.reset()
tail = snake_list[-1]
block = Block()
block.previous_part = tail
block.update()
snake.add(block)
all_sprite.add(block)
snake_list.append(block)
        print(len(snake_list))
if len(pygame.sprite.spritecollide(head, snake, False)):
        pygame.quit()
        raise SystemExit
if head.rect.x < 0 : head.rect.x = BLOCK_X_MAX * BLOCK_WIDTH
elif head.rect.x > BLOCK_X_MAX * BLOCK_WIDTH : head.rect.x = 0
if head.rect.y < 0 : head.rect.y = BLOCK_Y_MAX * BLOCK_HEIGHT
elif head.rect.y > BLOCK_Y_MAX * BLOCK_HEIGHT : head.rect.y = 0
screen.blit(background, (0,0))
drawLine(screen)
all_sprite.draw(screen)
screen.blit(getText('Score : %s'%(len(snake_list) - LENGHT_OF_SNAKE), BLUE), (10, 10))
pygame.display.flip()
clock.tick(level)
| [
"[email protected]"
] | |
f82270579338afb628549cc0faca8293c5922f33 | 60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24 | /IronPythonStubs/release/stubs.min/Autodesk/Revit/DB/__init___parts/FaceSecondDerivatives.py | a8db8408a607733b344af34107b62ddde5d1bfee | [
"MIT"
] | permissive | shnlmn/Rhino-Grasshopper-Scripts | a9411098c5d1bbc55feb782def565d535b27b709 | 0e43c3c1d09fb12cdbd86a3c4e2ba49982e0f823 | refs/heads/master | 2020-04-10T18:59:43.518140 | 2020-04-08T02:49:07 | 2020-04-08T02:49:07 | 161,219,695 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,656 | py | class FaceSecondDerivatives(object,IDisposable):
""" Contains second partial derivatives of a face at a specified point. """
def Dispose(self):
""" Dispose(self: FaceSecondDerivatives) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: FaceSecondDerivatives,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: FaceSecondDerivatives) -> bool
"""
MixedDerivative=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The mixed derivative.
Get: MixedDerivative(self: FaceSecondDerivatives) -> XYZ
"""
UUDerivative=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The second derivative with respect to U.
Get: UUDerivative(self: FaceSecondDerivatives) -> XYZ
"""
VVDerivative=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The second derivative with respect to V.
Get: VVDerivative(self: FaceSecondDerivatives) -> XYZ
"""
| [
"[email protected]"
] | |
f7a2b88e5951ed5c9599d5fa6f35931526001d5d | e84020108a7037d8d4867d95fada1b72cbcbcd25 | /src/chattisgarh/misc/processNregaAssets.py | 00148cbca88667705777386bf35810bf5a749c0f | [] | no_license | rajesh241/libtech | 8384316051a2e8c2d4a925cd43216b855b82e4d9 | 0105e717357a3626106028adae9bf162a7f93fbf | refs/heads/master | 2022-12-10T03:09:00.048841 | 2020-06-14T09:39:04 | 2020-06-14T09:39:04 | 24,629,538 | 1 | 1 | null | 2022-12-08T02:26:11 | 2014-09-30T07:57:45 | Python | UTF-8 | Python | false | false | 4,969 | py | import csv
from bs4 import BeautifulSoup
import requests
import os
import time
import re
import sys
import urllib2
import MySQLdb
import time
import re
import os
import sys
import os.path
fileDir=os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, fileDir+'/../../includes/')
from settings import dbhost,dbuser,dbpasswd,sid,token
#Getting the block code
#inblock=sys.argv[1]
#print inblock
#Connect to MySQL Database
def main():
datadir='/home/libtech/webroot/chaupalDataDashboard/reports/general/chattisgarhNregaAssets/KOREA/'
workCodes=['AV','SK','CA','DP','FR','FP','FG','LD','IC','OP','PG','WH','RC','DW','RS','WC','IF']
workNames=['anganwadi','bharatNirmanRajeevGandhiSewaKendra','costalAreas','droughtProofing','fisheries','floodControlProtection','foodGrains','landDevelopment','microIrrigationWorks','otherWorks','playGround','renovationTraditionalWaterBodies','ruralConnectivity','ruralDrinkingWater','ruralSanitation','waterConservationWaterHarvesting','worksIndividualLand']
finYears=['2012-2013','2013-2014','2014-2015','2015-2016']
blockNames=['AMBIKAPUR','BATAULI','LAKHANPUR','LUNDRA','MAINPAT','SITAPUR','UDAIPUR']
blockCodes=['3305001','3305007','3305002','3305005','3305008','3305006','3305003']
db = MySQLdb.connect(host=dbhost, user=dbuser, passwd=dbpasswd, db="korea",charset='utf8')
cur=db.cursor()
db.autocommit(True)
#Query to set up Database to read Hindi Characters
query="SET NAMES utf8"
cur.execute(query)
query="select stateCode,districtCode,blockCode,name from blocks"
# query="select stateCode,districtCode,blockCode,name from blocks where blockCode='005'"
cur.execute(query)
results=cur.fetchall()
for row in results:
fullBlockCode=row[0]+row[1]+row[2]
blockCode=row[2]
blockName=row[3]
print fullBlockCode+blockName
query="select workCode,description from workCodes where workCode='DP'"
query="select workCode,description from workCodes "
cur.execute(query)
results1=cur.fetchall()
for row1 in results1:
workDescription=row1[1]
finYears=['2012-2013','2013-2014','2014-2015','2015-2016']
# finYears=['2012-2013']
for finyear in finYears:
assetfilename=datadir+blockName+"/"+finyear+"/"+workDescription+".html"
print assetfilename
if (os.path.isfile(assetfilename)):
assethtml=open(assetfilename,'r').read()
# assethtml=re.sub(regex,"</font></td>",assethtml1)
else:
assethtml="Timeout expired"
htmlsoup=BeautifulSoup(assethtml)
try:
foundtable=htmlsoup.find('table',id="Table2")
table = foundtable.findNext('table')
rows = table.findAll('tr')
errorflag=0
except:
errorflag=1
if errorflag==0:
i=0
for tr in rows:
cols = tr.findAll('td')
print "Length of Columns ="+str(len(cols))
if len(cols) == 11:
block="".join(cols[2].text.split())
panchayat="".join(cols[3].text.split())
worknameworkcode=cols[4].text
print worknameworkcode.encode("UTF-8")
executingLevel="".join(cols[5].text.split())
completionDateString="".join(cols[6].text.split())
laborComponent="".join(cols[7].text.split())
materialComponent="".join(cols[8].text.split())
actualLaborExpense="".join(cols[9].text.split())
actualMaterialExpense="".join(cols[10].text.split())
if completionDateString != '':
completionDate = time.strptime(completionDateString, '%d/%m/%Y')
completionDate = time.strftime('%Y-%m-%d %H:%M:%S', completionDate)
else:
completionDate=''
worknameworkcodearray=re.match(r'(.*)\(3306(.*)\)',worknameworkcode)
if worknameworkcodearray:
workName=worknameworkcodearray.groups()[0]
workCode='3306'+worknameworkcodearray.groups()[1]
query="insert into assets (blockCode,block,panchayat,fullfinyear,executingLevel,workCode,workName,completionDate,laborComponent,materialComponent,actualLaborExpense,actualMaterialExpense) values ('%s','%s','%s','%s','%s','%s','%s','%s',%s,%s,%s,%s) " % (blockCode,blockName,panchayat,finyear,executingLevel,workCode,workName,completionDate,str(laborComponent),str(materialComponent),str(actualLaborExpense),str(actualMaterialExpense))
#print query.encode("UTF-8")
try:
cur.execute(query)
except MySQLdb.IntegrityError,e:
errormessage=(time.strftime("%d/%m/%Y %H:%M:%S "))+str(e)+"\n"
continue
i=i+1
#print str(i)+block+panchayat+workCode.encode("UTF-8")
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
f9637053b972322b18327b19537200a6e0b6944e | 4027d8dafb6f60568f03357e329c09262161e963 | /machinelearn/neural_network/logistic.py | f68ba50920fdc46698497029c196f4d0f14b714c | [] | no_license | pentiumCM/machinelearn | a2bfa15d6e9f20fd604116f77186da76ebcc4f27 | 329bb9521b5e06e3471aa209fc87ca47f8d5fdcb | refs/heads/master | 2022-12-08T23:43:05.784930 | 2021-05-24T04:02:23 | 2021-05-24T04:02:23 | 216,704,188 | 7 | 1 | null | 2022-12-08T09:30:07 | 2019-10-22T02:13:45 | Python | UTF-8 | Python | false | false | 6,241 | py | #!/usr/bin/env python
# encoding: utf-8
'''
@Author : pentiumCM
@Email : [email protected]
@Software: PyCharm
@File : logistic.py
@Time : 2019/10/27 21:49
@desc : Single-layer perceptron (neural network) algorithm based on logistic regression
'''
import numpy as np
import matplotlib.pyplot as plt
# 1. Activation function
def sigmoid(z):
"""
    The sigmoid activation function.
    :param z: input value
:return: sigmoid(z)
"""
return 1 / (1 + np.exp(-z))
# 2. Initialize parameters; for logistic regression the weights can be initialized to zero
def init_param_withZeros(dim):
"""
    Initialize the weights and bias.
    :param dim: input dimension
    :return: the initialized w and b
             w: a (dim, 1) vector
             b: a scalar
"""
w = np.zeros((dim, 1))
b = 0
return w, b
# 3. Forward propagation
def forward_propagate(w, b, X, Y):
"""
    Backpropagation has two parts: a forward pass and a backward pass. The forward pass computes the
    network's output; the backward pass uses gradient descent to reduce the error function.
    :param w: weight vector
    :param b: bias
    :param X: input data vectors
    :param Y: input labels. Logistic regression is binary classification, so Y is 0/1
    :return: the gradients and the value of the cost function
"""
    # number of input samples
    num = X.shape[1]
    Z = np.dot(w.T, X) + b  # z = wT X + b. Vectorization computes all samples at once, removing explicit for loops
    # forward pass
    A = sigmoid(Z)  # predictions for the whole training set, A = [a1, a2, ..., am]
    # loss function: cross-entropy
    loss = Y * np.log(A) + (1 - Y) * np.log(1 - A)
    # the cost is a function of w and b: the mean of the per-sample losses, measuring how well w, b fit the training set
    cost = -1 / num * np.sum(loss)
    # backward pass: derivatives with respect to the weights w and bias b
    dz = A - Y  # dz = [a1-y1, ..., am-ym]
    dw = 1 / num * np.dot(X, dz.T)  # dw = 1/m * [x1 * dz1 + ... + xm * dzm]
    db = 1 / num * np.sum(dz)  # db = 1/m * (dz1 + dz2 + ... + dzm)
    # store dw and db in a dict
gradients = {"dw": dw,
"db": db}
return gradients, cost
# 4. Backward propagation: use gradient descent to find the w and b that minimize the cost function.
def backward_propagate(w, b, X, Y, iters, learning_rate):
"""
    The backward pass uses gradient descent to reduce the error function.
    :param w: initial weights
    :param b: initial bias
    :param X: input data
    :param Y: input data labels
    :param iters: number of training iterations
    :param learning_rate: learning rate
    :return: weights w, bias b, gradients, and the cost history
"""
    # history of cost values
    costs = []
    for i in range(iters):
        # compute the gradients and the current cost
        gradients, cost = forward_propagate(w, b, X, Y)
        # fetch the partial derivatives: dw and db are the mean derivatives of the loss over all samples
        dw = gradients["dw"]
        db = gradients["db"]
        # update the weights w and bias b
        w = w - learning_rate * dw
        b = b - learning_rate * db
        # record and print the loss
costs.append(cost)
print("The cost in the %d th iteration is %f" % (i, cost))
gradients = {"dw": dw,
"db": db}
return w, b, gradients, costs
# 5. Prediction
def predict(w, b, X):
"""
    Prediction has two parts. First compute the output: A = sigmoid(np.dot(w.T, X) + b).
    Then compare the output with 0.5: above 0.5 predicts 1, otherwise 0.
    :param w: trained weights
    :param b: trained bias
    :param X: input data
    :return: predicted labels for the input data
"""
    # number of input samples m
    m = X.shape[1]
    # holds the prediction results
    Y_prediction = np.zeros((1, m))
    w = w.reshape(X.shape[0], 1)
    # neuron output
    A = sigmoid(np.dot(w.T, X) + b)
    # run the predictions
for i in range(A.shape[1]):
if A[0, i] > 0.5:
Y_prediction[0, i] = 1
else:
Y_prediction[0, i] = 0
return Y_prediction
# scatter plot of points in the XOY plane
def plot_dis_data(x, y, color, graph_name):
'''
    Show the distribution of the points as a scatter plot.
    :param x: x coordinates
    :param y: y coordinates
    :param color: point colors
    :param graph_name: title of the chart
:return:
'''
plt.scatter(x, y, s=15, c=color)
plt.title(graph_name)
plt.show()
# line chart in the XOY plane
def plot_line_chart(data, xlabel, ylabel, graph_name):
plt.plot(data)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(graph_name)
plt.show()
if __name__ == "__main__":
    # X gives the horizontal coordinates and Y the vertical ones, i.e. two features x1 and x2
X = [0, 1.5, 2, 2, 2.25, 2.8, 3.2, 4]
Y = [1.5, 3, 4, 3, 0.5, 2.8, 1.35, 4]
label = [1, 1, 1, 1, 0, 0, 0, 0]
    # class 1 is blue, class 0 is red
label_color = ['blue', 'red']
color = []
for i in label:
if i == 1:
color.append(label_color[0])
else:
color.append(label_color[1])
    # plot the raw data distribution with pyplot
plot_dis_data(X, Y, color, 'Raw Data')
    # normalize the data
X = np.array(X)
Y = np.array(Y)
X = (X - np.average(X))
Y = (Y - np.average(Y))
X = X / X.max()
Y = Y / Y.max()
    # plot the normalized data distribution with pyplot
plot_dis_data(X, Y, color, 'Normalization Data')
data_X = np.vstack((X, Y))
data_label = np.array([label])
    # parameter setup
w = []
b = []
Y_prediction = []
    iters = 50  # number of training iterations
    learning_rate = 0.5  # learning rate
    w, b = init_param_withZeros(data_X.shape[0])  # initialize w and b
    # start training
    w, b, gradients, costs = backward_propagate(
        w, b, data_X, data_label, iters, learning_rate)
    Y_prediction = predict(w, b, data_X)
    # plot the cost-vs-iterations line chart with pyplot
plot_line_chart(
costs,
'iterations',
'cost',
"Learning rate =" +
str(learning_rate))
    # test with user input
    point = input("Please enter a coordinate pair:\n")
    # parse the coordinates
    x = int(point.split(' ')[0])
    y = int(point.split(' ')[1])
    point_data = np.vstack((x, y))
    point_prediction = predict(w, b, point_data)
print("The point is below to", end=" ")
print(point_prediction[0, 0])
| [
"[email protected]"
] | |
63ac7ec011e053eafb40f25971c5e3fa3ad6defd | 60b1f668808de2b82c2fcb62b07b45bb165219f2 | /egoi-api/models/saved_segment_all_of_segment_filter_segment_filter_array.py | 65c329a2a43a5f7602bf347fbbdcf0adc025b8a1 | [] | no_license | andersonmiguel/Egoi | 6d37bf7a3a7555e764f7a6e792b3ef1c68fe8e20 | b5f59f9b33ea94e170f4e7e26c6a37a78d2874c2 | refs/heads/master | 2022-06-21T07:18:44.920786 | 2020-05-04T17:29:02 | 2020-05-04T17:29:02 | 261,250,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,177 | py | # coding: utf-8
"""
APIv3 (Beta)
# Introduction Just a quick peek!!! This is our new version of API. Remember, it is not stable yet!!! But we invite you play with it and give us your feedback ;) # Getting Started E-goi can be integrated with many environments and programming languages via our REST API. We've created a developer focused portal to give your organization a clear and quick overview of how to integrate with E-goi. The developer portal focuses on scenarios for integration and flow of events. We recommend familiarizing yourself with all of the content in the developer portal, before start using our rest API. The E-goi APIv3 is served over HTTPS. To ensure data privacy, unencrypted HTTP is not supported. Request data is passed to the API by POSTing JSON objects to the API endpoints with the appropriate parameters. BaseURL = api.egoiapp.com # RESTful Services This API supports 5 HTTP methods: * <b>GET</b>: The HTTP GET method is used to **read** (or retrieve) a representation of a resource. * <b>POST</b>: The POST verb is most-often utilized to **create** new resources. * <b>PATCH</b>: PATCH is used for **modify** capabilities. The PATCH request only needs to contain the changes to the resource, not the complete resource * <b>PUT</b>: PUT is most-often utilized for **update** capabilities, PUT-ing to a known resource URI with the request body containing the newly-updated representation of the original resource. * <b>DELETE</b>: DELETE is pretty easy to understand. It is used to **delete** a resource identified by a URI. # Authentication We use a custom authentication method, you will need a apikey that you can find in your account settings. Below you will see a curl example to get your account information: #!/bin/bash curl -X GET 'https://api.egoiapp.com/my-account' \\ -H 'accept: application/json' \\ -H 'Apikey: <YOUR_APY_KEY>' Here you can see a curl Post example with authentication: #!/bin/bash curl -X POST 'http://api.egoiapp.com/tags' \\ -H 'accept: application/json' \\ -H 'Apikey: <YOUR_APY_KEY>' \\ -H 'Content-Type: application/json' \\ -d '{`name`:`Your custom tag`,`color`:`#FFFFFF`}' # SDK Get started quickly with E-goi with our integration tools. Our SDK is a modern open source library that makes it easy to integrate your application with E-goi services. * <b><a href='https://github.com/E-goi/sdk-java'>Java</a></b> * <b><a href='https://github.com/E-goi/sdk-php'>PHP</a></b> * <b><a href='https://github.com/E-goi/sdk-python'>Python</a></b> <security-definitions/> # noqa: E501
The version of the OpenAPI document: 3.0.0-beta
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from egoi-api.configuration import Configuration
class SavedSegmentAllOfSegmentFilterSegmentFilterArray(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'segment_field': 'str',
'segment_operator': 'str',
'segment_value': 'str'
}
attribute_map = {
'segment_field': 'segment_field',
'segment_operator': 'segment_operator',
'segment_value': 'segment_value'
}
def __init__(self, segment_field=None, segment_operator=None, segment_value=None, local_vars_configuration=None): # noqa: E501
"""SavedSegmentAllOfSegmentFilterSegmentFilterArray - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._segment_field = None
self._segment_operator = None
self._segment_value = None
self.discriminator = None
if segment_field is not None:
self.segment_field = segment_field
if segment_operator is not None:
self.segment_operator = segment_operator
if segment_value is not None:
self.segment_value = segment_value
@property
def segment_field(self):
"""Gets the segment_field of this SavedSegmentAllOfSegmentFilterSegmentFilterArray. # noqa: E501
Field internal name (retrieve fields using GET: /lists/{list_id}/fields) # noqa: E501
:return: The segment_field of this SavedSegmentAllOfSegmentFilterSegmentFilterArray. # noqa: E501
:rtype: str
"""
return self._segment_field
@segment_field.setter
def segment_field(self, segment_field):
"""Sets the segment_field of this SavedSegmentAllOfSegmentFilterSegmentFilterArray.
Field internal name (retrieve fields using GET: /lists/{list_id}/fields) # noqa: E501
:param segment_field: The segment_field of this SavedSegmentAllOfSegmentFilterSegmentFilterArray. # noqa: E501
:type: str
"""
self._segment_field = segment_field
@property
def segment_operator(self):
"""Gets the segment_operator of this SavedSegmentAllOfSegmentFilterSegmentFilterArray. # noqa: E501
Segment operator used for comparison. When using a range of dates split them using the following format: date1||date2 # noqa: E501
:return: The segment_operator of this SavedSegmentAllOfSegmentFilterSegmentFilterArray. # noqa: E501
:rtype: str
"""
return self._segment_operator
@segment_operator.setter
def segment_operator(self, segment_operator):
"""Sets the segment_operator of this SavedSegmentAllOfSegmentFilterSegmentFilterArray.
Segment operator used for comparison. When using a range of dates split them using the following format: date1||date2 # noqa: E501
:param segment_operator: The segment_operator of this SavedSegmentAllOfSegmentFilterSegmentFilterArray. # noqa: E501
:type: str
"""
allowed_values = ["equal_to", "not_equal_to", "greater_or_equal_to", "greater_than", "less_or_equal_to", "less_than", "contains", "does_not_contain", "begins_with", "ends_with", "date_range"] # noqa: E501
if self.local_vars_configuration.client_side_validation and segment_operator not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `segment_operator` ({0}), must be one of {1}" # noqa: E501
.format(segment_operator, allowed_values)
)
self._segment_operator = segment_operator
@property
def segment_value(self):
"""Gets the segment_value of this SavedSegmentAllOfSegmentFilterSegmentFilterArray. # noqa: E501
Segment search value # noqa: E501
:return: The segment_value of this SavedSegmentAllOfSegmentFilterSegmentFilterArray. # noqa: E501
:rtype: str
"""
return self._segment_value
@segment_value.setter
def segment_value(self, segment_value):
"""Sets the segment_value of this SavedSegmentAllOfSegmentFilterSegmentFilterArray.
Segment search value # noqa: E501
:param segment_value: The segment_value of this SavedSegmentAllOfSegmentFilterSegmentFilterArray. # noqa: E501
:type: str
"""
self._segment_value = segment_value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SavedSegmentAllOfSegmentFilterSegmentFilterArray):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, SavedSegmentAllOfSegmentFilterSegmentFilterArray):
return True
return self.to_dict() != other.to_dict()
| [
"[email protected]"
] | |
73937dbd616aa7fd615db0615064c976542f5ef3 | c9ab605cdd2dbf92c9de05768ade0ecf1718be02 | /algorithm/t3.py | 74cec31f9cfc6fb539cb226f55bf3b5622627dae | [] | no_license | PyeongGang-Kim/TIL | 42d69308cf99d2e07644b51d7636e1b64551a697 | 8711501d131ee7d78fdaac544dda2008adf820a1 | refs/heads/master | 2023-01-12T21:10:38.027946 | 2021-10-23T07:19:48 | 2021-10-23T07:19:48 | 195,937,990 | 10 | 1 | null | 2023-01-07T11:25:30 | 2019-07-09T05:22:45 | HTML | UTF-8 | Python | false | false | 814 | py | # 0번집이 털린 경우의 dp 최대값 - 마지막 빼고 할것
# 0번집이 털리지 않은 경우의 dp 최대값 - 첫번째 빼고 할 것
def solution(money):
D11 = [0] * len(money)
D12 = [0] * len(money)
D21 = [0] * len(money)
D22 = [0] * len(money)
D11[0] = money[0]
D21[1] = money[1]
    # D11/D21: max when the current house is robbed
    # D12/D22: max when the current house is not robbed
for i in range(1, len(money) - 1):
D11[i] = money[i] + D12[i-1]
D12[i] = D11[i-1] if D11[i-1] > D12[i-1] else D12[i-1]
for i in range(2, len(money)):
D21[i] = money[i] + D22[i-1]
D22[i] = D21[i-1] if D21[i-1] > D22[i-1] else D22[i-1]
answer = max(D11[-2], D12[-2], D21[-1], D22[-1])
return answer
print(solution([1, 2, 3, 1]))  # -> 4
"[email protected]"
] | |
84330b145a69b3630554ae4f66c56b7a6e6c2946 | aa42be48004e22faf72e5a2cfcd4714cfba04ee7 | /crafters/image/ImageCropper/__init__.py | 98c16e7eb0bdd54389537e9ab4543e251ef70e54 | [
"Apache-2.0"
] | permissive | YueLiu1415926/jina-hub | e14b426924cb00f8253004271cda7f050ef1c3c4 | e0a7dc95dbd69a55468acbf4194ddaf11fd5aa6c | refs/heads/master | 2022-12-05T15:36:26.665207 | 2020-08-20T03:40:47 | 2020-08-20T03:40:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,701 | py | __copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Tuple, Dict, Union
import numpy as np
from jina.executors.crafters import BaseCrafter
from .helper import _crop_image, _move_channel_axis, _load_image
class ImageCropper(BaseCrafter):
"""
    :class:`ImageCropper` crops the image with the specified crop box. The coordinates use the same coordinate system as
    :py:mod:`PIL.Image`.
"""
def __init__(self, top: int, left: int, height: int, width: int, channel_axis: int = -1, *args, **kwargs):
"""
:param top: the vertical coordinate of the top left corner of the crop box.
:param left: the horizontal coordinate of the top left corner of the crop box.
:param height: the height of the crop box.
:param width: the width of the crop box.
        :param channel_axis: the axis referring to the channels
"""
super().__init__(*args, **kwargs)
self.top = top
self.left = left
self.height = height
self.width = width
self.channel_axis = channel_axis
def craft(self, blob: 'np.ndarray', *args, **kwargs) -> Dict:
"""
Crop the input image array.
:param blob: the ndarray of the image
:returns: a chunk dict with the cropped image
"""
raw_img = _load_image(blob, self.channel_axis)
_img, top, left = _crop_image(raw_img, target_size=(self.height, self.width), top=self.top, left=self.left)
img = _move_channel_axis(np.asarray(_img), -1, self.channel_axis)
return dict(offset=0, weight=1., blob=img.astype('float32'), location=(top, left))
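# Minimal usage sketch (the input array shape below is an illustrative
# assumption):
#
#   import numpy as np
#   cropper = ImageCropper(top=0, left=0, height=224, width=224)
#   chunk = cropper.craft(np.zeros((256, 256, 3), dtype='uint8'))
#   chunk['blob'].shape   # -> (224, 224, 3)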
| [
"[email protected]"
] | |
f13bebe05707028d5ef7c32256afa8695be99970 | 8f6cc0e8bd15067f1d9161a4b178383e62377bc7 | /ppo_baseline_DMB/WORKINGON/easy_ppo_v6/Exp_run_v0001.py | 8fb7ec75b0875fad040abe46af4ef32294900a85 | [] | no_license | humorbeing/python_github | 9c4dfc61a3cefbb266fefff335f6b28d05797e5e | e4b4b49bee7e7e3843c6874717779ce8d619bd02 | refs/heads/master | 2023-01-22T21:51:20.193131 | 2020-01-26T21:47:23 | 2020-01-26T21:47:23 | 163,707,778 | 0 | 0 | null | 2022-12-27T15:37:48 | 2019-01-01T01:58:18 | Python | UTF-8 | Python | false | false | 5,926 | py | from collections import deque
import time
import torch
import numpy as np
try:
from .envs import make_vec_envs
except Exception: #ImportError
from envs import make_vec_envs
try:
from .model import Policy
except Exception:
from model import Policy
try:
from .ppo import PPO
except Exception:
from ppo import PPO
try:
from .storage import RolloutStorage
except Exception:
from storage import RolloutStorage
try:
from .util_this import Log
except Exception:
from util_this import Log
try:
from .evaluation import evaluate
except Exception:
from evaluation import evaluate
try:
from .utils_from_pytorch import get_vec_normalize
except Exception:
from utils_from_pytorch import get_vec_normalize
def ss(s=''):
print()
print(' ---' * 15)
print(' ---' * 15)
print()
# print(' >>>>>>>>>>>>>>>>>>>> <<<<<<<<<<<<<<<<<<<< ')
print(s)
print()
print(' ---' * 15)
print(' ---' * 15)
print()
import sys
sys.exit()
log_name = 'ppo_PongrD4_6act_gamma'
args_env_name = 'Pong-ramDeterministic-v4'
args_num_processes = 10 # how many envs running, default: 10
args_seed = 1
args_gamma = 0.99
args_num_mini_batch = 10 # how many batchs to train, default: 32
args_clip_param = 0.2
args_ppo_epoch = 4 # in training weight after collection, how many epoch to train agent, default: 4
args_value_loss_coef = 0.5
args_entropy_coef = 0.01
args_lr = 0.0007
args_eps = 1e-5
args_max_grad_norm = 0.5
args_num_steps = 10 # in gathering rollouts, how many steps forward, default: 4
args_num_env_steps = 5e6 # total training steps
args_log_interval = 200
args_eval_interval = 200
def main():
# is_limit_action = True
is_limit_action = False
train_log = Log(log_name+'_train_log')
evl_log = Log(log_name+'_evaluation_log')
torch.set_num_threads(1)
envs = make_vec_envs(
args_env_name,
args_seed,
args_num_processes)
if is_limit_action:
envs.action_space.n = 3
print('Number of Actions:', envs.action_space.n)
# print(envs.action_space)
# ss('hohoho')
actor_critic = Policy(
envs.observation_space.shape,
envs.action_space)
agent = PPO(
actor_critic,
args_clip_param,
args_ppo_epoch,
args_num_mini_batch,
args_value_loss_coef,
args_entropy_coef,
lr=args_lr,
eps=args_eps,
max_grad_norm=args_max_grad_norm)
rollouts = RolloutStorage(
args_num_steps,
args_num_processes,
envs.observation_space.shape,
envs.action_space)
obs = envs.reset()
rollouts.obs[0].copy_(obs)
# print(obs)
# ss('i am over it')
num_updates = int(
args_num_env_steps) // args_num_steps // args_num_processes
episode_rewards = deque(maxlen=10)
start = time.time()
sum_re = torch.zeros(args_num_processes, 1)
for j in range(num_updates):
for step in range(args_num_steps):
with torch.no_grad():
value, action, action_log_prob\
= actor_critic.act(rollouts.obs[step])
# print(action)
# print()
# action = action + 1
# print(action)
# ss('hoiohasdfhioas')
if is_limit_action:
obs, reward, done, infos = envs.step(action+1)
else:
obs, reward, done, infos = envs.step(action)
sum_re += reward
if any(done):
for i in range(len(done)):
if done[i]:
episode_rewards.append(sum_re[i].item())
# print(done)
# print(sum_re[i])
sum_re[i] *= 0
masks = torch.FloatTensor(
[[0.0] if done_ else [1.0] for done_ in done])
bad_masks = torch.FloatTensor(
[[0.0] if 'bad_transition' in info.keys() else [1.0]
for info in infos])
rollouts.insert(obs, action,
action_log_prob,
value, reward,
masks, bad_masks)
with torch.no_grad():
next_value = actor_critic.get_value(
rollouts.obs[-1])
rollouts.compute_returns(
next_value,
args_gamma)
value_loss, action_loss, dist_entropy = agent.update(rollouts)
rollouts.after_update()
if j % args_log_interval == 0 and len(episode_rewards) > 1:
total_num_steps = (j + 1) * args_num_processes * args_num_steps
end = time.time()
logstring = "E {}, N_steps {}, FPS {} mean/median" \
" {:.1f}/{:.1f}, min/max {:.1f}/{:.1f}" \
" Entropy {:.5f},V {:.5f},Action {:.5f}".format(
j, total_num_steps,
int(total_num_steps / (end - start)),
np.mean(episode_rewards),
np.median(episode_rewards), np.min(episode_rewards),
np.max(episode_rewards),
dist_entropy, value_loss,
action_loss)
# print(logstring)
train_log.log(logstring)
# if True:
if (args_eval_interval is not None and len(episode_rewards) > 1
and j % args_eval_interval == 0):
total_num_steps = (j + 1) * args_num_processes * args_num_steps
ob_rms = get_vec_normalize(envs).ob_rms
ev_result = evaluate(actor_critic, ob_rms, args_env_name, args_seed,
args_num_processes, is_limit_action=is_limit_action)
ev_log_string = 'steps:'+str(total_num_steps)+'. '+ev_result
evl_log.log(ev_log_string)
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
e3ab109cbd7ee8af1a38d19e640309ac777edf33 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_humidifies.py | eca272ad0fd541f4afc6614cbf8c1e3c03ae63f2 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py |
#calss header
class _HUMIDIFIES():
def __init__(self,):
self.name = "HUMIDIFIES"
self.definitions = humidify
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['humidify']
| [
"[email protected]"
] | |
56ad186bf416e7055d7e7210a444f0856051c226 | 25ebf226893b44dd8a6b1b85cf80864579372892 | /divide-two-integers/Wrong Answer/2-28-2021, 1:11:22 AM/Solution.py | 6f112ee1acf15b23f426815b780ac2338de4df1f | [] | no_license | TianrunCheng/LeetcodeSubmissions | db15f5a1a8e1bbecefc45cb0b2b5fbaa036aa6f5 | 00a5403f1950e039ccc370cb266b752faebb8e79 | refs/heads/main | 2023-06-29T21:51:43.029300 | 2021-07-22T03:12:15 | 2021-07-22T03:12:15 | 388,305,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | // https://leetcode.com/problems/divide-two-integers
class Solution:
def divide(self, dividend: int, divisor: int) -> int:
neg = False
if dividend < 0:
dividend = 0 - dividend
neg = not neg
if divisor < 0:
divisor = 0 - divisor
neg = not neg
powers = [divisor] # record the 2^n * divisor values at index n
while powers[-1] < dividend:
temp = powers[-1] + powers[-1]
powers.append(temp)
bi_quotient = []
for i in range(len(powers)-1, -1, -1):
            if dividend >= powers[i]:  # '>=' so exact multiples divide cleanly
bi_quotient.append(1)
dividend = dividend - powers[i]
else:
bi_quotient.append(0)
n = ''.join([str(elem) for elem in bi_quotient])
if neg:
return -int(n,2)
return int(n, 2)
| [
"[email protected]"
] | |
9a69b3ede4a9045e9356c9c5067bc0b9f40dac61 | e21599d08d2df9dac2dee21643001c0f7c73b24f | /practice/profile/cProfile/stats.py | 5c1b922d260936e05ac059b23dec1b91f2af3de5 | [] | no_license | herolibra/PyCodeComplete | c7bf2fb4ce395737f8c67749148de98a36a71035 | 4ef7d2c3aec6d28a53eed0e649cdeb74df3d783b | refs/heads/master | 2022-07-17T05:39:03.554760 | 2020-05-03T07:00:14 | 2020-05-03T07:00:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | #!/usr/bin/env python
# coding=utf-8
# author: zengyuetian
if __name__ == '__main__':
import pstats
    # create the Stats object
    p = pstats.Stats("result.out")
    # strip_dirs(): strip irrelevant path information
    # sort_stats(): sort; supports the same keys as described above
    # print_stats(): print the profiling results; can be limited to the first few lines
    # sort by function name and print only the first 3 functions; the argument may also be a
    # fraction, meaning the top percentage of functions is printed
    p.strip_dirs().sort_stats("name").print_stats(3)
    # sort by cumulative run time, then by function name
    p.strip_dirs().sort_stats("cumulative", "name").print_stats(0.8)
    # to find out which functions called bar
    p.print_callers("bar")
    # see which functions foo() calls
    p.print_callees("foo")
"[email protected]"
] | |
f7639798f82887eb95ee1a2118e19c3658161c34 | 7ebc4e9ade9c0c0312c87d74f94929e5c3bf96a6 | /code/main_error_curve.py | 965d06b86faa3b4db42e75011942c2bc73800513 | [] | no_license | nipunbatra/transferable-energy-breakdown-old | 7fa02dd84b0eb37875c190c5a06bfc9d1a2a9218 | bc12de92d620d33e1ca4cf841af341eb3d4bcd76 | refs/heads/master | 2021-08-22T09:24:40.322431 | 2017-11-29T21:18:43 | 2017-11-29T21:18:43 | 78,712,453 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,316 | py | import os, glob
import pandas as pd
import itertools
import numpy as np
path = os.path.expanduser('~/transfer/')
from common_functions import create_feature_combinations
#from common_functions import valid_homes_data
test_region = 'SanDiego'
train_regions = ["Austin","Boulder","SanDiego"]
FEATURE_LISTS = [
['energy'],
['energy','home'],
['energy','region'],
['energy', 'home', 'region']
]
def compute_prediction(frac_path, appliance, feature_comb, k):
file_path = os.path.join(frac_path, '%s_%d_%s_*.csv' %(appliance, k, "_".join(feature_comb)))
files = glob.glob(file_path)
out = {}
for e in files:
out[int(e.split('_')[-1][:-4])] = pd.read_csv(e,index_col=0, header=None).squeeze()
return pd.DataFrame(out).T
def main():
out = {}
for appliance in ['hvac','fridge']:
out[appliance] = {}
for feature in FEATURE_LISTS:
feature_combinations = create_feature_combinations(feature, 2)
out[appliance]["_".join(feature)] = {}
test_path = os.path.join(path, test_region, "_".join(feature))
for austin_fraction in np.linspace(0.0,1.0,6):
out[appliance]["_".join(feature)][austin_fraction] = {}
for boulder_fraction in np.linspace(0.0,1.0,6):
out[appliance]["_".join(feature)][austin_fraction][boulder_fraction] = {}
for sd_fraction in np.linspace(0.0,1.0,6):
out[appliance]["_".join(feature)][austin_fraction][boulder_fraction][sd_fraction] = {}
for k in range(1,9):
out[appliance]["_".join(feature)][austin_fraction][boulder_fraction][sd_fraction][k]={}
train_fraction_dict = {'Austin':austin_fraction,'Boulder':boulder_fraction,'SanDiego':sd_fraction}
frac_string = "_".join([str(int(100*train_fraction_dict[x])) for x in train_regions])
frac_path = os.path.join(test_path, frac_string)
for feature_comb in np.array(feature_combinations)[:]:
try:
print appliance, "_".join(feature), austin_fraction, boulder_fraction, sd_fraction, k, feature_comb
out[appliance]["_".join(feature)][austin_fraction][boulder_fraction][sd_fraction][k]["_".join(feature_comb)]=compute_prediction(frac_path, appliance, feature_comb, k)
except:
pass
return out
out = main()
import pickle
with open('out_fraction.pkl', 'wb') as f:
    pickle.dump(out, f)
"""
import os, glob
import pandas as pd
import itertools
path = os.path.expanduser('~/transfer/')
from common_functions import feature_combinations
from test_homes import valid_homes_data
feature_combinations_names = ['_'.join(a) for a in feature_combinations]
def compute_prediction_num_homes_case(num_homes, case, appliance, feature, k):
num_homes_path = os.path.join(path, str(num_homes))
return compute_prediction_case(case, appliance, feature, k, num_homes_path)
def compute_prediction_case(case, appliance, feature, k, path=path):
files_path = os.path.join(path, '%d_%s_%d_%s_*.csv' %(case, appliance, k, feature))
files = glob.glob(files_path)
out = {}
for e in files:
out[int(e.split('_')[-1][:-4])] = pd.read_csv(e,index_col=0, header=None).squeeze()
return pd.DataFrame(out).T
def compute_prediction(appliance, feature, k):
files = glob.glob(path+'%s_%d_%s_*.csv' %(appliance, k, feature))
out = {}
for e in files:
out[int(e.split('_')[-1][:-4])] = pd.read_csv(e,index_col=0, header=None).squeeze()
return pd.DataFrame(out).T
def compute_prediction_subset(appliance, feature, latent_factors, ran, num_homes):
files = glob.glob(path +'%d_%d_%s_%d_%s_*.csv' % (ran, num_homes, appliance, latent_factors, feature))
out = {}
for e in files:
out[int(e.split('_')[-1][:-4])] = pd.read_csv(e,index_col=0, header=None).squeeze()
return pd.DataFrame(out).T
def find_all_error():
out = {}
for appliance in ['wm','mw','oven','fridge','hvac','dw']:
out[appliance]={}
for feature in ['None', 'temperature','occ', 'area','rooms','occ_area','occ_rooms','area_rooms','occ_area_rooms']:
out[appliance][feature]={}
for k in range(1, 10):
try:
print feature, k, appliance
pred_df = compute_prediction(appliance, feature, k)
gt_df = find_gt_df(appliance, pred_df)
out[appliance][feature][k] = find_error_df(gt_df, pred_df)
except:
pass
return out
def find_gt_df(appliance, pred_df):
import pickle
out_overall = pickle.load(open('/if6/nb2cz/git/Neighbourhood-NILM/data/input/all_regions.pkl', 'r'))
region = "SanDiego"
df = out_overall[region]
gt_df = df[pred_df.columns].ix[pred_df.index]
return gt_df
def find_error_df(gt_df, pred_df):
return (pred_df-gt_df).abs().div(gt_df).mul(100)
def find_optimal(appliance):
o = {}
for feature in ['None','temperature', 'occ', 'area','rooms','occ_area','occ_rooms','area_rooms','occ_area_rooms']:
o[feature]={}
for k in range(1, 10):
try:
print feature, k
pred_df = compute_prediction(appliance, feature, k)
gt_df = find_gt_df(appliance, pred_df)
o[feature][k] = find_error_df(gt_df, pred_df).median().mean()
print o[feature][k], len(pred_df)
except Exception, e:
print e
return pd.DataFrame(o)
def create_overall_dict():
out = {}
#for appliance in ['wm','mw','oven','fridge','hvac','dw']:
for appliance in ['fridge']:
out[appliance]={}
for feature in ['None']:
#for feature in ['None', 'temperature','occ', 'area','rooms','occ_area','occ_rooms','area_rooms','occ_area_rooms']:
out[appliance][feature]={}
for k in range(1, 10):
try:
print feature, k, appliance
pred_df = compute_prediction(appliance, feature, k)
out[appliance][feature][k] = pred_df
except:
pass
return out
def create_overall_dict(case):
out = {}
for num_homes in range(4, 40, 4):
out[num_homes] = create_overall_dict_num_homes_case(num_homes, case)
return out
def create_overall_dict_num_homes_case(num_homes, case):
out = {}
#for appliance in ['wm','mw','oven','fridge','hvac','dw']:
for appliance in ['fridge','hvac']:
out[appliance]={}
for feature in feature_combinations_names:
out[appliance][feature]={}
for k in range(1, 10):
try:
print feature, k, appliance
pred_df = compute_prediction_num_homes_case(num_homes, case, appliance, feature, k)
out[appliance][feature][k] = pred_df
except Exception, e:
print e
return out
def create_overall_dict_subset():
out = {}
for num_homes in range(5, 55, 5):
out[num_homes]={}
#for appliance in ['wm','mw','oven','fridge','hvac','dw']:
for appliance in ['hvac']:
out[num_homes][appliance]={}
for feature in ['None']:
#for feature in ['None', 'occ', 'area','rooms','occ_area','occ_rooms','area_rooms','occ_area_rooms']:
out[num_homes][appliance][feature]={}
#for latent_factors in range(2, 10):
for latent_factors in range(2,3):
out[num_homes][appliance][feature][latent_factors] = {}
for ran in range(10):
try:
print num_homes, feature, latent_factors, appliance
pred_df = compute_prediction_subset(appliance, feature, latent_factors, ran, num_homes)
out[num_homes][appliance][feature][latent_factors][ran] = pred_df
except Exception, e:
print e
"""
| [
"[email protected]"
] | |
9f5deeabe426194334c63fe23dfd1178c20184ec | 7041c85dffb757c3e7063118730363f32ebb9b8a | /Algorithm/python 파일/20190129/글자수.py | 0740ce2e51a31a4650217cb8239b85c97193425d | [] | no_license | woonji913/til | efae551baff56f3ca16169b93185a65f4d81cd7a | a05efc68f88f535c26cb4d4a396a1e9cd6bf0248 | refs/heads/master | 2021-06-06T23:17:54.504620 | 2019-06-19T04:29:18 | 2019-06-19T04:29:18 | 163,778,844 | 1 | 0 | null | 2021-05-08T16:27:17 | 2019-01-02T01:08:19 | HTML | UTF-8 | Python | false | false | 249 | py | import sys
sys.stdin = open("글자수_input.txt", "r")
T = int(input())
for tc in range(1, T + 1):
str1 = str(input())
str2 = str(input())
ans = []
for i in str1:
ans.append(str2.count(i))
print(f"#{tc} {max(ans)}")
| [
"[email protected]"
] | |
a43fde820e3da79cdccf1ae62da896f1bba46980 | 27e890f900bd4bfb2e66f4eab85bc381cf4d5d3f | /tests/unit/modules/storage/netapp/test_netapp_e_hostgroup.py | 5aad0e05611ad4988245905b190b8e2a1ab08a05 | [] | no_license | coll-test/notstdlib.moveitallout | eb33a560070bbded5032385d0aea2f3cf60e690b | 0987f099b783c6cf977db9233e1c3d9efcbcb3c7 | refs/heads/master | 2020-12-19T22:28:33.369557 | 2020-01-23T18:51:26 | 2020-01-23T18:51:26 | 235,865,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,186 | py | # (c) 2018, NetApp Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible_collections.notstdlib.moveitallout.plugins.modules.netapp_e_hostgroup import NetAppESeriesHostGroup
from ansible_collections.notstdlib.moveitallout.tests.unit.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
try:
from unittest import mock
except ImportError:
import mock
class HostTest(ModuleTestCase):
REQUIRED_PARAMS = {"api_username": "rw",
"api_password": "password",
"api_url": "http://localhost",
"ssid": "1"}
REQ_FUNC = 'ansible_collections.notstdlib.moveitallout.plugins.modules.netapp_e_hostgroup.NetAppESeriesHostGroup.request'
HOSTS_GET_RESPONSE = [
{"hostRef": "84000000600A098000A4B28D0030102E5C3DFC0F",
"clusterRef": "85000000600A098000A4B28D0036102C5C3DFC08", "id": "84000000600A098000A4B28D0030102E5C3DFC0F",
"name": "host1"},
{"hostRef": "84000000600A098000A4B28D003010315C3DFC11",
"clusterRef": "85000000600A098000A4B9D100360F765C3DFC1C", "id": "84000000600A098000A4B28D003010315C3DFC11",
"name": "host2"},
{"hostRef": "84000000600A098000A4B28D003010345C3DFC14",
"clusterRef": "85000000600A098000A4B9D100360F765C3DFC1C", "id": "84000000600A098000A4B28D003010345C3DFC14",
"name": "host3"}]
HOSTGROUPS_GET_RESPONSE = [
{"clusterRef": "85000000600A098000A4B28D0036102C5C3DFC08", "id": "85000000600A098000A4B28D0036102C5C3DFC08",
"name": "group1"},
{"clusterRef": "85000000600A098000A4B9D100360F765C3DFC1C", "id": "85000000600A098000A4B9D100360F765C3DFC1C",
"name": "group2"},
{"clusterRef": "85000000600A098000A4B9D100360F775C3DFC1E", "id": "85000000600A098000A4B9D100360F775C3DFC1E",
"name": "group3"}]
def _set_args(self, args):
self.module_args = self.REQUIRED_PARAMS.copy()
self.module_args.update(args)
set_module_args(self.module_args)
def test_hosts_fail(self):
"""Ensure that the host property method fails when self.request throws an exception."""
self._set_args({"state": "present", "name": "hostgroup1", "hosts": ["host1", "host2"]})
hostgroup_object = NetAppESeriesHostGroup()
with self.assertRaises(AnsibleFailJson):
with mock.patch(self.REQ_FUNC, return_value=Exception()):
hosts = hostgroup_object.hosts
with mock.patch(self.REQ_FUNC, return_value=(200, [])):
with self.assertRaisesRegexp(AnsibleFailJson, "Expected host does not exist"):
hosts = hostgroup_object.hosts
def test_hosts_pass(self):
"""Evaluate hosts property method for valid returned data structure."""
expected_host_list = ['84000000600A098000A4B28D003010315C3DFC11', '84000000600A098000A4B28D0030102E5C3DFC0F']
for hostgroup_hosts in [["host1", "host2"], ["84000000600A098000A4B28D0030102E5C3DFC0F",
"84000000600A098000A4B28D003010315C3DFC11"]]:
self._set_args({"state": "present", "name": "hostgroup1", "hosts": hostgroup_hosts})
hostgroup_object = NetAppESeriesHostGroup()
with mock.patch(self.REQ_FUNC, return_value=(200, self.HOSTS_GET_RESPONSE)):
for item in hostgroup_object.hosts:
self.assertTrue(item in expected_host_list)
# Create hostgroup with no hosts
self._set_args({"state": "present", "name": "hostgroup1"})
hostgroup_object = NetAppESeriesHostGroup()
with mock.patch(self.REQ_FUNC, return_value=(200, [])):
self.assertEqual(hostgroup_object.hosts, [])
def test_host_groups_fail(self):
"""Ensure that the host_groups property method fails when self.request throws an exception."""
self._set_args({"state": "present", "name": "hostgroup1", "hosts": ["host1", "host2"]})
hostgroup_object = NetAppESeriesHostGroup()
with self.assertRaises(AnsibleFailJson):
with mock.patch(self.REQ_FUNC, return_value=Exception()):
host_groups = hostgroup_object.host_groups
def test_host_groups_pass(self):
"""Evaluate host_groups property method for valid return data structure."""
expected_groups = [
{'hosts': ['84000000600A098000A4B28D0030102E5C3DFC0F'], 'id': '85000000600A098000A4B28D0036102C5C3DFC08',
'name': 'group1'},
{'hosts': ['84000000600A098000A4B28D003010315C3DFC11', '84000000600A098000A4B28D003010345C3DFC14'],
'id': '85000000600A098000A4B9D100360F765C3DFC1C', 'name': 'group2'},
{'hosts': [], 'id': '85000000600A098000A4B9D100360F775C3DFC1E', 'name': 'group3'}]
self._set_args({"state": "present", "name": "hostgroup1", "hosts": ["host1", "host2"]})
hostgroup_object = NetAppESeriesHostGroup()
with mock.patch(self.REQ_FUNC,
side_effect=[(200, self.HOSTGROUPS_GET_RESPONSE), (200, self.HOSTS_GET_RESPONSE)]):
self.assertEqual(hostgroup_object.host_groups, expected_groups)
@mock.patch.object(NetAppESeriesHostGroup, "host_groups")
@mock.patch.object(NetAppESeriesHostGroup, "hosts")
@mock.patch.object(NetAppESeriesHostGroup, "create_host_group")
@mock.patch.object(NetAppESeriesHostGroup, "update_host_group")
@mock.patch.object(NetAppESeriesHostGroup, "delete_host_group")
def test_apply_pass(self, fake_delete_host_group, fake_update_host_group, fake_create_host_group, fake_hosts,
fake_host_groups):
"""Apply desired host group state to the storage array."""
hosts_response = ['84000000600A098000A4B28D003010315C3DFC11', '84000000600A098000A4B28D0030102E5C3DFC0F']
host_groups_response = [
{'hosts': ['84000000600A098000A4B28D0030102E5C3DFC0F'], 'id': '85000000600A098000A4B28D0036102C5C3DFC08',
'name': 'group1'},
{'hosts': ['84000000600A098000A4B28D003010315C3DFC11', '84000000600A098000A4B28D003010345C3DFC14'],
'id': '85000000600A098000A4B9D100360F765C3DFC1C', 'name': 'group2'},
{'hosts': [], 'id': '85000000600A098000A4B9D100360F775C3DFC1E', 'name': 'group3'}]
fake_host_groups.return_value = host_groups_response
fake_hosts.return_value = hosts_response
fake_create_host_group.return_value = lambda x: "Host group created!"
fake_update_host_group.return_value = lambda x: "Host group updated!"
fake_delete_host_group.return_value = lambda x: "Host group deleted!"
# Test create new host group
self._set_args({"state": "present", "name": "hostgroup1", "hosts": ["host1", "host2"]})
hostgroup_object = NetAppESeriesHostGroup()
with self.assertRaises(AnsibleExitJson):
hostgroup_object.apply()
# Test make no changes to existing host group
self._set_args({"state": "present", "name": "group1", "hosts": ["host1"]})
hostgroup_object = NetAppESeriesHostGroup()
with self.assertRaises(AnsibleExitJson):
hostgroup_object.apply()
# Test add host to existing host group
self._set_args({"state": "present", "name": "group1", "hosts": ["host1", "host2"]})
hostgroup_object = NetAppESeriesHostGroup()
with self.assertRaises(AnsibleExitJson):
hostgroup_object.apply()
# Test delete existing host group
self._set_args({"state": "absent", "name": "group1"})
hostgroup_object = NetAppESeriesHostGroup()
with self.assertRaises(AnsibleExitJson):
hostgroup_object.apply()
@mock.patch.object(NetAppESeriesHostGroup, "host_groups")
@mock.patch.object(NetAppESeriesHostGroup, "hosts")
def test_apply_fail(self, fake_hosts, fake_host_groups):
"""Apply desired host group state to the storage array."""
hosts_response = ['84000000600A098000A4B28D003010315C3DFC11', '84000000600A098000A4B28D0030102E5C3DFC0F']
host_groups_response = [
{'hosts': ['84000000600A098000A4B28D0030102E5C3DFC0F'], 'id': '85000000600A098000A4B28D0036102C5C3DFC08',
'name': 'group1'},
{'hosts': ['84000000600A098000A4B28D003010315C3DFC11', '84000000600A098000A4B28D003010345C3DFC14'],
'id': '85000000600A098000A4B9D100360F765C3DFC1C', 'name': 'group2'},
{'hosts': [], 'id': '85000000600A098000A4B9D100360F775C3DFC1E', 'name': 'group3'}]
fake_host_groups.return_value = host_groups_response
fake_hosts.return_value = hosts_response
self._set_args(
{"state": "present", "id": "84000000600A098000A4B28D0030102E5C3DFC0F", "hosts": ["host1", "host2"]})
hostgroup_object = NetAppESeriesHostGroup()
with self.assertRaisesRegexp(AnsibleFailJson,
"The option name must be supplied when creating a new host group."):
hostgroup_object.apply()
| [
"[email protected]"
] | |
04b46364ad94fe49d0fd070053bbdd62de335055 | 62b736eff115a6d9cfd323c1b396c94f8a9302fe | /tkinter/animation2.py | 50dae12d5e1d849d2ab1416929f181671379c903 | [] | no_license | ccnelson/Python | dccbb9a2c00f8124216f2f4d4202b94907134083 | ebd0c401b23aee7467332d692588f02cda0ff935 | refs/heads/master | 2023-04-02T09:09:55.716686 | 2021-03-28T18:16:15 | 2021-03-28T18:16:15 | 184,681,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,089 | py | # the right way!
import tkinter as tk
class Admin():
count = 0
a = Admin()
root = tk.Tk()
text = tk.Text(root, background='black', foreground='lawn green',\
font=('Courier', 3), height=27, width=40)
animation = ["""
M88OOZOZOODNN
O$$$ZZZZZZZZZZZ$8M
NN$$$ZZZOOOOOOZZZZ$ONN
NZ$$O8ODDDNNNNNNDDD8OZZZN
O$ZZOO8NNDDDDDDDNND88OZZON
8ZOOO8D8MMM MNN8D8O8O$D
ZOIZOOOONM MODOOZZZ
D777$OZZ8M D888ZZZNN
$II$$$OOM DZZZZ$OZM
$7777$$$ NZZ$$ZZON
Z$$$7777 NZZ$$ZZZN
Z$$$7?77 NZZ77$ZZN
DOO$I=++N M8III7$88M
DO8ZI=~~8 N$++I7$88M
N88ZI+~~ZN 8I==7$ZDD
MMDZ$II:=?D8NM MD$O7~+7IZOD
MOZ$$+~~77$Z8888O7??=+I77ODN
D8ZZI=~==+I777I?+++=I$$$DN
MNNZZ$+++======?II7Z8DD
MNDOOOOZZZZZO88DMM
MNNNDDDDDDNMMM
""", """
N8Z7777I777$DMM
NOO?I++++I777II+??7M
DZIII++77$$ZZZOO$77IOM
D??II77$OOO8NNNNNDND8Z$88
NM$?III7$Z88D8DDD8DN8DDOZZ$
D77+7777Z88MM MNDDDD8OO8
I++I7777OD MMMDD88O8
?++I$I$$8M M8D88OZN
N?II7I7$$M NO88OZO
DI77$IIZZ MOOOZZZ
D7$$$I?$$ MOZZ$ZZ
N$Z$7?+$$ MO$$$$Z
MZ$$7I+I?M OI+?$$8
MO$$$7?++D 7+++7ZD
M8ZZ$$I+=OM N?=??$ON
MDD8$777~+7DDM N$77?=?7IDM
NMDO$77~~=77ZO888OZI++=+7ZZM
MMN8O$$?~~==+?IIII?++++?ZD8
ND8DZ$7~::::::~~=??$NM
MMDD8OOZ$$Z$$Z8DDN
MMNDDDDDDDDDNMM
""", """
MM
OZ$II??III$D
O$$?=:,,,,~==++IOO
O7??=::,::=7$$$$7$$O
8==++:,:77O8DDD888888D
NN7+=~~:~=OO88888DNNDD8ON
N77+++~~~IZNNM MM888MD8DD
7++=I+~~IOD MNNOD888
I===7=~~78N MMDD888
M+++I??++OD N88OON
N?III7+++ZN NZOZZD
NIII77?++ON N$ZZZD
N7777$I??ZN NIZ$$D
MO$$$77IIZ8 MMO=IIIN
MOZZZ77II$OM NN7=?IIM
8OOZ$$777OD M8OI+?7I
NDDDZZZZII7ZODM MMN$I??+?$NN
MNNDOZZZ7I+77$O888$?+~~~7D
MMN8OZZ$II??II7??+=~~~=OM
NNDDO87II=~~==~~+IIO
MMMD88ZZ$$777$Z8NNM
MMNND888OOOOODNMM
MMMMMMMM
""", """
MMNNN
NO7I????77ZN
8DI+=::,:?II7Z8D
DZZ==~ ,,:I$$$ZOD8M
OI===: ::~IZZZO88888
$+===: ~~+ZO88DND88D
N=~~~~:,IIODMMMNDDNNDM
O~~?+~:~OON MODNND
MMZ~=??~:~88M MD88NDM
NN$=?II+~+DD MNOZ8DN
DD$?I77?+?DD M8OOOOD
DD$I777I+?DD MO$$ZOD
NNZ7$7$7IIDD NZ?I7ZD
MM8$$77$$788 N7++?$N
MM8ZZ$$$$788M DI??+ZM
MM8OOZZ$$$OOD 8I+++OM
M88OOZZ$$7ZZNDDO+?==+M
MD8OOOZ$77$ZZ$$I=+==I
DD88OZZ7777?I?+++??$
MNDD88O$$$77???I$OON
MMNNDOZZ$7777ODMM
MMNND88OZZZZODM
MMNNNNNNNM
MMMMM
""", """
MMDOZOOD
M$$??+I?7N
D$++~~:==?OD
MZI=~~:,~~IO8M
Z?~~~~,,~~IZDND
7=:~~: ~~I8NDD
MM+::~~: ,+=IDDDD
NN~::+=,,,++7ONDD
NN~::++:,:??7ZDDD
DD+=?II=:~II7Z88D
DD++I7I?~=III7OOO
DD??777I++II?7ZOZ
DDI7$777I???=I$$$
DDZZ$Z$$7III~=+II
DNZZ$ZZ$$777==+II
NNOOZZZ$Z7$$?=+77
MM888OOOZ$777=7OO
888OOOZ$77$?Z88
D8888OO$77$IODD
NDDD88O$$$$7OM
NDNND8ZZZZON
MNDNDD8OOZDM
MMNNNDDN
""", """
Z$778M
D?+==IOM
DD$=~~~+IO
88I~~~~=+Z
NZZ+:~~::~I
N$Z=::~:::IMM
N$$=,:~~:,+MM
N$7~,~++::+NN
N77=:=++::+NN
N$7+=???=:+NN
D$Z??III++?NN
DZZII777??INN
DZO77$$$II7NN
NO8Z$$$$$ZZNN
NOOZZZ$$ZZZNN
NOOZZZZZZZZNN
N88O8OOOOZOMM
N88O8OOOOOOMM
MD8O8OOOOO8MM
MDD88D88888
MNDDDDDDDN
MMNDNNNDNN
MNNDDMM
""", """
NOO$Z8
8$II++?ZZN
N$I++::~++IO
D$?++:::==?$
NNDZI++, :=~~=D
NNDZ7??, ,~~~~8MM
MDDD8$I?, ~~=:IDD
NDDNZOII, ,++=~+OZM
DDDN88II, ,+++~=ZZM
D88D8$II~::?II++$$N
8OOZ$$II~~+III??$$N
OZZ$I7II==?7777IZZN
O777=?II+?I77777ZZN
Z++= :III7$$$$$Z8ON
O??~,~II77$ZZ$$O88N
8II=:~II77$$$$ZO88N
MZOI~=77$$ZOOO88NN
88$~?$7$ZZO888DNN
DD$=I77$ZZ8888DMM
MMO7$77$Z8DDDDM
NZ$$$O8DNNDN
MOOOZ88DNNNM
MNNNNNNMM
""", """
NDDMM
8Z77?+?II$ZM
NMO$7II~:,~~~+7OOM
MDDO$$$$=~,,,:~?778
D88O88ZZZI~,,,,===?Z
DO8DDDDDZZI,, ,=+==7
MDNNDDMMMNDO=~,,===~=D
NNNN8M N7I~~=??=~$MM
NN88D M$$=~=II+~7NN
MDD8ON OO?=?III+I88
N8O8OD OO7+III7IIOOM
NO$$$8 OOI+I777IIOON
DZ7?IO OO7?I$$777OON
D7?++$ OO$7$$$7$O88M
N7+++7 ZZ77ZZZ$ZOD8M
M$+++I MZZI$ZZ$ZZ8DDM
N+==+IZDDNZ$$$$$ZZZO8N
?===+777$$$$$$$OOO88N
$??+++III$7$$$ZOOO8DM
NOO7?I???77ZZZODDDDN
MMD$777$$$OO8DNNNM
M8OZZZZ8DDDNMMM
MNNNNNMMMM
MMM
""", """
MM
MMO$I????7$$ZD
NOII++~~~:,,,:~?$OZN
Z7I7$$$II+~, ,:=+77Z
8OO888DD888O+~~::?=~+O
DDO88DDD88DD88Z??::~++=7D
D888N8OZMM M8ZO$:~=+++7DD
N888DONMM MD8O=~=+I=+$$
D888D8MMM NNO?~=+7+=II
8O888DM D7+++I7??+M
OZOO$D M$++?77IIIN
OZZZ7D M$???77IIIN
ZZZZ?D M$?II7$777N
ZI??=ZM DI?7I7$$OOM
OI+++7D OII777$Z88M
D7??+?OMM NMZII$$$ZZ88M
NZZ?+??I8M N$77?7$ZZZ88NN
DD7~~++7Z888OZ7++?$ZZZODDMM
MM8=~~~???77II+?+7ZZOOONN
O7++:::~~~=I77ZO8DDDM
MN88$7777$$O888NMMMM
MNM8OOOOOO8DDNMM
MMMMMMMM
""", """
DZ7$$777$$Z8
DZI7?IIII7I?+++?7ZZD
$7$7ZZ8OOZZ$II???II$
DDZO8DDDNNNNND8OZ$$777?78
NZZZ8NDDDD8DDD8888OZ7III?$DD
O8O8MDOONM NN88$$$77+IID
88OO8ONMM NZ777?I??7
MOOOO8DM O$Z77I???
8OOO8ZM 8ZZ7777I?N
MMOOZ$$$M DZZ7$777ID
MMOZ$777 NZZI7$$$7D
MMOZ$II7 N$$?77Z$$D
DOI+=+N OII+7$$$ZM
NOI++=8 $??+77$$OM
M87??~$N M7+++7$ZZ8
8$$?+?7I$N MZO7+=??$Z8DDM
NOO7==++I$O8888Z??=~?77$8DMM
MDDO++++=?I77I?+~~:~7Z$ZDN
MO7??+~::::~~++7$O88DM
MNDN8ZZZZ$$ZOO8DMMM
MMMNDDDDDDNNNMM
"""]
def update():
    # Redraw the text widget with the current animation frame.
    text.delete(1.0, 'end')
    text.insert('end', animation[a.count])
    text.pack()
    a.count += 1
    if a.count == 10:  # wrap back to the first frame
        a.count = 0
    root.after(88, update)  # schedule the next frame in ~88 ms
root.after(0, update)
root.mainloop()
| [
"[email protected]"
] | |
f56a97f3a3b19d1678cd8892d3f96a6483ee6e44 | 0d9cd43c4bc56e917135dc329c5cd9c1a4cb2b87 | /idangr/gui.py | 53874d0a7f93456b78e7c1825872f8e07e060f58 | [
"BSD-2-Clause"
] | permissive | budanthara/IDAngr | 8ec10ec9b3736d2419244161830a8bf90f957a63 | 0acbbf9847b728e8d0fccdc06ae63c3b971f5808 | refs/heads/master | 2020-03-22T07:43:05.641342 | 2018-06-27T08:23:29 | 2018-06-27T08:23:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 357 | py | import manage
print "######### IDAngr GUI #########"
def show():
if not manage.is_initialized():
from init_gui import IDAngrConnectDialog
if IDAngrConnectDialog.go():
from main_gui import idangr_panel_show
idangr_panel_show()
else:
from main_gui import idangr_panel_show
idangr_panel_show()
| [
"[email protected]"
] | |
ad0a30801a577810bdbbbe8d299f14c4b1640756 | b3efe04b5a6dcaba07c5dde6e146954ea777fc5c | /proc_names.py | c699ba4f4e60c444df44f053f59e0a1aec9390c4 | [] | no_license | WhiteCri/uavProcessMonitor | 4d25fd97e979a43731b1c9af6d6cdddc68feed94 | f0f3253b24183c39ff024254d7f003d333174ad3 | refs/heads/master | 2023-06-19T21:15:05.940807 | 2021-06-06T13:52:58 | 2021-06-06T13:52:58 | 341,111,441 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | exe_names = [ #you can write node_name when you use ROS
'server',
'fast_planner_node',
'traj_server'
]
| [
"[email protected]"
] | |
a1ebf59c3eee757e0714ca34c1e4e12d1aadd432 | 9947f5315175584c049d3690da3bd3b695c959a2 | /ch-10-sorting-and-searching/08-find-duplicates.py | 9293c5eaa2c4440bd217f7ef1729fd877f48524e | [] | no_license | GeorgeUofT/ctci-questions | 3c32a2af59f980ee952386e3784fa6cb1e88ea56 | 99f65e56592b2e709984c85401a2faf8d01e620e | refs/heads/master | 2021-05-04T05:51:02.011012 | 2018-02-05T19:41:29 | 2018-02-05T19:41:29 | 120,345,490 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 78 | py | # Find the duplicates in an array that contains numbers between 0 and 32000.
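# One possible approach (an illustrative sketch, not part of the original
# file): since the values are bounded by 0..32000, a 32001-bit vector (~4 KB)
# can mark values already seen, reporting duplicates in a single O(n) pass.
def print_duplicates(array):
    seen = 0  # Python int used as a bit vector over values 0..32000
    for n in array:
        if seen & (1 << n):   # bit already set -> n is a duplicate
            print(n)
        else:
            seen |= 1 << n    # mark n as seen
# Example: print_duplicates([1, 5, 1, 10, 12, 10]) prints 1 and 10.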
| [
"[email protected]"
] | |
44bd4131bcd523e76930ee093593e6c0c8c07d61 | b9de33c6fb310ef69cba728b9de1a31165c3a031 | /chapter_32/spam_static.py | d461de172a54a1bde7567489c009edee95058f0c | [] | no_license | bimri/learning-python | 2fc8c0be304d360b35020a0dfc16779f78fb6848 | 5f2fcc9a08f14e1d848530f84ce3b523d1f72aad | refs/heads/master | 2023-08-12T20:30:09.754468 | 2021-10-15T20:53:49 | 2021-10-15T20:53:49 | 377,515,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,386 | py | "Counting Instances with Static Methods"
class Spam:
numInstances = 0 # Use static method for class data
def __init__(self):
Spam.numInstances += 1
def printNumInstances():
print("Number of instances: %s" % Spam.numInstances)
    printNumInstances = staticmethod(printNumInstances)  # this version requires an extra staticmethod() call; now printNumInstances() is a static method
'''
Using the static method built-in, our code now allows the self-less method to be called
through the class or any instance of it
'''
if __name__ == "__main__":
from spam_static import Spam
a = Spam()
b = Spam()
c = Spam()
Spam.printNumInstances() # Call as simple function
a.printNumInstances() # Instance argument not passed
'''
allows subclasses to customize the static method with inheritance—a
more convenient and powerful approach than importing functions from the files in
which superclasses are coded.
'''
class Sub(Spam):
def printNumInstances(): # Override a static method
print("Extra stuff...") # But call back to original
Spam.printNumInstances() # Call static method
printNumInstances = staticmethod(printNumInstances) # Make printNumInstances a static method
if __name__ == "__main__":
print()
from spam_static import Spam, Sub
a = Sub()
b = Sub()
a.printNumInstances() # Call from subclass instance
Sub.printNumInstances() # Call from subclass itself
Spam.printNumInstances() # Call from original/parent class
"""
Moreover, classes can inherit the static method without redefining it—it is run without
an instance, regardless of where it is defined in a class tree:
"""
class Other(Spam): pass # Inherit static method verbatim
if __name__ == "__main__":
print()
from spam_static import Other
c = Other()
c.printNumInstances()
"""
Notice how this also bumps up the superclass’s instance counter, because its constructor
is inherited and run
"""
| [
"[email protected]"
] | |
c1a8fe6df31bc822bcf5b52230ba79925c590f3e | 1e67e211123f694bd807e1efb2a85a8cbdae2882 | /server/accession/namebuilder.py | 45b17c56d34b4a0b6e9bbadb3687a5eb96cb8a81 | [
"MIT"
] | permissive | coll-gate/collgate | 7590ec8dbc7cdb310d0c8452fd6c6e76cf02985d | 8c2ff1c59adda2bf318040f588c05263317a2812 | refs/heads/master | 2021-01-20T03:00:35.617958 | 2019-03-01T16:46:49 | 2019-03-01T16:46:49 | 89,474,611 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,869 | py | # -*- coding: utf-8; -*-
#
# @file batchnamebuilder
# @brief Construct a new batch name using a specific convention and some constraints
# @author Frédéric SCHERMA (INRA UMR1095)
# @date 2018-01-08
# @copyright Copyright (c) 2018 INRA/CIRAD
# @license MIT (see LICENSE file)
# @details
import time
from datetime import datetime
from django.db import connection
from organisation.models import GRC
class NamingType(object):
"""
Base naming type class.
"""
    SEQUENCE = 0 # Integer auto-increment based sequence (only one is possible)
STATIC = 1 # Static string part
CONSTANT = 2 # Constant string (can be used anywhere, prefix, middle, suffix)
VARIABLE = 3 # Variable string (from a choice)
MDAY = 4 # Day of the month 1..31
MONTH = 5 # Month of the year 1..12
YEAR = 6 # Four digits year
GRC_ID = 7 # GRC name identifier
HASH = 8 # Hash string generation based on a sequence as seed and CRC-15
def __init__(self, naming_type, pos):
self._type = naming_type
self._pos = pos
@property
def type(self):
return self._type
def value(self, variables, constants):
return ""
class NamingTypeSequence(NamingType):
"""
Sequence naming type
"""
def __init__(self, pos, sequence_name, digits=6):
super().__init__(NamingType.SEQUENCE, pos)
self.sequence_name = sequence_name
self.format = "%%.0%ii" % digits
def value(self, variables, constants):
acc_seq = "SELECT nextval('%s')" % self.sequence_name
with connection.cursor() as cursor:
cursor.execute(acc_seq)
v = cursor.fetchone()[0]
return self.format % v
class NamingTypeHash(NamingType):
"""
Hash naming type
"""
SYMBOLS = []
@classmethod
def init(cls):
cls.SYMBOLS = []
# 10 digits
for i in range(0, 10):
cls.SYMBOLS.append(chr(ord('0') + i))
# 22 letters
for i in range(0, 26):
# ignore I,L,O,U
if i not in (8, 11, 14, 20):
cls.SYMBOLS.append(chr(ord('A') + i))
@classmethod
def crc15(cls, seed):
        # current time in microseconds (fits in 64 bits), mixed in as entropy
now = int(time.time() * 1000 * 1000)
v = [
(seed & 0xff00000000000000) >> 7,
(seed & 0x00ff000000000000) >> 6,
(seed & 0x0000ff0000000000) >> 5,
(seed & 0x000000ff00000000) >> 4,
(seed & 0x00000000ff000000) >> 3,
(seed & 0x0000000000ff0000) >> 2,
(seed & 0x000000000000ff00) >> 1,
(seed & 0x00000000000000ff),
(now & 0xff00000000000000) >> 7,
(now & 0x00ff000000000000) >> 6,
(now & 0x0000ff0000000000) >> 5,
(now & 0x000000ff00000000) >> 4,
(now & 0x00000000ff000000) >> 3,
(now & 0x0000000000ff0000) >> 2,
(now & 0x000000000000ff00) >> 1,
(now & 0x00000000000000ff)
]
crc = 0
for i in range(0, 16):
crc ^= v[i] << 7
for j in range(0, 8):
crc <<= 1
if crc & 0x8000:
crc ^= 0xC599
crc &= 0x7fff
return crc
@classmethod
def to_base32(cls, x):
"""
Crockford's base 32 plus 1 bits
"""
res = ""
if x == 0:
return ""
if x & 0x8000:
res += "1"
if x > 0x03E0:
x1 = (x & 0x7C00) >> 10
res += cls.SYMBOLS[x1]
if x > 0x001F:
x1 = (x & 0x03E0) >> 5
res += cls.SYMBOLS[x1]
x1 = x & 0x001F
res += cls.SYMBOLS[x1]
return res
def __init__(self, pos, sequence_name, length=3):
super().__init__(NamingType.HASH, pos)
self.sequence_name = sequence_name
self.length = length
if length != 3:
raise ValueError("Only max length of 3 is supported")
def value(self, variables, constants):
acc_seq = "SELECT nextval('%s')" % self.sequence_name
with connection.cursor() as cursor:
cursor.execute(acc_seq)
v = cursor.fetchone()[0]
# generate a crc-15 based on the current time and unique seed
crc15 = NamingTypeHash.crc15(v)
# return a 3 chars max string from the crc15
return NamingTypeHash.to_base32(crc15)
class NamingTypeStatic(NamingType):
"""
    Static naming type
"""
def __init__(self, pos, text):
super().__init__(NamingType.STATIC, pos)
self.text = text
def value(self, variables, constants):
return self.text
class NamingTypeConstant(NamingType):
"""
Constant string naming type
"""
def __init__(self, pos, index):
super().__init__(NamingType.CONSTANT, pos)
self._index = index
def value(self, variables, constants):
if self._index < len(constants):
return constants[self._index]
else:
raise ValueError("Missing constant")
class NamingTypeVariable(NamingType):
"""
Variable (from a choice) string naming type
"""
def __init__(self, pos, var_name):
super().__init__(NamingType.VARIABLE, pos)
if var_name not in ('GRC_CODE', 'ACCESSION_CODE', 'ACCESSION_NAME'):
raise ValueError("Unsupported variable name " + var_name)
self._var_name = var_name
def value(self, variables, constants):
v = variables.get(self._var_name, "")
if v is not None:
return v
else:
raise ValueError("Missing variable")
class NamingTypeMonthDay(NamingType):
"""
Day of the month naming type
"""
def __init__(self, pos):
super().__init__(NamingType.MDAY, pos)
def value(self, variables, constants):
day = datetime.today().day
return "%.2i" % day
class NamingTypeMonth(NamingType):
"""
Month naming type
"""
def __init__(self, pos):
super().__init__(NamingType.MONTH, pos)
def value(self, variables, constants):
month = datetime.today().month
return "%.2i" % month
class NamingTypeYear(NamingType):
"""
Year of the month naming type
"""
def __init__(self, pos):
super().__init__(NamingType.YEAR, pos)
def value(self, variables, constants):
year = datetime.today().year
return "%.4i" % year
class NamingTypeGRCCode(NamingType):
"""
GRC name identifier string naming type
"""
def __init__(self, pos):
super().__init__(NamingType.GRC_ID, pos)
def value(self, variables, constants):
return GRC.objects.get_unique_grc().identifier
class NameBuilder(object):
# Some examples of naming
SIMPLE_SERIAL = "{SEQ.6}"
PREFIXED_SERIAL = "{CONST}_{SERIAL.6}"
PREFIXED_SERIAL_WITH_DATE = "{CONST}_{SEQ.6}_{YEAR}{MONTH}{MDAY}"
def __init__(self, sequence_name, builder_format=None):
if not builder_format:
self._naming_format = NameBuilder.PREFIXED_SERIAL_WITH_DATE
else:
self._naming_format = builder_format
        # count the number of constant strings required
self._num_constants = self._naming_format.count("{CONST}")
self._recipe = []
sp = -1
i = 0
pos = 0
const_idx = 0
st = ""
np = ""
for c in self._naming_format:
            if c == '{':
if len(st) > 0:
self._recipe.append(NamingTypeStatic(pos, st))
st = ""
pos += 1
sp = i
np = ""
            elif c == '}' and sp >= 0:
sp = -1
parts = np.split('.')
if parts[0] == "SEQ":
if len(parts) == 1:
self._recipe.append(NamingTypeSequence(pos, sequence_name, -1))
elif len(parts) == 2:
width = int(parts[1])
self._recipe.append(NamingTypeSequence(pos, sequence_name, width))
elif parts[0] == "CONST":
self._recipe.append(NamingTypeConstant(pos, const_idx))
const_idx += 1
elif parts[0] == "VAR":
if len(parts) == 1:
raise ValueError("Missing variable name")
self._recipe.append(NamingTypeVariable(pos, parts[1]))
elif parts[0] == "MDAY":
self._recipe.append(NamingTypeMonthDay(pos))
elif parts[0] == "MONTH":
self._recipe.append(NamingTypeMonth(pos))
elif parts[0] == "YEAR":
self._recipe.append(NamingTypeYear(pos))
elif parts[0] == "GRC_CODE":
self._recipe.append(NamingTypeGRCCode(pos))
elif parts[0] == "HASH":
if len(parts) == 1:
self._recipe.append(NamingTypeHash(pos, sequence_name))
elif len(parts) == 2:
max_length = int(parts[1])
self._recipe.append(NamingTypeHash(pos, sequence_name, max_length))
else:
pass
pos += 1
elif sp >= 0:
np += c
else:
st += c
i += 1
# last suffix
if len(st) > 0:
self._recipe.append(NamingTypeStatic(pos, st))
@property
def num_constants(self):
"""
        Return the number of necessary constant parameters.
"""
return self._num_constants
def pick(self, variables=None, constants=None):
"""
Pick the next name.
:param variables: Named standardized variable dict.
        :param constants: List of ordered constant strings.
        :return: A newly generated name. The serial, if used, is incremented afterwards.
"""
if variables is None:
variables = {}
if constants is None:
constants = []
name = ""
for p in self._recipe:
name += p.value(variables, constants)
return name
class NameBuilderManager(object):
GLOBAL_ACCESSION = "accession"
GLOBAL_BATCH = "batch"
builders = {}
@classmethod
def init(cls):
NamingTypeHash.init()
@classmethod
def register(cls, name, builder):
if name in cls.builders:
raise ValueError("Already defined name builder for this name")
cls.builders[name] = builder
@classmethod
def get(cls, name):
return cls.builders.get(name)
@classmethod
def has(cls, name):
return name in cls.builders
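# Illustrative usage (a sketch, not part of the original module; it assumes a
# PostgreSQL sequence named 'accession_naming_seq' already exists):
#
#   builder = NameBuilder('accession_naming_seq',
#                         NameBuilder.PREFIXED_SERIAL_WITH_DATE)
#   NameBuilderManager.register(NameBuilderManager.GLOBAL_ACCESSION, builder)
#   name = NameBuilderManager.get(NameBuilderManager.GLOBAL_ACCESSION).pick(
#       constants=['ACC'])   # e.g. 'ACC_000042_20180108'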
| [
"[email protected]"
] | |
9b8c7faa9ecbc4bf81e1ed72473dbe553ffe7c31 | df83f97ed2c6dd199005e96bc7c494cfb3b49f8c | /GeeksForGeeks/Chocolate Distribution Problem.py | 803170c58228d3d30393c9b4bb6f758534761bf3 | [] | no_license | poojan14/Python-Practice | 45f0b68b0ad2f92bbf0b92286602d64f3b1ae992 | ed98acc788ba4a1b53bec3d0757108abb5274c0f | refs/heads/master | 2022-03-27T18:24:18.130598 | 2019-12-25T07:26:09 | 2019-12-25T07:26:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,039 | py | '''
//MEMORY ERROR
from itertools import permutations
if __name__=='__main__':
T=int(input())
for _ in range(T):
N=int(input())
A=list(map(int,input().split()))
M=int(input())
A.sort()
if M==1:print('0')
else:
lst=[]
l=list(permutations(A,M))
for ch in l:
ch=list(ch)
ch.sort()
lst.append(ch[-1]-ch[0])
print(min(lst))
'''
import sys
def MinimumDifference(arr, n, m):
    # Slide a window of m consecutive values over the sorted array and keep
    # the window with the smallest spread (max - min).
    if n == 0 or m == 0 or m == 1:
        return 0
    if m > n:
        return -1
    arr.sort()
    i = 0
    first, last = 0, 0
    min_diff = sys.maxsize
    while i + m - 1 < n:
        diff = arr[i + m - 1] - arr[i]
        if diff < min_diff:
            min_diff = diff
            first = i
            last = i + m - 1
        i += 1
    return arr[last] - arr[first]
if __name__ == '__main__':
    T = int(input())
    for _ in range(T):
        N = int(input())
        A = list(map(int, input().split()))
        M = int(input())
        print(MinimumDifference(A, N, M))
| [
"[email protected]"
] | |
14fc8447bbed8c468586a52217f4963fdec8fc15 | e3b42e43555cb34e9a7f44c5e1e42b06c89e2b49 | /envi/tests/msp430/irlc.py | f50db0e4a61b68e1fb1c41a4d5605096c4ae251d | [
"Apache-2.0"
] | permissive | bat-serjo/vivisect-py3 | 77eed20e8e78ff0f5bbde57eb7709c68617aeb1d | 75d58115b09c209a042713736181888fad31482c | refs/heads/master | 2021-01-11T21:54:42.853791 | 2019-01-08T20:15:57 | 2019-01-08T20:15:57 | 78,873,268 | 12 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,176 | py | checks = [
# RLC
(
'RLC r15 (destination negative + overflow)',
{ 'regs': [(REG_R15, 0x5555)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "0f6f", 'data': "" },
{ 'regs': [(REG_R15, 0xaaaa)], 'flags': [(SR_N, 1), (SR_Z, 0), (SR_C, 0), (SR_V, 1)], 'code': "0f6f", 'data': "" }
),
(
'RLC r15 (destination C=1 + negative + overflow)',
{ 'regs': [(REG_R15, 0x5555)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 1), (SR_V, 0)], 'code': "0f6f", 'data': "" },
{ 'regs': [(REG_R15, 0xaaab)], 'flags': [(SR_N, 1), (SR_Z, 0), (SR_C, 0), (SR_V, 1)], 'code': "0f6f", 'data': "" }
),
(
'RLC r15 (destination carry + zero + overflow)',
{ 'regs': [(REG_R15, 0x8000)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "0f6f", 'data': "" },
{ 'regs': [(REG_R15, 0x0)], 'flags': [(SR_N, 0), (SR_Z, 1), (SR_C, 1), (SR_V, 1)], 'code': "0f6f", 'data': "" }
),
(
'RLC r15 (destination negative + overflow)',
{ 'regs': [(REG_R15, 0x4000)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "0f6f", 'data': "" },
{ 'regs': [(REG_R15, 0x8000)], 'flags': [(SR_N, 1), (SR_Z, 0), (SR_C, 0), (SR_V, 1)], 'code': "0f6f", 'data': "" }
),
(
'RLC r15 (destination negative + carry)',
{ 'regs': [(REG_R15, 0xc000)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "0f6f", 'data': "" },
{ 'regs': [(REG_R15, 0x8000)], 'flags': [(SR_N, 1), (SR_Z, 0), (SR_C, 1), (SR_V, 0)], 'code': "0f6f", 'data': "" }
),
# RLC.b
(
'RLC.b r15 (destination negative + overflow)',
{ 'regs': [(REG_R15, 0x1155)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "4f6f", 'data': "" },
{ 'regs': [(REG_R15, 0xaa)], 'flags': [(SR_N, 1), (SR_Z, 0), (SR_C, 0), (SR_V, 1)], 'code': "4f6f", 'data': "" }
),
(
'RLC.b r15 (destination C=1 + negative + overflow)',
{ 'regs': [(REG_R15, 0x1155)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 1), (SR_V, 0)], 'code': "4f6f", 'data': "" },
{ 'regs': [(REG_R15, 0xab)], 'flags': [(SR_N, 1), (SR_Z, 0), (SR_C, 0), (SR_V, 1)], 'code': "4f6f", 'data': "" }
),
(
'RLC.b r15 (destination carry + zero + overflow)',
{ 'regs': [(REG_R15, 0x1180)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "4f6f", 'data': "" },
{ 'regs': [(REG_R15, 0x0)], 'flags': [(SR_N, 0), (SR_Z, 1), (SR_C, 1), (SR_V, 1)], 'code': "4f6f", 'data': "" }
),
(
'RLC.b r15 (destination negative + overflow)',
{ 'regs': [(REG_R15, 0x1140)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "4f6f", 'data': "" },
{ 'regs': [(REG_R15, 0x80)], 'flags': [(SR_N, 1), (SR_Z, 0), (SR_C, 0), (SR_V, 1)], 'code': "4f6f", 'data': "" }
),
(
'RLC.b r15 (destination negative + carry)',
{ 'regs': [(REG_R15, 0x11c0)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "4f6f", 'data': "" },
{ 'regs': [(REG_R15, 0x80)], 'flags': [(SR_N, 1), (SR_Z, 0), (SR_C, 1), (SR_V, 0)], 'code': "4f6f", 'data': "" }
),
]
| [
"[email protected]"
] | |
1a59a935ece142b1fba84eebe71dbdb2f3ddd079 | 9161503ddd4d3044a9481cb519a4f30b7f371335 | /venv/bin/pip3 | 3e07a514109385314762d4908a7ff1c67d334d3f | [] | no_license | cuixiaozhao/HelloFlask | 48112e72300549dc06cc5abfe3c0869a902ce9ab | 165c8a69204f9dec9b09de72c4eb0468ec1d41a0 | refs/heads/master | 2020-03-28T13:36:39.983346 | 2018-09-12T03:48:55 | 2018-09-12T03:48:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | #!/Users/cuixiaozhao/PycharmProjects/Flask/HelloFlask/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| [
"19930911cXS"
] | 19930911cXS |
|
f008b4ce82e6c21360f7bef5df058c46001c8a1f | 17f6881c70401dc63757cc7b5fa4d9dd396689e3 | /src/main/com/libin/yfl/10.py | 606053f9ee34ad635daf307378dc978dcea86602 | [] | no_license | BigDataRoad/Algorithm | 0ab493eeb478125b4beb62d78ce18c73e30b0496 | 2f2fb4f4b84f6c9df8adbada63b327c43ce29ddd | refs/heads/master | 2023-07-02T04:06:51.025648 | 2021-07-28T14:04:55 | 2021-07-28T14:04:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | '''
203. Remove Linked List Elements
Remove all nodes of a linked list whose value equals the given val.
Example:
Input: 1->2->6->3->4->5->6, val = 6
Output: 1->2->3->4->5
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def removeElements(self, head: ListNode, val: int) -> ListNode:
        cur_1 = ListNode(0)  # dummy head for the filtered list
        cur = cur_1
        while head:
            if head.val == val:
                head = head.next  # drop nodes equal to val
            else:
                # copy the kept value onto the result list
                cur.next = ListNode(head.val)
                cur = cur.next
                head = head.next
return cur_1.next | [
"[email protected]"
] | |
be9c5ee84e7952ac4d8ffdddcb2eb46f037ed1d2 | 4fcb2e797ba83b310fe05461d48f02931ea5a427 | /2021/day-12/solution.py | d066e599b9ac7a138623fee02b10f61387a92589 | [] | no_license | BrentChesny/AdventOfCode | 5a642d081505563f7518c5244bb814e9e4dfc5de | dad5224961539149bed5757bbae0ccc35a3a293d | refs/heads/master | 2022-12-11T19:51:22.138655 | 2022-12-04T21:46:29 | 2022-12-04T21:46:29 | 47,266,210 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,775 | py | from collections import defaultdict
def parse_input():
caves = defaultdict(list)
for line in open("input.txt"):
fr, to = line.strip().split("-")
caves[fr].append(to)
caves[to].append(fr)
return caves
def is_small(cave):
return cave.islower()
def solve(caves, current, visited):
if current == "end":
return 1
paths = 0
for destination in caves[current]:
if is_small(destination) and destination in visited:
continue
paths += solve(caves, destination, set(visited | {current}))
return paths
def solve_part_one():
caves = parse_input()
return solve(caves, "start", set())
def solve_with_revisit(caves, current, visited, revisited):
if current == "end":
return 1
paths = 0
for destination in caves[current]:
if is_small(destination):
if destination in visited:
if revisited:
continue
else:
if destination not in ["start", "end"]:
paths += solve_with_revisit(
caves, destination, set(visited | {current}), destination
)
else:
paths += solve_with_revisit(
caves, destination, set(visited | {current}), revisited
)
else:
paths += solve_with_revisit(
caves, destination, set(visited | {current}), revisited
)
return paths
def solve_part_two():
caves = parse_input()
return solve_with_revisit(caves, "start", set(), None)
def main():
print("Part one: ", solve_part_one())
print("Part two: ", solve_part_two())
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
89dcc06948e06f2d7ef859a76a0998462786a207 | 77f2be6cf991effb23cee0a326c14f956536672d | /airbrake/urls.py | d5142e6018d84df79b226391238dc9718bd7ab26 | [] | no_license | chriscauley/django-airbrake-lite | 93df9a826fa76790030f032646c2da75e8f85df4 | bcdde8c4f6f102fedd11e8fd0d7aa195ac9c3312 | refs/heads/master | 2021-01-13T03:18:48.692043 | 2017-09-24T14:25:12 | 2017-09-24T14:25:12 | 77,580,123 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 118 | py | from django.conf.urls import url
import views
urlpatterns = [
url("js_error/$",views.js_error,name="js_error"),
]
| [
"[email protected]"
] | |
a547a4cbb4dff01e327264fb4b7c55d089927cc9 | 9a5b81fd11a5e6fcae6ac166fc44a2d80f7c22e1 | /pyflow/demo/helloWorld/helloWorld.py | 0efe70710cb4bd9645ef0cc98de6e89dadd707b2 | [] | no_license | moleculo/pyflow | a636cbed88dc4014394bd8a55660e6e6f57fe977 | 62ecdf32889d099e5b37eac0b4e17ed6612c6443 | refs/heads/master | 2021-01-18T06:05:05.281246 | 2013-05-08T01:13:33 | 2013-05-08T01:13:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,286 | py | #!/usr/bin/env python
#
# Copyright (c) 2012-2013 Illumina, Inc.
#
# This software is provided under the terms and conditions of the
# Illumina Open Source Software License 1.
#
# You should have received a copy of the Illumina Open Source
# Software License 1 along with this program. If not, see
# <https://github.com/downloads/sequencing/licenses/>.
#
#
# This demo shows possibly the simplest possible pyflow we can create --
# a single 'hello world' task. After experimenting with this file
# please see the 'simpleDemo' for coverage of a few more pyflow features
#
import os.path
import sys
# add module path by hand
#
sys.path.append(os.path.abspath(os.path.dirname(__file__)) + "/../../src")
from pyflow import WorkflowRunner
# all pyflow workflows are written into classes derived from pyflow.WorkflowRunner:
#
class HelloWorkflow(WorkflowRunner) :
# a workflow is defined by overloading the WorkflowRunner.workflow() method:
#
def workflow(self) :
#
# The output for this task will be written to the file helloWorld.out.txt
#
self.addTask("easy_task1", "echo 'Hello World!' >| helloWorld.out.txt")
# Instantiate the workflow
#
wflow = HelloWorkflow()
# Run the workflow:
#
retval = wflow.run()
# done!
sys.exit(retval)
| [
"[email protected]"
] | |
5348c082b53d461884706e83f902f1bd079d2e12 | 31fb7c74b94e46a325e6b05501c6972a401cf423 | /PYTHON/BASIC_PYTHON/수업내용/04/04-029.py | 1d0861db9a70b872a9db5c3adf7ecd597cd10dd6 | [] | no_license | superf2t/TIL | f2dacc30d6b89f3717c0190ac449730ef341f6a4 | cadaaf952c44474bed9b8af71e70754f3dbf86fa | refs/heads/master | 2022-04-10T13:55:24.019310 | 2019-12-12T11:15:31 | 2019-12-12T11:15:31 | 268,215,746 | 1 | 0 | null | 2020-05-31T05:32:46 | 2020-05-31T05:32:46 | null | UTF-8 | Python | false | false | 1,098 | py | #04-029.py
nums='1237894673683038478236749192738623234234'
if 1:
cnt = {}
for num in nums:
cnt.setdefault(num, 0)
cnt[num] += 1
else:
    # The cnt dict built by the for-loop above can also be built in one step with Counter!
from collections import Counter
cnt = Counter(nums)
print(cnt)
# 1. Can we sort in ascending order by occurrence count (frequency)!?!?
# Tricky but fascinating..!
if 0:
# 1-1)
cnt_tmp = { i:cnt[i] for i in sorted(cnt, key = lambda x : cnt[x]) }
print(cnt_tmp)
else:
# 1-2)
cnt_tmp = { i:j for i, j in sorted(cnt.items(), key = lambda x : x[1]) }
print(cnt_tmp)
# 2. Make entries for all keys '0' ~ '9' ...
##cnt_tmp = dict.fromkeys("0123456789", 0)
cnt_tmp = { k : cnt.get(k, 0) for k in "0123456789"}
##for i in cnt_tmp:
## cnt_tmp[i] = cnt.get(i, 0)
print(cnt_tmp)
##from collections import Counter
##X = Counter(nums)
##y = [ (x, y) for x, y in X.items() ]
##y.sort()
##X = { x:y for x, y in y }
##print(X)
| [
"[email protected]"
] | |
fbf299007fe1f34f9f48f8ad4ed2ef2bd8f6d4e2 | 1b2d5f0635459a02f82b574e5de632f67679210a | /5/11_sin_gru_tf.py | 9a2132f121e7e66adb7875b7fbb2408c1a4302a2 | [] | no_license | ydocore/deeplearning-keras-tf2-torch | f9b117e693b4a122bfb37fc77ae082de2140afd7 | 19aa983de1b0f55985179549603327281b92fcb2 | refs/heads/master | 2022-12-22T16:26:11.249773 | 2020-09-15T12:54:26 | 2020-09-15T12:54:26 | 276,814,737 | 0 | 1 | null | 2020-07-03T05:24:38 | 2020-07-03T05:24:38 | null | UTF-8 | Python | false | false | 4,399 | py | '''
5.3.2 GRU - TensorFlow (sine wave)
'''
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import tensorflow as tf
from tensorflow.keras import datasets
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, GRU
from tensorflow.keras import optimizers
from tensorflow.keras import losses
from tensorflow.keras import metrics
from callbacks import EarlyStopping
class RNN(Model):
def __init__(self, hidden_dim):
super().__init__()
self.l1 = GRU(hidden_dim, activation='tanh',
recurrent_activation='sigmoid',
kernel_initializer='glorot_normal',
recurrent_initializer='orthogonal')
self.l2 = Dense(1, activation='linear')
def call(self, x):
h = self.l1(x)
y = self.l2(h)
return y
if __name__ == '__main__':
np.random.seed(123)
tf.random.set_seed(123)
'''
    1. Data preparation
'''
def sin(x, T=100):
return np.sin(2.0 * np.pi * x / T)
def toy_problem(T=100, ampl=0.05):
x = np.arange(0, 2*T + 1)
noise = ampl * np.random.uniform(low=-1.0, high=1.0,
size=len(x))
return sin(x) + noise
T = 100
f = toy_problem(T).astype(np.float32)
length_of_sequences = len(f)
maxlen = 25
x = []
t = []
for i in range(length_of_sequences - maxlen):
x.append(f[i:i+maxlen])
t.append(f[i+maxlen])
x = np.array(x).reshape(-1, maxlen, 1)
t = np.array(t).reshape(-1, 1)
x_train, x_val, t_train, t_val = \
train_test_split(x, t, test_size=0.2, shuffle=False)
'''
    2. Model construction
'''
model = RNN(50)
'''
    3. Model training
'''
criterion = losses.MeanSquaredError()
optimizer = optimizers.Adam(learning_rate=0.001,
beta_1=0.9, beta_2=0.999, amsgrad=True)
train_loss = metrics.Mean()
val_loss = metrics.Mean()
def compute_loss(t, y):
return criterion(t, y)
def train_step(x, t):
with tf.GradientTape() as tape:
preds = model(x)
loss = compute_loss(t, preds)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
train_loss(loss)
return loss
def val_step(x, t):
preds = model(x)
loss = compute_loss(t, preds)
val_loss(loss)
epochs = 1000
batch_size = 100
n_batches_train = x_train.shape[0] // batch_size + 1
n_batches_val = x_val.shape[0] // batch_size + 1
hist = {'loss': [], 'val_loss': []}
es = EarlyStopping(patience=10, verbose=1)
for epoch in range(epochs):
x_, t_ = shuffle(x_train, t_train)
for batch in range(n_batches_train):
start = batch * batch_size
end = start + batch_size
train_step(x_[start:end], t_[start:end])
for batch in range(n_batches_val):
start = batch * batch_size
end = start + batch_size
val_step(x_val[start:end], t_val[start:end])
hist['loss'].append(train_loss.result())
hist['val_loss'].append(val_loss.result())
print('epoch: {}, loss: {:.3}, val_loss: {:.3f}'.format(
epoch+1,
train_loss.result(),
val_loss.result()
))
if es(val_loss.result()):
break
'''
    4. Model evaluation
'''
    # Predict the sine wave
sin = toy_problem(T, ampl=0.)
gen = [None for i in range(maxlen)]
z = x[:1]
for i in range(length_of_sequences - maxlen):
preds = model.predict(z[-1:])
# preds = model(z[-1:])
z = np.append(z, preds)[1:]
z = z.reshape(-1, maxlen, 1)
gen.append(preds[0, 0])
    # Visualize the predicted values
fig = plt.figure()
plt.rc('font', family='serif')
plt.xlim([0, 2*T])
plt.ylim([-1.5, 1.5])
plt.plot(range(len(f)), sin,
color='gray',
linestyle='--', linewidth=0.5)
plt.plot(range(len(f)), gen,
color='black', linewidth=1,
marker='o', markersize=1, markerfacecolor='black',
markeredgecolor='black')
# plt.savefig('output.jpg')
plt.show()
| [
"[email protected]"
] | |
de058075cb519f64d30d752973071422f9008b5b | c27c51f5c33e0431dbe7db6e18c21b249d476cfa | /OpenSource_Python_Code/horizon-master/openstack_dashboard/dashboards/project/images_and_snapshots/snapshots/views.py | 299ec446133658878663a3ffe30cf05d036707cd | [
"Apache-2.0"
] | permissive | bopopescu/Python_Stuff | 9bef74e0db17bb5e3ba2d908ced01ee744820d80 | 9aa94a0fa5e4e802090c7b29ec88b840e304d9e5 | refs/heads/master | 2022-11-20T06:54:36.581623 | 2017-12-04T18:56:02 | 2017-12-04T18:56:02 | 282,171,169 | 0 | 0 | null | 2020-07-24T08:54:37 | 2020-07-24T08:54:36 | null | UTF-8 | Python | false | false | 2,173 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing instance snapshots.
"""
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.images_and_snapshots.snapshots \
import forms as project_forms
class CreateView(forms.ModalFormView):
form_class = project_forms.CreateSnapshot
template_name = 'project/images_and_snapshots/snapshots/create.html'
success_url = reverse_lazy("horizon:project:images_and_snapshots:index")
@memoized.memoized_method
def get_object(self):
try:
return api.nova.server_get(self.request,
self.kwargs["instance_id"])
except Exception:
redirect = reverse('horizon:project:instances:index')
exceptions.handle(self.request,
_("Unable to retrieve instance."),
redirect=redirect)
def get_initial(self):
return {"instance_id": self.kwargs["instance_id"]}
def get_context_data(self, **kwargs):
context = super(CreateView, self).get_context_data(**kwargs)
context['instance'] = self.get_object()
return context
| [
"[email protected]"
] | |
24055a1a6e8b5a0c6a0d50ceec70784bc1200932 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/datadog/azure-mgmt-datadog/azure/mgmt/datadog/aio/_configuration.py | 8f5a2a189161e3f0b90013118775e86eb5fd19a4 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 3,791 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class MicrosoftDatadogClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
"""Configuration for MicrosoftDatadogClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
:keyword api_version: Api Version. Default value is "2022-06-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(self, credential: "AsyncTokenCredential", subscription_id: str, **kwargs: Any) -> None:
super(MicrosoftDatadogClientConfiguration, self).__init__(**kwargs)
api_version = kwargs.pop("api_version", "2022-06-01") # type: Literal["2022-06-01"]
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.api_version = api_version
self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"])
kwargs.setdefault("sdk_moniker", "mgmt-datadog/{}".format(VERSION))
self._configure(**kwargs)
def _configure(self, **kwargs: Any) -> None:
self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get("authentication_policy")
if self.credential and not self.authentication_policy:
self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(
self.credential, *self.credential_scopes, **kwargs
)
| [
"[email protected]"
] | |
0106a8b00c02f1cf93357f0dbdee964c833a0cad | 9ed385053e7f28bfd0c6f186fc4963faac43eb96 | /store/models.py | 3f05789263ca28aa75c52f50113c0aaacc36d485 | [] | no_license | Pagante/greatkart-django | ffadfb5d4827220f3df588fb1d21dc28f1359ce0 | d4bb679c7fd270435f4ce0cc8854bdb3d2e134dd | refs/heads/main | 2023-05-12T01:07:53.092949 | 2021-05-30T16:34:07 | 2021-05-30T16:34:07 | 365,899,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,999 | py | from django.db import models
from django.db.models.deletion import CASCADE
from django.urls.base import reverse
from category.models import Category
from accounts.models import Account
from django.db.models import Avg, Count
# Create your models here.
class Product(models.Model):
product_name = models.CharField(max_length=200, unique=True)
slug = models.SlugField(max_length=200, unique=True)
description = models.TextField(max_length=500,blank=True)
price = models.IntegerField()
images = models.ImageField(upload_to='photos/products')
stock = models.IntegerField()
is_available = models.BooleanField(default=False)
category = models.ForeignKey(Category, on_delete=models.CASCADE)
create_date = models.DateTimeField(auto_now_add=True)
modified_date = models.DateTimeField(auto_now=True)
def get_url(self):
return reverse('product_detail', args= [self.category.slug, self.slug])
def __str__(self):
return self.product_name
def averageReview(self):
reviews = reviewRating.objects.filter(product=self, status=True).aggregate(average=Avg('rating'))
avg = 0
if reviews['average'] is not None:
avg = float(reviews['average'])
return avg
def countReviews(self):
reviews = reviewRating.objects.filter(product=self, status=True).aggregate(count=Count('id'))
count=0
if reviews['count'] is not None:
count = int(reviews['count'])
return count
class VariationManager(models.Manager):
def colors(self):
return super(VariationManager, self).filter(variation_category='color', is_active = True)
def sizes(self):
return super(VariationManager, self).filter(variation_category ='size', is_active = True)
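# Illustrative usage of the custom manager (a sketch, not part of the
# original file):
#   Variation.objects.colors()  # active variations in the 'color' category
#   Variation.objects.sizes()   # active variations in the 'size' category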
variation_category_choices = (
('color', 'color'),
('size', 'size')
)
class Variation(models.Model):
product = models.ForeignKey(Product, on_delete=models.CASCADE)
variation_category = models.CharField(max_length=200, choices= variation_category_choices)
variation_value = models.CharField(max_length=200)
is_active = models.BooleanField(default=True)
create_date = models.DateTimeField(auto_now=True)
objects = VariationManager()
def __str__(self):
return self.variation_value
class reviewRating(models.Model):
product = models.ForeignKey(Product, on_delete= models.CASCADE)
user = models.ForeignKey(Account, on_delete=CASCADE)
subject = models.CharField(max_length=50, blank=True)
reviews = models.TextField(max_length=500, blank=True)
rating = models.FloatField()
ip = models.CharField(max_length=20)
status = models.BooleanField(default=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.subject
| [
"[email protected]"
] | |
6b4e19b6546fe9176985daf6e6d044c1addd810a | 89c4a43a505df8fdf1f0d7386988c4896c2e631b | /google/ads/googleads/v6/services/services/shopping_performance_view_service/client.py | 1fe4cbf44ebee3cd684400fb58b5e6bba9d5f8fb | [
"Apache-2.0"
] | permissive | hurricanelennane/google-ads-python | a0a1fed690776a8bb2e81f637eb7eae10fb4992f | 310a488b6fdad9d5beea8fa4b166edce779a2511 | refs/heads/master | 2023-07-04T03:07:53.344466 | 2021-07-16T19:06:36 | 2021-07-16T19:06:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,727 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v6.resources.types import shopping_performance_view
from google.ads.googleads.v6.services.types import (
shopping_performance_view_service,
)
from .transports.base import (
ShoppingPerformanceViewServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import ShoppingPerformanceViewServiceGrpcTransport
class ShoppingPerformanceViewServiceClientMeta(type):
"""Metaclass for the ShoppingPerformanceViewService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[ShoppingPerformanceViewServiceTransport]]
_transport_registry["grpc"] = ShoppingPerformanceViewServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[ShoppingPerformanceViewServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class ShoppingPerformanceViewServiceClient(
metaclass=ShoppingPerformanceViewServiceClientMeta
):
"""Service to fetch Shopping performance views."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ShoppingPerformanceViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ShoppingPerformanceViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> ShoppingPerformanceViewServiceTransport:
"""Return the transport used by the client instance.
Returns:
ShoppingPerformanceViewServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def shopping_performance_view_path(customer_id: str,) -> str:
"""Return a fully-qualified shopping_performance_view string."""
return "customers/{customer_id}/shoppingPerformanceView".format(
customer_id=customer_id,
)
@staticmethod
def parse_shopping_performance_view_path(path: str) -> Dict[str, str]:
"""Parse a shopping_performance_view path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/shoppingPerformanceView$", path
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[credentials.Credentials] = None,
transport: Union[
str, ShoppingPerformanceViewServiceTransport, None
] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the shopping performance view service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.ShoppingPerformanceViewServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
)
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, ShoppingPerformanceViewServiceTransport):
# transport is a ShoppingPerformanceViewServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = ShoppingPerformanceViewServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_shopping_performance_view(
self,
request: shopping_performance_view_service.GetShoppingPerformanceViewRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> shopping_performance_view.ShoppingPerformanceView:
r"""Returns the requested Shopping performance view in
full detail.
Args:
request (:class:`google.ads.googleads.v6.services.types.GetShoppingPerformanceViewRequest`):
The request object. Request message for
[ShoppingPerformanceViewService.GetShoppingPerformanceView][google.ads.googleads.v6.services.ShoppingPerformanceViewService.GetShoppingPerformanceView].
resource_name (:class:`str`):
Required. The resource name of the
Shopping performance view to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v6.resources.types.ShoppingPerformanceView:
Shopping performance view.
Provides Shopping campaign statistics
aggregated at several product dimension
levels. Product dimension values from
Merchant Center such as brand, category,
custom attributes, product condition and
product type will reflect the state of
each dimension as of the date and time
when the corresponding event was
recorded.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a shopping_performance_view_service.GetShoppingPerformanceViewRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request,
shopping_performance_view_service.GetShoppingPerformanceViewRequest,
):
request = shopping_performance_view_service.GetShoppingPerformanceViewRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_shopping_performance_view
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("ShoppingPerformanceViewServiceClient",)
| [
"[email protected]"
] | |
1a7bd63272c5441eea5544e3e4fd0a3dd2a93d9b | 537345f90de44dac4e2a20037d21f858f82e3120 | /concatenateGenbankFiles.py | 08d2e28a0b99eda871a365d7827e7b9b97120c28 | [] | no_license | kaiyaprovost/misc_scripts | f8fc8ca646c5c97ad3495e612bc9656e2b8d238c | 5c460ea608c13ff271fa6772fe548b89aa68c225 | refs/heads/master | 2021-11-11T15:33:34.211463 | 2021-11-10T23:11:56 | 2021-11-10T23:11:56 | 237,049,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,176 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 9 18:27:49 2016
@author: kprovost
This script takes multiple GenBank .gb files and concatenates them into one file
Usage: give path, creates a folder
python concatenateGenbankFiles.py <optional path>
"""
def concatGB(filename, outpath):
    # Append the full contents of one .gb file to the combined output file.
    with open(filename, "r") as infile, open(outpath, "a") as outfile:
        entry = infile.read()
        outfile.write(entry + "\n")
def main():
import glob
import os
import sys
#path = sys.argv[1]
    try:
        path = sys.argv[1]
        print("\tPath entered: ", path)
    except IndexError:
        print("No path given, using current working directory")
        path = os.getcwd()
print(path)
os.chdir(path)
outpath = path+"/concatGenbankFiles/"
if not os.path.exists(outpath):
print("creating folder: ",outpath)
os.makedirs(outpath)
concatName = "ConcatenatedGbFiles.gb"
print("Concatenated file: ",concatName)
outfile = outpath+concatName
os.chdir(path)
for filename in glob.glob("*.gb"):
concatGB(filename,outfile)
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
9395ac23fad1778148b66efae8bb997bf22d7431 | 18a645c8e543c905528364fad8c429e209903e80 | /acapy-client/acapy_client/api/issue_credential_v_10/post_issue_credential_send_offer.py | 7d95d291316c0d7eac46e78b101473fa2a2c0925 | [] | no_license | cjhowland/acapy-revocation-demo | 854e9aff4236c034ae9cc00206abde87f257bc45 | 01c21eb38d085c5633e505908c26c2e9ebfe3110 | refs/heads/main | 2023-07-16T02:01:05.659695 | 2021-05-12T17:00:44 | 2021-05-12T17:00:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,430 | py | from typing import Any, Dict, Optional
import httpx
from ...client import Client
from ...models.v10_credential_exchange import V10CredentialExchange
from ...models.v10_credential_offer_request import V10CredentialOfferRequest
from ...types import Response
def _get_kwargs(
*,
client: Client,
json_body: V10CredentialOfferRequest,
) -> Dict[str, Any]:
url = "{}/issue-credential/send-offer".format(client.base_url)
headers: Dict[str, Any] = client.get_headers()
cookies: Dict[str, Any] = client.get_cookies()
json_json_body = json_body.to_dict()
return {
"url": url,
"headers": headers,
"cookies": cookies,
"timeout": client.get_timeout(),
"json": json_json_body,
}
def _parse_response(*, response: httpx.Response) -> Optional[V10CredentialExchange]:
if response.status_code == 200:
response_200 = V10CredentialExchange.from_dict(response.json())
return response_200
return None
def _build_response(*, response: httpx.Response) -> Response[V10CredentialExchange]:
return Response(
status_code=response.status_code,
content=response.content,
headers=response.headers,
parsed=_parse_response(response=response),
)
def sync_detailed(
*,
client: Client,
json_body: V10CredentialOfferRequest,
) -> Response[V10CredentialExchange]:
kwargs = _get_kwargs(
client=client,
json_body=json_body,
)
response = httpx.post(
**kwargs,
)
return _build_response(response=response)
def sync(
*,
client: Client,
json_body: V10CredentialOfferRequest,
) -> Optional[V10CredentialExchange]:
""" """
return sync_detailed(
client=client,
json_body=json_body,
).parsed
async def asyncio_detailed(
*,
client: Client,
json_body: V10CredentialOfferRequest,
) -> Response[V10CredentialExchange]:
kwargs = _get_kwargs(
client=client,
json_body=json_body,
)
async with httpx.AsyncClient() as _client:
response = await _client.post(**kwargs)
return _build_response(response=response)
async def asyncio(
*,
client: Client,
json_body: V10CredentialOfferRequest,
) -> Optional[V10CredentialExchange]:
""" """
return (
await asyncio_detailed(
client=client,
json_body=json_body,
)
).parsed
| [
"[email protected]"
] | |
299bb263f3c5c29e06546b2fafcc922341219476 | 7e516383bd528e79719f04e88e8839671de5f81b | /l10n_ec_talent_growth/__manifest__.py | 7342e79f47b66f49b21ce8565d79023d863d2ae3 | [] | no_license | hc-mic29/primerabase | c96b1bd8ee77d4217b528dd4f9f50274f5711fca | 16fcc33bbf5bfcda236cc1a7a595cccf15aa5b44 | refs/heads/main | 2023-06-14T23:50:46.970941 | 2021-07-06T22:38:40 | 2021-07-06T22:38:40 | 383,600,215 | 0 | 0 | null | 2021-07-06T21:17:38 | 2021-07-06T21:17:38 | null | UTF-8 | Python | false | false | 1,033 | py | # -*- coding: utf-8 -*-
{
'name': "Talent Growth",
'summary': """
        Talent Growth module""",
'description': """
        This module manages the development and growth of personnel
""",
'author': "Opa Consulting",
'website': "http://www.opa-consulting.com",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/12.0/odoo/addons/base/data/ir_module_category_data.xml
# for the full list
'category': 'Employee',
'version': '0.1',
# any module necessary for this one to work correctly
'depends': ['base','hr'],
# always loaded
'data': [
'security/hr_talent_growth_security.xml',
'security/ir.model.access.csv',
'views/hr_employee_view.xml',
'views/hr_talent_growth.xml',
# 'views/templates.xml',
],
# only loaded in demonstration mode
'demo': [
# 'demo/demo.xml',
],
'installable':True,
'application':True,
} | [
"[email protected]"
] | |
7d4ec04cdb2b19f3b0eb63afcab1dce44a9b3f4a | 62def70e2d802375b1ad28b0ac85fee2010ee0a9 | /flask/server/app2.py | 5ba7860c73d4a3a14efbd47ab066e88ac9058194 | [] | no_license | MarkAYoder/BeagleBoard-exercises | c48028b6e919d8c04dedfd2040a133c760f0f567 | 2fab7c7f7aa09bf101168dfb279e690bc43a6514 | refs/heads/master | 2023-07-22T08:06:19.482358 | 2023-07-12T19:24:51 | 2023-07-12T19:24:51 | 5,111,513 | 48 | 41 | null | 2021-07-29T18:02:29 | 2012-07-19T15:07:14 | JavaScript | UTF-8 | Python | false | false | 674 | py | #!/usr/bin/env python3
# From: https://towardsdatascience.com/python-webserver-with-flask-and-raspberry-pi-398423cc6f5d
import gpiod
CHIP = '0' # P9_11
offsets=[30]
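# line offset 30 on gpiochip0 corresponds to header pin P9_11 (per the CHIP comment above)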
from flask import Flask, render_template
app = Flask(__name__)
chip = gpiod.Chip(CHIP)
lines = chip.get_lines(offsets)
# Set button as an input
lines.request(consumer="app2.py", type=gpiod.LINE_REQ_DIR_IN)
@app.route("/")
def index():
# Read Button Status
vals = lines.get_values()
templateData = {
'title' : 'GPIO input Status!',
'button' : vals,
}
return render_template('index2.html', **templateData)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8081, debug=True) | [
"[email protected]"
] | |
5971a56f860c99200f932f59b086d7cf6ebe4b6a | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/merra_scripts/03_model_fitting/merraRF882/641-tideGauge.py | dac5196ec7ba7de35a7df47a6c6f4edc7937cd21 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,456 | py | # -*- coding: utf-8 -*-
"""
Created on Mon May 4 15:51:30 2020
This program is designed to validate a Random Forest
model by using the KFOLD method
@author: Michael Tadesse
"""
#import packages
import os
import glob
import numpy as np
import pandas as pd
from sklearn import metrics
from scipy import stats
import seaborn as sns
import matplotlib.pyplot as plt
from datetime import datetime
from sklearn.ensemble import RandomForestRegressor
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
def validateRF():
"""
run KFOLD method for regression
"""
#defining directories
dir_in = "/lustre/fs0/home/mtadesse/merraAllLagged"
dir_out = "/lustre/fs0/home/mtadesse/merraRFValidation"
surge_path = "/lustre/fs0/home/mtadesse/05_dmax_surge_georef"
#cd to the lagged predictors directory
os.chdir(dir_in)
x = 641
y = 642
#empty dataframe for model validation
df = pd.DataFrame(columns = ['tg', 'lon', 'lat', 'num_year', \
'num_95pcs','corrn', 'rmse'])
#looping through
for tg in range(x,y):
os.chdir(dir_in)
#filter only .csv files
tgNames = []
for file in glob.glob("*.csv"):
tgNames.append(file)
tg_name = sorted(tgNames)[tg]
print(tg_name)
##########################################
#check if this tg is already taken care of
##########################################
os.chdir(dir_out)
if os.path.isfile(tg_name):
print("this tide gauge is already taken care of")
return "file already analyzed!"
os.chdir(dir_in)
#load predictor
pred = pd.read_csv(tg_name)
pred.drop('Unnamed: 0', axis = 1, inplace = True)
#add squared and cubed wind terms (as in WPI model)
pickTerms = lambda x: x.startswith('wnd')
wndTerms = pred.columns[list(map(pickTerms, pred.columns))]
wnd_sqr = pred[wndTerms]**2
wnd_cbd = pred[wndTerms]**3
pred = pd.concat([pred, wnd_sqr, wnd_cbd], axis = 1)
#standardize predictor data
dat = pred.iloc[:,1:]
scaler = StandardScaler()
print(scaler.fit(dat))
dat_standardized = pd.DataFrame(scaler.transform(dat), \
columns = dat.columns)
pred_standardized = pd.concat([pred['date'], dat_standardized], axis = 1)
#load surge data
os.chdir(surge_path)
surge = pd.read_csv(tg_name)
surge.drop('Unnamed: 0', axis = 1, inplace = True)
#remove duplicated surge rows
surge.drop(surge[surge['ymd'].duplicated()].index, axis = 0, inplace = True)
surge.reset_index(inplace = True)
surge.drop('index', axis = 1, inplace = True)
#adjust surge time format to match that of pred
time_str = lambda x: str(datetime.strptime(x, '%Y-%m-%d'))
surge_time = pd.DataFrame(list(map(time_str, surge['ymd'])), columns = ['date'])
time_stamp = lambda x: (datetime.strptime(x, '%Y-%m-%d %H:%M:%S'))
surge_new = pd.concat([surge_time, surge[['surge', 'lon', 'lat']]], axis = 1)
#merge predictors and surge to find common time frame
pred_surge = pd.merge(pred_standardized, surge_new.iloc[:,:2], on='date', how='right')
pred_surge.sort_values(by = 'date', inplace = True)
#find rows that have nans and remove them
row_nan = pred_surge[pred_surge.isna().any(axis =1)]
pred_surge.drop(row_nan.index, axis = 0, inplace = True)
pred_surge.reset_index(inplace = True)
pred_surge.drop('index', axis = 1, inplace = True)
#in case pred and surge don't overlap
if pred_surge.shape[0] == 0:
print('-'*80)
            print("Predictors and Surge don't overlap")
print('-'*80)
continue
pred_surge['date'] = pd.DataFrame(list(map(time_stamp, \
pred_surge['date'])), \
columns = ['date'])
#prepare data for training/testing
X = pred_surge.iloc[:,1:-1]
y = pd.DataFrame(pred_surge['surge'])
y = y.reset_index()
y.drop(['index'], axis = 1, inplace = True)
#apply PCA
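        # PCA(.95) keeps the smallest number of components that explains 95% of the variance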
pca = PCA(.95)
pca.fit(X)
X_pca = pca.transform(X)
#apply 10 fold cross validation
        kf = KFold(n_splits=10, shuffle=True, random_state=29)  # shuffle=True is required for random_state to take effect
metric_corr = []; metric_rmse = []; #combo = pd.DataFrame(columns = ['pred', 'obs'])
for train_index, test_index in kf.split(X):
X_train, X_test = X_pca[train_index], X_pca[test_index]
y_train, y_test = y['surge'][train_index], y['surge'][test_index]
#train regression model
rf= RandomForestRegressor(n_estimators = 50, random_state = 101, \
min_samples_leaf = 1)
rf.fit(X_train, y_train)
#predictions
predictions = rf.predict(X_test)
# pred_obs = pd.concat([pd.DataFrame(np.array(predictions)), \
# pd.DataFrame(np.array(y_test))], \
# axis = 1)
# pred_obs.columns = ['pred', 'obs']
# combo = pd.concat([combo, pred_obs], axis = 0)
#evaluation matrix - check p value
if stats.pearsonr(y_test, predictions)[1] >= 0.05:
print("insignificant correlation!")
continue
else:
print(stats.pearsonr(y_test, predictions))
metric_corr.append(stats.pearsonr(y_test, predictions)[0])
print(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
print()
metric_rmse.append(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
#number of years used to train/test model
num_years = (pred_surge['date'][pred_surge.shape[0]-1] -\
pred_surge['date'][0]).days/365
longitude = surge['lon'][0]
latitude = surge['lat'][0]
num_pc = X_pca.shape[1] #number of principal components
corr = np.mean(metric_corr)
rmse = np.mean(metric_rmse)
print('num_year = ', num_years, ' num_pc = ', num_pc ,'avg_corr = ',np.mean(metric_corr), ' - avg_rmse (m) = ', \
np.mean(metric_rmse), '\n')
#original size and pca size of matrix added
new_df = pd.DataFrame([tg_name, longitude, latitude, num_years, num_pc, corr, rmse]).T
new_df.columns = ['tg', 'lon', 'lat', 'num_year', \
'num_95pcs','corrn', 'rmse']
df = pd.concat([df, new_df], axis = 0)
#save df as cs - in case of interruption
os.chdir(dir_out)
df.to_csv(tg_name)
#run script
validateRF()
| [
"[email protected]"
] | |
d0306b119643795c0a1f9cc58722de910337f986 | 83efa5604be59078372c55998b9c686774e73e89 | /utils/utils.py | 17f3c5461f1aaaaefd48e7dc908a631518ace97a | [
"MIT"
] | permissive | Dawa406/import_to_gee | 5c7a2db656cf6fe8ad3b4d954bcc38e06b4a0d32 | 4d13a261fff371eb6a18076fdd1ea742fddd814b | refs/heads/master | 2023-02-07T14:12:07.878640 | 2020-12-24T08:35:31 | 2020-12-24T08:35:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | import os
import ee
from utils import message as ms
ee.Initialize()
def display_asset(output, asset):
"""remove the manifest from the asset name and display it to the user"""
asset = asset.replace('projects/earthengine-legacy/assets/', '')
output.add_msg(ms.asset_created.format(asset), 'success')
return
def isAsset(asset_description, folder):
    """Check if the asset already exists in the user's asset folder
    Args:
        asset_description (str) : the description of the asset
        folder (str): the folder of the glad assets
    Returns:
        exist (bool): True if the asset is already in the folder
    """
    exist = False
    asset_list = ee.data.listAssets({'parent': folder})['assets']
    for asset in asset_list:
        if asset['name'] == os.path.join(folder, asset_description):
            exist = True
            break
    return exist | [
"[email protected]"
] | |
fab5b4d8041420a521a36b1045fd6be52e330cc1 | e298bf40ae88c2bd8e0a07f3e92f3e08a92edcc6 | /nova/cmd/compute.py | 08389a16f24119b2c164ccd399e82698802ab5ff | [] | no_license | KevinKaiQian/polar-bear | 46a814c746246394f76505846166673a049f12f2 | 61d4e0ccd7328a6aa543af3b75e5f7fedf98bf8e | refs/heads/master | 2022-04-29T02:15:35.536039 | 2021-05-19T12:33:07 | 2021-05-19T12:33:07 | 172,068,536 | 2 | 0 | null | 2022-03-29T21:56:51 | 2019-02-22T13:11:58 | Python | UTF-8 | Python | false | false | 2,444 | py | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Starter script for Nova Compute."""
import shlex
import sys
import os
#sys.path.append(os.path.dirname(os.getcwd()))
sys.path.append(os.path.dirname(os.path.dirname(os.getcwd())))
from oslo_log import log as logging
from oslo_privsep import priv_context
#import pdb;pdb.set_trace()
from nova.cmd import common as cmd_common
from nova.conductor import rpcapi as conductor_rpcapi
from nova import config
from nova.i18n import _LW
from nova import objects
from nova.objects import base as objects_base
from nova import service
from nova import utils
#from nova import version
from nova import rpc
from nova.db.sqlalchemy import api as sqlalchemy_api
CONF = config.CONF
LOG = logging.getLogger('nova.compute')
def main():
#config.parse_args(sys.argv)
logging.setup(CONF, 'nova')
rpc.set_defaults(control_exchange='nova')
rpc.init(CONF)
sqlalchemy_api.configure(CONF)
#priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
#utils.monkey_patch()
objects.register_all()
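    # register all versioned object classes so they can be resolved over RPC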
#gmr.TextGuruMeditation.setup_autorun(version)
if not CONF.conductor.use_local:
cmd_common.block_db_access('nova-compute')
objects_base.NovaObject.indirection_api = \
conductor_rpcapi.ConductorAPI()
else:
LOG.warning(_LW('Conductor local mode is deprecated and will '
'be removed in a subsequent release'))
#import pdb;pdb.set_trace()
server = service.Service.create(binary='nova-compute',
topic=CONF.compute_topic,
db_allowed=CONF.conductor.use_local)
service.serve(server)
service.wait()
if "__main__" == __name__:
main()
| [
"[email protected]"
] | |
a3023c4c318b3cbafd0372ec93f51a1666a9e0cf | 090e04cd5c7f020a03eb6f0dfdb7d37cce555288 | /my_navigation_interface/navigation_interface.py | 55904118838f7e447016caec8a17dff0372079da | [] | no_license | imfog/Groove | dbcddbc040dbd4cd30991b20568046d9ac5590d3 | 3bcdc980b798e901eb1e3e87ebdada268c36f1d4 | refs/heads/master | 2022-12-28T21:57:12.125621 | 2020-10-15T03:32:15 | 2020-10-15T03:32:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,491 | py | # coding:utf-8
from ctypes.wintypes import HWND
from PyQt5.QtCore import Qt, pyqtSignal, QPoint, QEvent
from PyQt5.QtWidgets import QWidget
from .navigation_bar import NavigationBar
from .navigation_widget import NavigationWidget
from .navigation_menu import NavigationMenu
from effects import WindowEffect
class NavigationInterface(QWidget):
""" 导航界面 """
COMPACT = 0 # 折叠窗口
OVERLAY = 1 # 显示导航菜单,窗口不展开
IN_LINE = 2 # 导航窗口展开
displayModeChanged = pyqtSignal(int)
switchInterfaceSig = pyqtSignal(int)
showPlayingInterfaceSig = pyqtSignal()
showCreatePlaylistPanelSig = pyqtSignal()
switchToSettingInterfaceSig = pyqtSignal()
switchToMyMusicInterfaceSig = pyqtSignal()
switchToPlaylistCardInterfaceSig = pyqtSignal()
def __init__(self, parent=None):
super().__init__(parent)
self.effect = WindowEffect()
        # create widgets
self.navigationBar = NavigationBar(self)
self.navigationWidget = NavigationWidget(self)
self.navigationMenu = NavigationMenu(self)
self.__navigation_list = [self.navigationBar,
self.navigationWidget, self.navigationMenu]
        # flags for showing the navigation menu / navigation widget
self.__displayMode = self.COMPACT
self.__isExpanded = False
self.__isOverlay = False
        # initialize
self.__initWidget()
def __initWidget(self):
""" 初始化小部件 """
self.resize(self.navigationBar.width(), 800)
self.setCurrentIndex(0)
self.navigationWidget.hide()
        # connect signals to slots
        self.__connectSignalToSlot()
        # install the event filter
self.navigationMenu.installEventFilter(self)
def __connectSignalToSlot(self):
""" 信号连接到槽 """
# 发送切换窗口信号
self.navigationBar.switchInterfaceSig.connect(self.switchInterfaceSig)
self.navigationMenu.switchInterfaceSig.connect(self.switchInterfaceSig)
        # keep the selected-button state in sync
self.navigationBar.selectedButtonChanged.connect(
self.__selectedButtonChangedSlot)
self.navigationWidget.selectedButtonChanged.connect(
self.__selectedButtonChangedSlot)
self.navigationMenu.selectedButtonChanged.connect(
self.__selectedButtonChangedSlot)
        # forward the interface-switching signal
self.navigationWidget.switchInterfaceSig.connect(
self.switchInterfaceSig)
        # connect button clicked signals to slots
self.navigationBar.showMenuButton.clicked.connect(
self.__expandNavigationWindow)
self.navigationBar.searchButton.clicked.connect(
self.__expandNavigationWindow)
self.navigationMenu.showBarButton.clicked.connect(
self.__collapseWindow)
self.navigationWidget.showBarButton.clicked.connect(
self.__collapseWindow)
self.navigationMenu.playingButton.clicked.connect(
self.__collapseWindow)
for widget in self.__navigation_list:
widget.playingButton.clicked.connect(
self.showPlayingInterfaceSig)
widget.settingButton.clicked.connect(
self.switchToSettingInterfaceSig)
widget.musicGroupButton.clicked.connect(
self.switchToMyMusicInterfaceSig)
widget.playlistButton.clicked.connect(
self.switchToPlaylistCardInterfaceSig)
widget.createPlaylistButton.clicked.connect(
self.showCreatePlaylistPanelSig)
def resizeEvent(self, e):
""" 调整小部件尺寸 """
self.navigationBar.resize(self.navigationBar.width(), self.height())
self.navigationMenu.resize(self.navigationMenu.width(), self.height())
self.navigationWidget.resize(
self.navigationWidget.width(), self.height())
def eventFilter(self, obj, e: QEvent):
""" 过滤事件 """
if obj == self.navigationMenu:
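            # re-show the collapsed navigation bar once the overlay menu hides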
if e.type() == QEvent.Hide:
self.navigationBar.show()
return super().eventFilter(obj, e)
def __expandNavigationWindow(self):
""" 展开导航窗口 """
self.__isExpanded = True
if not self.__isOverlay:
            # show the navigation widget
self.__displayMode = self.IN_LINE
self.resize(self.navigationWidget.width(), self.height())
self.navigationWidget.updateWindow()
self.displayModeChanged.emit(self.IN_LINE)
self.navigationWidget.show()
self.navigationBar.hide()
else:
            # show the navigation menu
self.__displayMode = self.OVERLAY
self.navigationMenu.move(self.mapToGlobal(QPoint(0, 0)))
self.navigationMenu.updateWindow()
self.navigationMenu.aniShow()
# self.displayModeChanged.emit(self.OVERLAY)
self.navigationBar.hide()
def __collapseWindow(self):
""" 折叠导航窗口 """
self.__isExpanded = False
self.__displayMode = self.COMPACT
self.navigationBar.show()
self.navigationWidget.hide()
if self.sender() is self.navigationMenu.showBarButton:
self.navigationMenu.aniHide()
elif self.sender() is self.navigationMenu.playingButton:
self.navigationMenu.hide()
self.resize(self.navigationBar.width(), self.height())
self.displayModeChanged.emit(self.__displayMode)
def setOverlay(self, isOverlay: bool):
""" 设置展开导航界面时是否为overlay显示模式 """
self.__isOverlay = isOverlay
def __selectedButtonChangedSlot(self, name):
""" 选中的按钮变化对应的槽函数 """
for widget in self.__navigation_list:
if not (widget is self.sender()):
widget.setSelectedButton(name)
def setCurrentIndex(self, index: int):
""" 选中下标对应的按钮 """
for widget in self.__navigation_list:
widget.setCurrentIndex(index)
def updateWindow(self):
""" 更新窗口 """
self.navigationMenu.updateWindow()
self.navigationWidget.updateWindow()
@property
def isOverlay(self):
return self.__isOverlay
@property
def isExpanded(self):
return self.__isExpanded
@property
def displayMode(self):
return self.__displayMode
| [
"[email protected]"
] |