Dataset schema (one row per source file; reconstructed from the flattened column header):

| column | dtype | min / values | max |
|---|---|---|---|
| blob_id | stringlengths | 40 | 40 |
| directory_id | stringlengths | 40 | 40 |
| path | stringlengths | 3 | 616 |
| content_id | stringlengths | 40 | 40 |
| detected_licenses | sequencelengths | 0 | 112 |
| license_type | stringclasses | 2 values | |
| repo_name | stringlengths | 5 | 115 |
| snapshot_id | stringlengths | 40 | 40 |
| revision_id | stringlengths | 40 | 40 |
| branch_name | stringclasses | 777 values | |
| visit_date | timestamp[us]date | 2015-08-06 10:31:46 | 2023-09-06 10:44:38 |
| revision_date | timestamp[us]date | 1970-01-01 02:38:32 | 2037-05-03 13:00:00 |
| committer_date | timestamp[us]date | 1970-01-01 02:38:32 | 2023-09-06 01:08:06 |
| github_id | int64 ⌀ | 4.92k | 681M |
| star_events_count | int64 | 0 | 209k |
| fork_events_count | int64 | 0 | 110k |
| gha_license_id | stringclasses | 22 values | |
| gha_event_created_at | timestamp[us]date ⌀ | 2012-06-04 01:52:49 | 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us]date ⌀ | 2008-05-22 07:58:19 | 2023-08-21 12:35:19 |
| gha_language | stringclasses | 149 values | |
| src_encoding | stringclasses | 26 values | |
| language | stringclasses | 1 value | |
| is_vendor | bool | 2 classes | |
| is_generated | bool | 2 classes | |
| length_bytes | int64 | 3 | 10.2M |
| extension | stringclasses | 188 values | |
| content | stringlengths | 3 | 10.2M |
| authors | sequencelengths | 1 | 1 |
| author_id | stringlengths | 1 | 132 |

(⌀ marks columns that contain null values.)
4c2bdb7c3c1f0ffd2ca09b91c2b25d6b3bd6dc4c | ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f | /PORMain/pirates/effects/DarkPortal.py | 0cf27ecf8deadbcfe7623641ee7db325b3b0db04 | [] | no_license | BrandonAlex/Pirates-Online-Retribution | 7f881a64ec74e595aaf62e78a39375d2d51f4d2e | 980b7448f798e255eecfb6bd2ebb67b299b27dd7 | refs/heads/master | 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,813 | py |
from panda3d.core import TransparencyAttrib
# File: D (Python 2.4)
from direct.interval.IntervalGlobal import *
from PooledEffect import PooledEffect
from EffectController import EffectController
from otp.otpbase import OTPRender
import random
class DarkPortal(PooledEffect, EffectController):
def __init__(self):
PooledEffect.__init__(self)
EffectController.__init__(self)
self.speed = 0.75
self.holdTime = 2.5
self.size = 40
self.explosionSequence = 0
self.explosion = loader.loadModel('models/effects/darkPortal')
self.explosion.setDepthTest(0)
self.setDepthWrite(0)
self.explosion.setFogOff()
self.explosion.setLightOff()
self.explosion.setHpr(0, -90, 0)
self.explosion.reparentTo(self)
self.hide()
self.explosion.hide(OTPRender.MainCameraBitmask)
self.explosion.showThrough(OTPRender.EnviroCameraBitmask)
self.explosion.setBin('shadow', 0)
self.explosion.setTransparency(TransparencyAttrib.MAlpha)
self.explosion.setDepthWrite(0)
def createTrack(self, rate = 1):
self.explosion.setScale(1)
self.explosion.setColorScale(1, 1, 1, 0.75)
scaleUp = self.explosion.scaleInterval(self.speed, self.size, startScale = 0.0, blendType = 'easeIn', other = render)
scaleDown = self.explosion.scaleInterval(self.speed, 0.0, startScale = self.size, blendType = 'easeIn', other = render)
self.track = Sequence(Func(self.show), scaleUp, Wait(self.holdTime), scaleDown, Func(self.hide), Func(self.cleanUpEffect))
def cleanUpEffect(self):
EffectController.cleanUpEffect(self)
self.checkInEffect(self)
def destroy(self):
EffectController.destroy(self)
PooledEffect.destroy(self)
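
# Illustrative usage sketch (not part of the original file; the pool API
# shown is an assumption based on the PooledEffect base class):
#   effect = DarkPortal.getEffect()
#   effect.reparentTo(render)
#   effect.createTrack()
#   effect.track.start()  # scale up, hold, scale down, then clean up/check in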
| [
"[email protected]"
] | |
93048f8876fc96b4c7fd4bda1e6719756d628118 | 222d4f2dfb63a66b5de274b785cb92393a2e0fe9 | /after6pm_191113/04.py | 72c512374bed6f2e74a37ac9c50a2a1151e9ee6e | [] | no_license | GyuReeKim/PycharmProjects | fd2584c3ff1369510a7f246f2089cefb77035d9d | dd4f0e15b4e72c68b054489c54f24fa0ba5b9ed3 | refs/heads/master | 2020-07-03T11:44:54.951147 | 2019-11-21T00:07:43 | 2019-11-21T00:07:43 | 201,894,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py |
# Laboratory
# Building walls
import sys
sys.stdin = open('04.txt', 'r')
def f(i, j, lab):
pass
N, M = map(int, input().split())
lab = [list(map(int, input().split())) for _ in range(N)]
print(lab)
f(0, 0, lab) | [
"[email protected]"
] | |
524b26645d22e5350ca96393ae4a8f8c7410257e | 4c76dbfaa8f2ca33945e303be90b579c79bd4008 | /renesola/apps/freight/management/commands/build_angular_js.py | 50d8bdd16a9ceccc64c3c8823bb5058badf95821 | [] | no_license | fogcitymarathoner/renasola | 42c32243df4e4c1246d9a85cfb9251aed2264309 | 9089dcc0ffc57a76799f5e99244df644256e08ea | refs/heads/master | 2021-01-11T00:32:40.358446 | 2016-10-10T18:49:50 | 2016-10-10T18:49:50 | 70,517,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py |
__author__ = 'marc'
from django.core.management.base import BaseCommand
from django.conf import settings
import os
from renesola_lib.angular_helpers import build_js
class Command(BaseCommand):
"""
field delimiter ';'
    text delimiter '"'
"""
args = ''
help = ''
def handle(self, *args, **options):
build_js()
| [
"[email protected]"
] | |
d54db077ad045ae5605a1a04b178f9fac106b3ab | 30a456e3012c663782d2a07a0ff67c377d63790d | /data/ass2json.py | 08561f2f6f0db1ff52593268932f24b680e40cf8 | [
"MIT"
] | permissive | akx/my-video | 41099725fd96f369a1e8e671667e2e7be3256f42 | b1135809f81a34026536d1a8532390dc5f1c7945 | refs/heads/master | 2021-01-24T10:30:53.608241 | 2016-10-01T18:30:34 | 2016-10-01T18:30:34 | 69,733,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,283 | py |
import argparse
import re, json
import sys
K_RE = re.compile(r'(\{\\k([0-9]+)\})')
def parse_time(dt):
h, m, s, hs = [float(int(p, 10)) for p in re.split('[:.,]', dt)]
return h * 60 * 60 + m * 60 + s * 1 + hs / 100.0
def parse_ass(infp):
for line in infp:
if not line.startswith('Dialogue:'):
continue
line = line.split(',', 9)
start = parse_time(line[1])
end = parse_time(line[2])
parts = K_RE.split(line[-1])[1:]
word_durations = zip([int(s, 10) / 100.0 for s in parts[1::3]], [s.strip() for s in parts[2::3]])
for i, (dur, word) in enumerate(word_durations):
d = {
'time': round(start, 3),
'word': word,
}
if i == 0:
d['verse'] = True
yield d
start += dur
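
# Illustrative example (assumed input; not part of the original file): a line
#   Dialogue: 0,0:00:01.00,0:00:03.00,Default,,0,0,0,,{\k25}Hel{\k30}lo
# yields one dict per karaoke syllable:
#   {'time': 1.0, 'word': 'Hel', 'verse': True}
#   {'time': 1.25, 'word': 'lo'}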
def main():
ap = argparse.ArgumentParser()
ap.add_argument('file', type=argparse.FileType())
ap.add_argument('-o', '--output', type=argparse.FileType('w'), default=None)
ap.add_argument('--indent', default=None, type=int)
args = ap.parse_args()
json.dump(
list(parse_ass(args.file)),
(args.output or sys.stdout),
indent=args.indent,
)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
cb7fb08c690282edfd833933070c697f756dcb10 | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/ThirteenTeV/ADD/ADDmonoPhoton_MD_1_d_8_TuneCUETP8M1_13TeV_pythia8_cfi.py | 302e89726365a986e9049cc298156fb6aa79d2a4 | [] | no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | false | false | 1,425 | py |
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
maxEventsToPrint = cms.untracked.int32(1),
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(13000.),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring( ## see details on http://home.thep.lu.se/~torbjorn/php8135/ExtraDimensionalProcesses.php?filepath=files/
'ExtraDimensionsLED:ffbar2Ggamma = on',
'ExtraDimensionsLED:CutOffmode = 1',
'ExtraDimensionsLED:t = 0.5',
'ExtraDimensionsLED:n = 8',
'ExtraDimensionsLED:MD = 1000.',
'ExtraDimensionsLED:LambdaT = 1000.',
'5000039:m0 = 1200.',
'5000039:mWidth = 1000.',
'5000039:mMin = 1.',
'5000039:mMax = 13990.',
'PhaseSpace:pTHatMin = 130.'
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters',)
)
)
| [
"[email protected]"
] | |
d4b86ce8c2dcf80ee5ce28fd7f9795ce4bfad495 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /93o6y6WKFpQKoDg4T_18.py | 69dd497b116184ce41ce029c7571c5d85d204ef8 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py |
def sort_by_length(lst):
return sorted(lst, key=len)
| [
"[email protected]"
] | |
45aad19c79479fd3824ea58eb7d7717279b0b008 | 6858cbebface7beec57e60b19621120da5020a48 | /ply/modimport.py | f82d08c44b979f0b39be6a4dfe34acf53fbfc6e1 | [] | no_license | ponyatov/PLAI | a68b712d9ef85a283e35f9688068b392d3d51cb2 | 6bb25422c68c4c7717b6f0d3ceb026a520e7a0a2 | refs/heads/master | 2020-09-17T01:52:52.066085 | 2017-03-28T07:07:30 | 2017-03-28T07:07:30 | 66,084,244 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py |
>>> import tokrules
>>> lexer = lex.lex(module=tokrules)
>>> lexer.input("3 + 4")
>>> lexer.token()
LexToken(NUMBER,3,1,0)
>>> lexer.token()
LexToken(PLUS,'+',1,2)
>>> lexer.token()
LexToken(NUMBER,4,1,4)
>>> lexer.token()
None
>>> | [
"[email protected]"
] | |
4d620495621fd8734bc2f5085f0814fab0602439 | db6d37fcf5545acd3dd9910674c0f43c90410e0a | /iterminal/controllers.py | 88f3f762f83c05c23a9cf06bbd3546a14f2d520a | [] | no_license | capalmer1013/i | 629bb44b4640fc91be883ca2e47c6a3d81f51a0b | 4e0bc895ad232cad7dfefefec35a67346da6794b | refs/heads/master | 2023-02-23T02:35:44.270400 | 2022-04-27T03:04:21 | 2022-04-27T03:04:21 | 86,883,795 | 0 | 0 | null | 2023-02-16T00:32:58 | 2017-04-01T04:29:44 | Python | false | false | 519 | py |
import curses
from iterminal.constants import UP, DOWN, LEFT, RIGHT
def inputController(stdscr, p):
while True:
key = stdscr.getch()
#stdscr.addstr(0, 0, str(key))
dirDict = {curses.KEY_UP: UP, curses.KEY_DOWN: DOWN, curses.KEY_LEFT: LEFT, curses.KEY_RIGHT: RIGHT}
shootDict = {ord('w'): UP, ord('a'): LEFT, ord('s'): DOWN, ord('d'): RIGHT}
if key in dirDict.keys():
p.move(dirDict[key])
elif key in shootDict.keys():
p.shoot(shootDict[key]) | [
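
# Note (assumption about the caller): curses only delivers KEY_UP/KEY_DOWN/
# KEY_LEFT/KEY_RIGHT as single key codes if keypad mode was enabled on the
# window first, e.g. stdscr.keypad(True), before entering this loop.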
"[email protected]"
] | |
5588811602468117dcf4c2c815b823cd9c66efd6 | 0bb474290e13814c2498c086780da5096453da05 | /abc151/C/main.py | de4737e84810f2b025becf5752de28655a3a7833 | [] | no_license | ddtkra/atcoder | 49b6205bf1bf6a50106b4ae94d2206a324f278e0 | eb57c144b5c2dbdd4abc432ecd8b1b3386244e30 | refs/heads/master | 2022-01-25T15:38:10.415959 | 2020-03-18T09:22:08 | 2020-03-18T09:22:08 | 208,825,724 | 1 | 0 | null | 2022-01-21T20:10:20 | 2019-09-16T14:51:01 | Python | false | false | 1,054 | py |
#!/usr/bin/env python3
import sys
sys.setrecursionlimit(10000000)
INF = 1<<32
def solve(N: int, M: int, p: "List[int]", S: "List[str]"):
dp = [[0, 0] for i in range(N+1)]
for i in range(M):
if S[i] == 'AC':
dp[p[i]][0] = 1
else:
if dp[p[i]][0] == 0:
dp[p[i]][1] += 1
ac = len([dp[i][0] for i in range(1, N+1) if dp[i][0] > 0])
wa = sum([dp[i][1] for i in range(1, N+1) if dp[i][0] > 0])
# print(dp[:10])
# print([dp[i][0] for i in range(1, N+1)])
print(ac, wa)
return
def main():
def iterate_tokens():
for line in sys.stdin:
for word in line.split():
yield word
tokens = iterate_tokens()
N = int(next(tokens)) # type: int
M = int(next(tokens)) # type: int
p = [int()] * (M) # type: "List[int]"
S = [str()] * (M) # type: "List[str]"
for i in range(M):
p[i] = int(next(tokens))
S[i] = next(tokens)
solve(N, M, p, S)
if __name__ == '__main__':
main()
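
# Illustrative run: for input "2 5 / 1 WA / 1 AC / 2 WA / 2 AC / 2 WA"
# (slashes denote newlines), solve() prints "2 2": two problems solved,
# with one penalty WA on each before its first AC.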
| [
"[email protected]"
] | |
f17025743fc841a91077662b31a3cb066d361be2 | a5e5d39f42f468d35f18aab3e78c3c090046b0df | /apps/contacts/forms.py | 72c512374bed6f2e74a37ac9c50a2a1151e9ee6e | [] | no_license | WayneLambert/portfolio | 66198dfc18b3f254e6bc726575903c3e8f570dc4 | 7e02165386e4784f81e15bae0325a77cf45f410d | refs/heads/main | 2023-02-04T18:08:13.559223 | 2023-01-29T14:13:59 | 2023-01-29T14:13:59 | 180,239,669 | 5 | 1 | null | 2023-02-04T07:07:10 | 2019-04-08T22:02:22 | JavaScript | false | false | 568 | py |
from django import forms
from captcha.fields import ReCaptchaField
from captcha.widgets import ReCaptchaV3
from apps.contacts.models import Contact
class ContactForm(forms.ModelForm):
class Meta:
model = Contact
fields = ('first_name', 'last_name', 'email', 'message')
captcha = ReCaptchaField(
widget=ReCaptchaV3(
attrs={
'data-theme': 'light',
'data-size': 'invisible',
}
)
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
| [
"[email protected]"
] | |
9eeb6493e7ffc4de7c553d77979a09da3caeaa1e | 8e1668e35a8df9968ab14d16db089b51dbe6dd51 | /python/algorithms/contests/four_divisors.py | 77b0e61ed7442f35a879a90753b56c9b384e7f7b | [] | no_license | Chalmiller/competitive_programming | f1ec0184d1ff247201522ab90ca8e66b3f326afc | b437080d1ba977c023baf08b7dc5c3946784e183 | refs/heads/master | 2021-03-24T05:11:59.383916 | 2020-08-24T22:07:41 | 2020-08-24T22:07:41 | 247,519,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py |
from typing import *
class Solution:
def sumFourDivisors(self, nums: List[int]) -> int:
divisors = 0
for i in nums:
num_divisor = []
for j in range(i+1):
if len(num_divisor) > 4:
break
if i%(j+1) == 0:
num_divisor.append(j+1)
if len(num_divisor) == 4:
sum_divisors = sum(num_divisor)
divisors += sum_divisors
return divisors
nums = [21,4,7]
obj = Solution()
print(obj.sumFourDivisors(nums))  # 32: only 21 has exactly four divisors (1+3+7+21)
| [
"[email protected]"
] | |
854a857b9eedc99be8a2332e23c37f43e09f4bc4 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /FjiriGn8gc5RE8Xm2_7.py | efeef575fedcd049a250bbc0cfb0345e324e582a | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,014 | py |
"""
Write a function that takes `fuel` (liters), `fuel_usage` (liters/100km),
`passengers`, `air_con` (boolean) and returns maximum distance that car can
travel.
* `fuel` is the number of liters of fuel in the fuel tank.
* `fuel_usage` is basic fuel consumption per 100 km (with the driver inside only).
* Every additional passenger is increasing basic fuel consumption by 5%.
* If the air conditioner is ON `True`, its increasing total (not basic) fuel consumption by 10%.
### Examples
total_distance(70.0, 7.0, 0, False) ➞ 1000.0
total_distance(36.1, 8.6, 3, True) ➞ 331.8
	total_distance(55.5, 5.5, 5, False) ➞ 807.3
### Notes
* `fuel` and `fuel_usage` are always greater than 1.
* `passengers` are always greater or equal to 0.
* Round your answer to the nearest tenth.
"""
def total_distance(fuel, fuel_usage, passengers, air_con):
air = 0
if air_con:
air = 1
return round((1000*fuel)/(fuel_usage*((0.05*passengers+1)*(air+10))),1)
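
# Sanity check against the docstring examples (illustrative):
#   total_distance(70.0, 7.0, 0, False) -> 1000*70/(7*1.00*10)   = 1000.0
#   total_distance(36.1, 8.6, 3, True)  -> 1000*36.1/(8.6*1.15*11) ~= 331.8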
| [
"[email protected]"
] | |
b65a0d35db9f6025977c8d15e05fa76d1205eabb | ebfa76d74294fa64648146e6726cc69d3e25e23f | /parlai/mturk/tasks/light/light_chat_eval/worlds.py | 1ecac1421555897bf2765a369fcd6ba4428df1e3 | [
"MIT"
] | permissive | ritvik1512/ContrastiveLearning4Dialogue | 17b770a8c777aca9f5be273ff41f02c4530f8ff3 | 873c20d21ee810750179356c353d2cce244db028 | refs/heads/master | 2023-03-12T15:17:43.635546 | 2021-02-24T07:20:27 | 2021-02-24T07:20:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,029 | py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.mturk.core.worlds import MTurkOnboardWorld, MTurkTaskWorld
from parlai.mturk.core.agents import (
MTURK_DISCONNECT_MESSAGE,
RETURN_MESSAGE,
TIMEOUT_MESSAGE,
)
import time
def is_disconnected(act):
return 'text' in act and act['text'] in [
MTURK_DISCONNECT_MESSAGE,
RETURN_MESSAGE,
TIMEOUT_MESSAGE,
]
class LightEvalTestWorld(MTurkOnboardWorld):
"""
Task world that gives a pre-determined task as a test.
Assigns a blocking qualification if the worker fails the test.
"""
GESTURES = list(
map(
lambda x: 'gesture ' + x,
[
'applaud',
'blush',
'cry',
'dance',
'frown',
'gasp',
'grin',
'groan',
'growl',
'yawn',
'laugh',
'nod',
'nudge',
'ponder',
'pout',
'scream',
'shrug',
'sigh',
'smile',
'stare',
'wave',
'wink',
],
)
)
block_act = {'id': 'System', 'text': "FAILED", 'task_data': {'turn': 'FAILED'}}
def block_loop(self):
print('Worker {} failed onboarding'.format(self.mturk_agent.worker_id))
self.mturk_agent.observe(self.block_act)
self.mturk_agent.mturk_manager.soft_block_worker(self.mturk_agent.worker_id)
act = self.mturk_agent.act()
while not is_disconnected(act):
self.mturk_agent.observe(self.block_act)
act = self.mturk_agent.act()
return True
def __init__(self, opt, mturk_agent):
self.mturk_agent = mturk_agent
self.opt = opt
self.did_complete = False
self.wrong = 0
self.episodeDone = False
def parley(self):
self.mturk_agent.update_agent_id('TestEmote')
first_act = {
'id': 'System',
'text': 'FIRST_TURN',
'task_data': {
'wrong': 0,
'turn': 'FIRST_TURN',
'actions': self.GESTURES,
'agent_id': 'Guard',
'text': 'Bahahaha that\'s a great one! Where\'d you get that from?',
'persona': 'I\'m a guard of the royal family. I have a loud laugh, '
'and people hear it often as I love jokes. I stand up for '
'rightousness, and have a short temper when it comes to '
'insults against the king. Sometimes you need to knock '
'some sense into people.',
'base_name': 'Guard',
'partner_name': 'Jester',
'setting': 'You are in the servants\' quarters. Many people are '
'sitting around waiting to be called for services. It\'s '
'cozy, but not cramped. A chest is here. A Jester is here. '
'You are carrying a spear.',
},
}
self.mturk_agent.observe(first_act)
act = self.mturk_agent.act()
if is_disconnected(act):
self.episodeDone = True
return
while act['text'] != 'gesture laugh':
self.wrong += 1
if self.wrong > 3:
return self.block_loop()
first_act['task_data']['wrong'] = self.wrong
self.mturk_agent.observe(first_act)
act = self.mturk_agent.act()
if is_disconnected(act):
self.episodeDone = True
return
self.mturk_agent.update_agent_id('TestSpeech')
correct_phrase = (
'Now you better watch your tongue Jester. '
'I won\'t have you badmouthing our king.'
)
second_act = {
'id': 'System',
'text': 'SECOND_TURN',
'task_data': {
'wrong': 0,
'turn': 'SECOND_TURN',
'curr_message_context': {'action': 'gesture frown'},
'actions': [
'You think you can say whatever you want because we\'re alone?',
'Do you want to grab some tea?',
'What makes you think you can stand up to me, silly man? I have three times your strength. I have weapons to the teeth. What would make you think this was a good idea?', # NOQA
'Yeah that guy is something of a jerk',
'I just feel he doesn\'t have the best sense of humor...',
'Yeah landlubber, aye find this is a great hiding spot too.',
'If only you could say that to my face one more time. I\'ve missed you too much...', # NOQA
'One more beer for the gang? I feel like you would be the type to have plenty to drink.', # NOQA
'The servants quarters are pretty tightly packed aren\'t they?',
'I hate being an archer...',
correct_phrase,
'Once upon a time I lived for that king, but nowadays I feel like I could go without him. Thats why I\'m here in the servants quarters.', # NOQA
'Hey there little fella, do you think you can get me some food?',
'I know you want more than just some of our wares, I\'m selling everything.', # NOQA
'One more song! I know you know a few more of them!',
'If that isn\'t a good joke, I don\'t know what is? Hahahahaha',
'Three fort nights too late, I will not stand for this! You should have been here sooner!', # NOQA
'Aw sweetheart, I just want you to know how much I care.',
'I have no spells for you! My wizardry is just for me and my acolytes.', # NOQA
'How did you find out the kinds of jokes that the king likes so much?', # NOQA
],
},
}
self.mturk_agent.observe(second_act)
act = self.mturk_agent.act()
if is_disconnected(act):
self.episodeDone = True
return
while act['text'] != correct_phrase:
self.wrong += 1
if self.wrong > 3:
return self.block_loop()
second_act['task_data']['wrong'] = self.wrong
self.mturk_agent.observe(second_act)
act = self.mturk_agent.act()
if is_disconnected(act):
self.episodeDone = True
return
self.mturk_agent.update_agent_id('TestAct')
third_act = {
'id': 'System',
'text': 'THIRD_TURN',
'task_data': {
'wrong': 0,
'turn': 'THIRD_TURN',
'text': 'You gotta get your senses straight. Hyah! '
'Consider this a warning...',
'actions': [
'drop spear',
'wield spear',
'hug Jester',
'examine chest',
'get coins from chest',
'hit Jester',
'steal ball from Jester',
],
},
}
self.mturk_agent.observe(third_act)
act = self.mturk_agent.act()
if is_disconnected(act):
self.episodeDone = True
return
if act['text'] != 'hit Jester':
self.wrong += 1
if self.wrong > 3:
return self.block_loop()
third_act['task_data']['wrong'] = self.wrong
self.mturk_agent.observe(third_act)
act = self.mturk_agent.act()
if is_disconnected(act):
self.episodeDone = True
return
self.did_complete = True
self.mturk_agent.observe(
{
'id': 'System',
'text': 'FINAL_TURN',
'task_data': {'turn': 'FINAL_TURN', 'wrong': 0},
}
)
self.episodeDone = True
time.sleep(3)
return
class LightEvalTaskWorld(MTurkTaskWorld):
"""
Task world steps the worker through a conversation, giving them cands to select from
as if they are a retrieval model.
"""
def __init__(self, opt, mturk_agents, sample, use_train, max_wrong):
self.mturk_agent = mturk_agents[0]
self.sample_acts = sample
self.turn = 0
self.episodeDone = False
self.completed = False
self.selections = []
self.corrects = [
            ex['labels'][0] if 'labels' in ex else ex['eval_labels'][0] for ex in sample
]
self.use_train = use_train
self.max_wrong = max_wrong
def extract_from_flag(self, text, flag):
return text.split(flag)[1]
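
    # e.g. extract_from_flag('_setting_name The Tavern', '_setting_name')
    # returns ' The Tavern' (hypothetical input; the leading space is kept).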
def get_current_turn_context(self):
all_lines = []
for act in self.sample_acts[: self.turn]:
lines = act['text'].split('\n')
if lines[-1].startswith('_self'):
lines = lines[:-1]
all_lines += lines
lines = all_lines + self.sample_acts[self.turn]['text'].split('\n')
lines = list(filter(lambda x: len(x) > 0, lines))
setting_name = 'Setting withheld'
setting_desc = 'Setting description withheld'
self_name = 'Character withheld'
partner_name = 'Partner withheld'
self_persona = 'Persona withheld'
self_act = ''
self_text = 'Spoken text withheld'
messages = []
self_message = {}
partner_message = {}
# Handle current turn context separately
if lines[-1].startswith('_self'):
self_line = lines[-1]
lines = lines[:-1]
# Extract current turn context
if self_line.startswith('_self_say'):
self_text = self.extract_from_flag(self_line, '_self_say')
elif self_line.startswith('_self_act'):
self_act = self.extract_from_flag(self_line, '_self_act')
elif self_line.startswith('_self_emote'):
self_act = self.extract_from_flag(self_line, '_self_emote')
# Construct the rest of the context
for line in lines:
if line.startswith('_setting_name'):
setting_name = self.extract_from_flag(line, '_setting_name')
elif line.startswith('_setting_desc'):
setting_desc = self.extract_from_flag(line, '_setting_desc')
elif line.startswith('_partner_name'):
partner_name = self.extract_from_flag(line, '_partner_name')
elif line.startswith('_self_name'):
self_name = self.extract_from_flag(line, '_self_name')
elif line.startswith('_self_persona'):
self_persona = self.extract_from_flag(line, '_self_persona')
elif line.startswith('_partner'):
if 'id' in self_message:
messages.append(self_message)
self_message = {}
if line.startswith('_partner_say'):
partner_message['id'] = partner_name
partner_message['text'] = self.extract_from_flag(
line, '_partner_say'
)
if line.startswith('_partner_act'):
partner_message['task_data'] = {
'action': self.extract_from_flag(line, '_partner_act')
}
if line.startswith('_partner_emote'):
partner_message['task_data'] = {
'action': 'gesture '
+ self.extract_from_flag(line, '_partner_emote')
}
elif line.startswith('_self'):
if 'id' in partner_message:
messages.append(partner_message)
partner_message = {}
if line.startswith('_self_say'):
self_message['id'] = self_name
self_message['text'] = self.extract_from_flag(line, '_self_say')
if line.startswith('_self_act'):
self_message['task_data'] = {
'action': self.extract_from_flag(line, '_self_act')
}
if line.startswith('_self_emote'):
self_message['task_data'] = {
'action': 'gesture '
+ self.extract_from_flag(line, '_self_emote')
}
if 'id' in partner_message:
messages.append(partner_message)
act = {
'id': 'System',
'text': 'TASK_DATA',
'task_data': {
'actions': sorted(self.sample_acts[self.turn]['label_candidates']),
'text': self_text,
'curr_message_context': {'action': self_act},
'agent_id': self_name,
'base_name': self_name,
'persona': self_persona,
'partner_name': partner_name,
'setting': setting_desc,
'setting_name': setting_name,
'messages': messages,
},
}
return act
def parley(self):
self.mturk_agent.observe(self.get_current_turn_context())
act = self.mturk_agent.act()
if is_disconnected(act):
self.episodeDone = True
return
self.selections.append(act['text'])
self.turn += 1
if self.turn == len(self.sample_acts):
self.episodeDone = True
self.completed = True
wrong = 0
if self.use_train:
for i in range(len(self.selections)):
if self.selections[i] != self.corrects[i]:
wrong += 1
if wrong > self.max_wrong:
self.completed = False
self.mturk_agent.mturk_manager.soft_block_worker(
self.mturk_agent.worker_id
)
print('Worker failed in train', self.mturk_agent.worker_id)
def episode_done(self):
return self.episodeDone
def shutdown(self):
self.mturk_agent.shutdown()
def get_custom_task_data(self):
# brings important data together for the task, to later be used for
# creating the dataset. If data requires pickling, put it in a field
# called 'needs-pickle'.
return {
'selections': self.selections,
'corrects': self.corrects,
'episode': self.sample_acts,
'training': self.use_train,
}
| [
"[email protected]"
] | |
759453a9942cf164858e0646369370b634ed8630 | 751300a23242cfe393f86ff489339ffc81319efc | /speaker_spotting/speaker_spotting_oracle_cluster2-dev.py | 9ee8cd16ac65ab6ad961b195a92ffb3714d90be2 | [] | no_license | yinruiqing/speaker_spotting | bc349791a59c6caa2a840fb39aa1d4c1221f99e9 | c2fbdcbf2885d9545abe8bf1e19b2c412b0680ee | refs/heads/master | 2021-05-04T14:39:33.213405 | 2018-05-03T10:28:11 | 2018-05-03T10:28:11 | 120,207,231 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 4,962 | py |
# coding: utf-8
# ```bash
# $ pip install pyannote.metrics==1.4.1
# $ pip install pyannote.db.odessa.ami==0.5.1
# ```
import clustering
import numpy as np
from pyannote.audio.features import Precomputed
precomputed = Precomputed('/vol/work1/bredin/speaker_spotting/embeddings')
from pyannote.database import get_protocol, FileFinder
protocol = get_protocol('AMI.SpeakerSpotting.MixHeadset', progress=True)
# enrolment consists in summing all relevant embeddings
def speaker_spotting_enrol(current_enrolment):
enrol_with = current_enrolment['enrol_with']
embeddings = precomputed(current_enrolment)
return np.sum(embeddings.crop(enrol_with), axis=0, keepdims=True)
models = {}
for current_enrolment in protocol.development_enrolment():
model_id = current_enrolment.pop('model_id')
models[model_id] = speaker_spotting_enrol(current_enrolment)
from pyannote.core import Annotation

REFERENCE = {}
for current_file in protocol.development():
uri = current_file['uri']
if uri not in REFERENCE:
REFERENCE[uri] = Annotation(uri=uri)
REFERENCE[uri].update(current_file['annotation'])
# Trials
from pyannote.core import SlidingWindow, SlidingWindowFeature
from pyannote.audio.embedding.utils import cdist
from pyannote.core import Annotation,Segment, Timeline
# trial consists in comparing each embedding to the target embedding
def speaker_spotting_try_system2(current_trial):
""" speaker spotting system based on the oracle
clustering system
"""
# target model
# record the model embedding vector
# and model id
model = {}
model_id = current_trial['model_id']
model_embedding = models[current_trial['model_id']]
model['mid'] = model_id
model['embedding'] = model_embedding
# where to look for this target
try_with = current_trial['try_with']
# precomputed embedding
embeddings = precomputed(current_trial)
# annotation of current file
oracle_diarization = REFERENCE[current_trial['uri']].crop(current_trial['try_with'])
# find index of first and last embedding fully included in 'try_with'
indices = embeddings.sliding_window.crop(try_with, mode='strict')
first, last = indices[0], indices[-1]
onlineOracleClustering = clustering.OnlineOracleClustering(current_trial['uri'])
start = embeddings.sliding_window[0].start
data = np.zeros((len(embeddings.data), 1))
for i, (window, _) in enumerate(embeddings):
# make sure the current segment is in 'try_with'
if i < first:
start = window.end
continue
if i > last:
break
so_far = Segment(start, window.end)
current_annotation = oracle_diarization.crop(so_far)
score = 0.
for segment, _, label in current_annotation.itertracks(label=True):
example = {}
example['label'] = label
example['segment'] = segment
example['embedding'] = embeddings.crop(segment, mode='center')
example['indice'] = [i]
# compute the distance with model
example['distances'] = {}
example['distances'][model['mid']] = list(cdist(example['embedding'],
model['embedding'],
metric='cosine').flatten())
# update the online oracle clustering
onlineOracleClustering.upadateCluster(example)
if not onlineOracleClustering.empty():
# compute the current score
min_dist = min(onlineOracleClustering.modelDistance(model))
score = max(score, 2-min_dist)
data[i] = score
start = window.end
# transform scores to sliding window features
data = data[first:last+1]
sliding_window = SlidingWindow(start=embeddings.sliding_window[first].start,
duration=embeddings.sliding_window.duration,
step=embeddings.sliding_window.step)
return SlidingWindowFeature(data, sliding_window)
# Depending on the value of the detection threshold, the alarm will be triggered with a different latency.
def process_score(scores):
min_score = 0
res = []
for (window, score) in scores:
if score > min_score:
res.append([window.end, score[0]])
min_score = score[0]
return res
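
# Illustrative behaviour (assumed inputs): given windowed scores
#   (0-1s, 0.2), (1-2s, 0.1), (2-3s, 0.5)
# process_score keeps only new running maxima and returns
#   [[1.0, 0.2], [3.0, 0.5]]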
def process_trial(trial, scores):
res = {}
pscores = process_score(scores)
res['uri'] = trial['uri']
res['model_id'] = trial['model_id']
res['scores'] = pscores
return res
llss = []
for current_trial in protocol.development_trial():
reference = current_trial.pop('reference')
hypothesis = speaker_spotting_try_system2(current_trial)
llss.append(process_trial(current_trial, hypothesis))
import simplejson as json
with open('llss.txt', 'w') as outfile:
json.dump(llss, outfile)
| [
"[email protected]"
] | |
537b1e6af4b96025977c8d15e05fa76d1205eabb | d4e9a392d7465a5c10417364dd91cd5dd3c5d935 | /app/preprocess.py | d0fbfc80e4a532a5803e9d7632c2c1743c42d9e6 | [] | no_license | MaayanLab/harmonizome-ml | 045f866bac4683a23dd8a393e48f9f09bb08c35d | 5cebd194d771b1d7eabeb65a1c81ce0c78bf7a80 | refs/heads/master | 2020-03-21T13:26:26.132737 | 2020-03-05T22:46:38 | 2020-03-05T22:46:38 | 138,605,770 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,905 | py |
#!/usr/bin/env python
import os
import nbformat
from flask import render_template
from . import app
from .model import build_fields
from .runtime import ipynb_import_from_file
from .template.nbtemplate_parse import parse_fields
from .util import app_dir, globalContext
@app.template_filter('filter')
def reverse_filter(arr, attr, val):
def maybe_eval(v):
if callable(v):
return v()
return v
return [v
for v in arr
if maybe_eval(getattr(v, attr)) == val]
def main():
with app.test_request_context('/'):
for _, _, files in os.walk(app_dir + '/templates/ipynb/'):
for file in files:
file, ext = os.path.splitext(file)
if ext != '.ipynb':
continue
print('Building %s...' % (file))
nb = ipynb_import_from_file(
app_dir + '/templates/ipynb/%s.ipynb' % (file)
)
context = dict(
filename=file,
**globalContext,
**build_fields(),
)
fields = [field
for cell in nb.cells
for field in parse_fields(
cell['source'],
context,
)]
form_out = open(app_dir + '/templates/%s.html' % (file), 'w')
try:
if os.path.isfile(app_dir + '/templates/ipynb/%s.html' % (file)):
# Custom template
print(
render_template('ipynb/%s.html' % (file),
**context,
fields=fields,
),
file=form_out,
)
else:
# General template
print(
render_template('layout/ipynb.j2',
**context,
fields=fields,
),
file=form_out,
)
except Exception as e:
print(e)
finally:
form_out.close()
            break  # only walk the top level of templates/ipynb/
| [
"[email protected]"
] | |
a99dbdf037c0559627072edbf0cd2f7e24983bb2 | 01f77b70dfb8817a913414fd25d9ed44ba3cd1f4 | /oscar_invoices/urls.py | 1bc931c736f24795068621e2e1d47790be762a5e | [] | no_license | luiz158/django-oscar-invoices | ca2cf8b70347000399c5316532aca7e52d0f77a3 | 9cc3425410641a95832bda93155e4d2bfa95ac7e | refs/heads/master | 2023-07-02T22:21:03.318698 | 2020-10-06T16:01:02 | 2020-10-06T16:01:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py |
from django.urls import re_path
from . import views
app_name = "oscar_invoices"
urlpatterns = [
re_path(r"invoice/(?P<pk>\d+)/", views.InvoicePreviewView.as_view(), name="invoice"),
]
| [
"[email protected]"
] | |
40c7a96a66c6ce84439222e54679cc51149bc0ba | a86293a2033c06410aa8ed19bcbce8ca55ea3c55 | /src/client_libraries/python/dynamics/customerinsights/api/models/cds_org_info.py | e414e4e2f31a4ed4afa9f160f9258d839d0aa435 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | ramotheonly/Dynamics365-CustomerInsights-Client-Libraries | a3ca28aa78d2b5509e65d9895ff4a0d42d05f611 | e00632f7972717b03e0fb1a9e2667e8f9444a0fe | refs/heads/main | 2023-08-02T08:09:04.063030 | 2021-09-28T22:42:15 | 2021-09-28T22:42:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,154 | py |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CdsOrgInfo(Model):
"""The information for CDS Organization in BAP.
:param friendly_name: Gets the Cds Organization Friendly Name
:type friendly_name: str
:param url: Gets the Cds Organization Url
:type url: str
:param state: Gets the Cds Organization State
:type state: str
:param location: Gets region location of Cds Organization
:type location: str
:param environment_sku: Gets SKU of Cds Organization
:type environment_sku: str
:param expiration_time: Gets the expiration time of CDS Organization if
the SKU is Trial
:type expiration_time: datetime
:param max_allowed_expiration_time: Gets the max allowed expiration time
of CDS Organization if the SKU is Trial
:type max_allowed_expiration_time: datetime
"""
_attribute_map = {
'friendly_name': {'key': 'friendlyName', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'environment_sku': {'key': 'environmentSku', 'type': 'str'},
'expiration_time': {'key': 'expirationTime', 'type': 'iso-8601'},
'max_allowed_expiration_time': {'key': 'maxAllowedExpirationTime', 'type': 'iso-8601'},
}
def __init__(self, **kwargs):
super(CdsOrgInfo, self).__init__(**kwargs)
self.friendly_name = kwargs.get('friendly_name', None)
self.url = kwargs.get('url', None)
self.state = kwargs.get('state', None)
self.location = kwargs.get('location', None)
self.environment_sku = kwargs.get('environment_sku', None)
self.expiration_time = kwargs.get('expiration_time', None)
self.max_allowed_expiration_time = kwargs.get('max_allowed_expiration_time', None)
| [
"[email protected]"
] | |
b92defed3b5e8993f941de86c1d080d39aa48810 | c73beb04d101ca8d98c9126b1c47b4f19cc35066 | /week1/calculator.py | f7f372af8b41c269b4a182934923f6716834ac12 | [] | no_license | fywest/python | a5ecf62e1f8cdf59c936da81b478c371f169aec4 | cd97438679d8e129b3cb75d76226b16e7e7850ac | refs/heads/master | 2022-12-13T06:15:04.021492 | 2019-05-28T19:21:18 | 2019-05-28T19:21:18 | 130,403,136 | 0 | 0 | null | 2022-12-08T05:08:55 | 2018-04-20T19:02:57 | Python | false | false | 929 | py |
import sys
if __name__=='__main__':
if len(sys.argv)<2:
print("please input salary amount")
exit(1)
    try:
        amount = int(sys.argv[1])
        tax = 0.0
        # taxable income: salary minus the 3500 standard monthly deduction
        amount_fortax = amount - 3500
        if amount_fortax <= 0:
            tax = 0
elif amount_fortax>80000:
tax=amount_fortax*0.45-13505
elif amount_fortax>55000:
tax=amount_fortax*0.35-5505
elif amount_fortax>35000:
tax=amount_fortax*0.30-2755
elif amount_fortax>9000:
tax=amount_fortax*0.25-1005
elif amount_fortax>4500:
tax=amount_fortax*0.20-555
elif amount_fortax>1500:
tax=amount_fortax*0.1-105
else:
tax=amount_fortax*0.03-0
print("{0:.2f}".format((tax)))
exit(0)
except ValueError:
print("Parameter Error")
exit(1)
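
# Illustrative run (assuming this file is saved as calculator.py):
#   $ python calculator.py 5000
#   45.00    (taxable = 5000 - 3500 = 1500, which falls in the 3% bracket)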
| [
"[email protected]"
] | |
ad796b01f49b7944d7c81a65fdb929ca1235c040 | 64ec8731553aa08c33373b212bbe431b1a23b97c | /test/util/util_spatial.py | 74e2b2692deec5adc94efe1ca8e6186db7ba6e48 | [
"BSD-3-Clause",
"MIT"
] | permissive | ChetanNathwani/pyrolite | 98947fde265b25beea839f24495d68bbdb726eed | 8de9c67855305115517418e127bf26de84ff062d | refs/heads/master | 2023-07-26T18:57:28.024540 | 2021-07-08T09:19:02 | 2021-07-08T09:19:02 | 367,300,779 | 0 | 0 | NOASSERTION | 2021-05-14T09:23:47 | 2021-05-14T08:35:50 | null | false | false | 7,785 | py |
import unittest
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
try:
import cartopy.crs as ccrs
HAVE_CARTOPY = True
except ImportError:
HAVE_CARTOPY = False
from pyrolite.util.spatial import *
from pyrolite.util.math import isclose # nan-equalling isclose
class TestGreatCircleDistance(unittest.TestCase):
def setUp(self):
self.ps = zip(
np.array(
[
([0, 0], [0, 0]), # should be 0
([-170, 0], [170, 0]), # should be 20
([0, -90], [0, 90]), # should be 180
([-45, 0], [45.0, 0.0]), # should be 90
([-90, -90], [90.0, 90.0]), # should be 180
([-90, -45], [90.0, 45.0]), # should be 180, rotation of above
([-90, -0], [90.0, 0.0]), # should be 180, rotation of above
([-60, 20], [45.0, 15.0]),
([-87.0, 67.0], [34, 14]),
([-45, -45], [45.0, 45.0]),
([-45, -30], [45.0, 30.0]),
]
),
[0, 20, 180, 90, 180, 180, 180, None, None, None, None],
)
def test_default(self):
for ps, expect in self.ps:
with self.subTest(ps=ps, expect=expect):
distance = great_circle_distance(*ps)
distance_r = great_circle_distance(*ps[::-1])
self.assertTrue(isclose(distance, distance_r))
if (ps[0] == ps[1]).all():
self.assertTrue(np.isclose(distance, 0.0))
"""
ax = plt.subplot(111, projection=ccrs.Mollweide()) # ccrs.Orthographic(0, 0))
ax.figure.set_size_inches(8, 8)
ax.stock_img()
ax.plot(
*np.array([*ps]).T,
color="blue",
marker="o",
transform=ccrs.Geodetic()
)
ax.plot(*np.array([*ps]).T, color="gray", transform=ccrs.PlateCarree())
plt.text(
**np.array([*ps])[0] + [5, 5],
"{:2.0f}".format(distance),
horizontalalignment="left",
fontsize=10,
transform=ccrs.Geodetic()
)
plt.show()"""
def test_absolute(self):
for ps, expect in self.ps:
for absolute in [True, False]:
with self.subTest(ps=ps, expect=expect, absolute=absolute):
distance = great_circle_distance(*ps, absolute=absolute)
distance_r = great_circle_distance(*ps[::-1], absolute=absolute)
self.assertTrue(isclose(distance, distance_r))
if (ps[0] == ps[1]).all():
self.assertTrue(np.isclose(distance, 0.0))
def test_degrees(self):
for ps, expect in self.ps:
for degrees in [True, False]:
with self.subTest(ps=ps, expect=expect, degrees=degrees):
if not degrees:
ps = np.deg2rad(
ps
) # convert to radians to give sensible output
distance = great_circle_distance(*ps, degrees=degrees)
distance_r = great_circle_distance(*ps[::-1], degrees=degrees)
self.assertTrue(isclose(distance, distance_r))
if (ps[0] == ps[1]).all():
self.assertTrue(np.isclose(distance, 0.0))
if expect is not None:
self.assertTrue(isclose(distance, expect))
def test_Vicenty(self):
method = "vicenty"
for ps, expect in self.ps:
with self.subTest(ps=ps, expect=expect, method=method):
distance = great_circle_distance(*ps, method=method)
distance_r = great_circle_distance(*ps[::-1], method=method)
self.assertTrue(isclose(distance, distance_r))
if (ps[0] == ps[1]).all():
self.assertTrue(np.isclose(distance, 0.0))
if expect is not None:
self.assertTrue(isclose(distance, expect))
def test_haversine(self):
method = "haversine"
for ps, expect in self.ps:
with self.subTest(ps=ps, expect=expect, method=method):
distance = great_circle_distance(*ps, method=method)
distance_r = great_circle_distance(*ps[::-1], method=method)
self.assertTrue(isclose(distance, distance_r))
if (ps[0] == ps[1]).all():
self.assertTrue(np.isclose(distance, 0.0))
if expect is not None:
self.assertTrue(isclose(distance, expect))
def test_cosines(self):
method = "cosines"
for ps, expect in self.ps:
with self.subTest(ps=ps, expect=expect, method=method):
distance = great_circle_distance(*ps, method=method)
distance_r = great_circle_distance(*ps[::-1], method=method)
self.assertTrue(isclose(distance, distance_r))
if (ps[0] == ps[1]).all():
self.assertTrue(np.isclose(distance, 0.0))
if expect is not None:
self.assertTrue(isclose(distance, expect))
class TestPieceWise(unittest.TestCase):
def test_pieces(self):
x1, x2 = 0.0, 10.0
segment_ranges = [(x1, x2)]
for segments in [1, 2, 3]:
with self.subTest(segments=segments):
result = list(piecewise(segment_ranges, segments=segments))
self.assertTrue(len(result) == segments)
def test_multiple_ranges(self):
x1, x2 = 0.0, 10.0
segment_ranges = [(x1, x2), (x2, x1), (x1, x2)]
segments = 2
result = list(piecewise(segment_ranges, segments=segments))
self.assertTrue(len(result) == segments ** len(segment_ranges))
class TestSpatioTemporalSplit(unittest.TestCase):
def test_split(self):
x1, x2 = 0, 10
segments = 2
params = dict(age=(0, 10), lat=(-10, 10), lo=(-90, 90))
result = list(spatiotemporal_split(segments=segments, **params))
self.assertTrue([isinstance(item, dict) for item in result])
self.assertTrue(len(result) == segments ** len(params))
class TestNSEW2Bounds(unittest.TestCase):
def setUp(self):
self.params = {
k: v
for (k, v) in zip(
["west", "south", "east", "north"], np.random.randint(1, 10, 4)
)
}
def test_conversion(self):
result = NSEW_2_bounds(self.params)
self.assertTrue(isinstance(result, list))
def test_order(self):
order = ["minx", "maxx", "miny", "maxy"]
result = NSEW_2_bounds(self.params, order=order)
self.assertTrue(result[1] == self.params["east"])
class TestLevenshteinDistance(unittest.TestCase):
def test_string(self):
pairs = [
("bar", "car"),
("bart", "car"),
("Saturday", "Sunday"),
("kitten", "sitting"),
]
expect = [1, 2, 3, 3]
for pair, exp in zip(pairs, expect):
with self.subTest(pair=pair, exp=exp):
dist = levenshtein_distance(*pair)
self.assertTrue(dist == exp)
def test_list(self):
pairs = [
([1, 2, 3], [1, 2, 2]),
(["A", "B", "C"], ["A", "B"]),
(["A", "B", "C", "D"], ["A", "E", "C"]),
]
expect = [1, 1, 2]
for pair, exp in zip(pairs, expect):
with self.subTest(pair=pair, exp=exp):
dist = levenshtein_distance(*pair)
self.assertTrue(dist == exp)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
a0cb1eee0ce7279e519465175cbaff109ed4fb60 | e3365a497b6f3afa7afc36381f7a7d1752f09610 | /.venv/bin/jupyter-notebook | 70ee2fde73f1c59914cde9b01c22c06f382ee6ce | [] | no_license | MohamadSheikhAlshabab/Chess_Board- | 4229f7044831b79a8b8b6662a2aea5753d11c7dc | ee2e69d4567b69559584d0b074d91a25793db2f7 | refs/heads/master | 2022-12-08T05:10:59.482582 | 2020-09-04T16:34:18 | 2020-09-04T16:34:18 | 291,529,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | #!/home/mohamad/401/chess_board/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from notebook.notebookapp import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
9ad30ee9734df856d50edf0d943d9924d00ca67a | 1c8bcd2d8e129a92e3328f47d2a452814c033327 | /kaggle/otto-group-product-classification-challenge/script_30.py | 2250ea4fb9cf07c4c72a3fb83dcb6c31ab8ca81f | [
"MIT"
] | permissive | josepablocam/janus-public | 425334706f9a4519534779b7f089262cf5cf0dee | 4713092b27d02386bdb408213d8edc0dc5859eec | refs/heads/main | 2023-03-08T15:21:12.461762 | 2021-02-25T20:53:02 | 2021-02-25T20:53:02 | 314,606,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,253 | py |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfTransformer
import lightgbm as lgb
pd.set_option('display.max_columns', 100)
pd.set_option('display.max_rows', 100)
dtypes = {f"feat_{i}": "int32" for i in range(1, 94)}
dtypes["id"] = "int32"
dtypes["target"] = "string"
df_train = pd.read_csv(
"/kaggle/input/otto-group-product-classification-challenge/train.csv",
dtype=dtypes
).set_index("id")
class_to_order = dict()
order_to_class = dict()
for idx, col in enumerate(df_train.target.unique()):
order_to_class[idx] = col
class_to_order[col] = idx
df_train["target_ord"] = df_train["target"].map(class_to_order).astype("int16")
feature_columns = [
col for col in df_train.columns if col.startswith("feat_")
]
target_column = ["target_ord"]
X_train, X_valid, y_train, y_valid = train_test_split(
df_train[feature_columns], df_train[target_column],
test_size=0.3, random_state=42,
stratify=df_train[target_column]
)
tfidf = TfidfTransformer()
tfidf_feature_train = tfidf.fit_transform(X_train).toarray().astype("float32")
tfidf_feature_valid = tfidf.transform(X_valid).toarray().astype("float32")
X_train_tfidf = np.hstack((X_train.values, tfidf_feature_train))
X_valid_tfidf = np.hstack((X_valid.values, tfidf_feature_valid))
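
# Note: the 93 raw count features are kept alongside their 93 TF-IDF
# transforms, giving 186 input columns in total.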
params = {
'objective': "multiclass",
'metric': {"multi_logloss"},
'num_class': 9,
'seed': 42,
'lambda_l1': 0.0036682603550733813,
'lambda_l2': 8.924549306063208,
'num_leaves': 113,
'feature_fraction': 0.48000000000000004,
'bagging_fraction': 1.0,
'bagging_freq': 0,
'min_child_samples': 20
}
dataset_train = lgb.Dataset(X_train_tfidf, y_train)
dataset_valid = lgb.Dataset(X_valid_tfidf, y_valid)
booster = lgb.train(
params,
dataset_train,
feature_name=(
[f"feat_{i}" for i in range(1, 94)]
+ [f"tfidf_{i}" for i in range(1, 94)]
),
num_boost_round=500,
valid_sets=dataset_valid,
early_stopping_rounds=20,
)
best_iteration = booster.best_iteration
print(best_iteration)
lgb.plot_importance(
booster,
max_num_features=30,
figsize=(12, 10),
dpi=300,
);
df_test = pd.read_csv(
"/kaggle/input/otto-group-product-classification-challenge/test.csv",
dtype=dtypes
).set_index("id")
tfidf = TfidfTransformer()
tfidf_feature_train_all = tfidf.fit_transform(df_train[feature_columns]).toarray().astype("float32")
X_train_all_tfidf = np.hstack((df_train[feature_columns].values, tfidf_feature_train_all))
dataset_train_all = lgb.Dataset(X_train_all_tfidf, df_train[target_column])
booster = lgb.train(
params,
dataset_train_all,
feature_name=(
[f"feat_{i}" for i in range(1, 94)]
+ [f"tfidf_{i}" for i in range(1, 94)]
),
num_boost_round=best_iteration,
)
tfidf_feature_test = tfidf.transform(df_test).toarray()
X_test_tfidf = np.hstack((df_test[feature_columns].values, tfidf_feature_test))
pred = booster.predict(X_test_tfidf)
for idx, col in order_to_class.items():
df_test[col] = pred[:,idx]
df_test[[f"Class_{i}" for i in range(1, 10)]].to_csv('submission.csv', index=True)
| [
"[email protected]"
] | |
ecbea36070dd712629e55b616938b75491ba10b9 | 3a8f8bef453f5eb01cc6f22d8bb140d7791024df | /command/tcommand.py | add4fd8bf60184b1755ced53d3534642b3e2870a | [] | no_license | thomasvs/python-command | 23a68de2ce596a7eed5a2740a5ee1471f62ed569 | 4c31072e9f5f68e22c92cdc8f0a02d911b7e5fc0 | refs/heads/master | 2020-05-02T11:29:24.459355 | 2014-09-07T22:23:58 | 2014-09-07T22:23:58 | 5,668,726 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 5,146 | py |
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
"""
A helper class for Twisted commands.
"""
from twisted.internet import defer
from twisted.python import failure
import command
class TwistedCommand(command.Command):
"""
I am a Command that integrates with Twisted and its reactor.
Instead of implementing the do() method, subclasses should implement a
doLater() method which returns a deferred.
"""
def installReactor(self, reactor=None):
"""
Override me to install your own reactor in the parent
ReactorCommand.
"""
self.debug('installing reactor %r in ancestor ReactorCommand',
reactor)
c = self
while c.parentCommand and not isinstance(c, ReactorCommand):
c = c.parentCommand
if not c:
raise AssertionError(
'%r does not have a parent ReactorCommand' % self)
self.debug('installing reactor %r in ancestor ReactorCommand %r',
reactor, c)
c.installReactor(reactor)
### command.Command implementations
def do(self, args):
self.debug('%r: installing reactor using method %r', self,
self.installReactor)
self.installReactor()
d = self.doLater(args)
return d
### command.TwistedCommand methods to implement by subclasses
def doLater(self, args):
"""
@rtype: L{defer.Deferred}
"""
raise NotImplementedError
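
# Illustrative subclass sketch (not part of the original module; assumes the
# default Twisted reactor is acceptable):
#
#     from twisted.internet import task, reactor
#
#     class Sleep(TwistedCommand):
#         """Wait one second, then exit with status 0."""
#
#         def doLater(self, args):
#             return task.deferLater(reactor, 1.0, lambda: 0)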
class ReactorCommand(command.Command):
"""
I am a Command that runs a reactor for its subcommands if they
return a L{defer.Deferred} from their doLater() method.
"""
reactor = None
returnValue = None
_reactorRunning = False
def installReactor(self, reactor=None):
"""
Override me to install your own reactor.
"""
self.debug('ReactorCommand: installing reactor %r', reactor)
if not reactor:
from twisted.internet import reactor
self.reactor = reactor
### command.Command overrides
def parse(self, argv):
"""
I will run a reactor to get the non-deferred result.
"""
self.debug('parse: chain up')
try:
r = command.Command.parse(self, argv)
except Exception:
# get a full traceback to debug here
f = failure.Failure()
self.warning('Exception during %r.parse: %r\n%s\n',
self, f.getErrorMessage(), f.getTraceback())
self.stderr.write('Exception: %s\n' % f.value)
raise
self.debug('parse: result %r', r)
# if it's not a deferred, return the result as is
if not isinstance(r, defer.Deferred):
return r
# We have a deferred, so we need to run a reactor
d = r
# child commands could have installed a reactor
if not self.reactor:
self.installReactor()
def parseCb(ret):
if ret is None:
self.debug('parse returned None, defaults to exit code 0')
ret = 0
elif ret:
self.debug('parse returned %r' % ret)
elif self.parser.help_printed or self.parser.usage_printed:
ret = 0
self.debug('parse: cb: done')
self.returnValue = ret
if self._reactorRunning:
self._reactorRunning = False
self.debug('stopping reactor')
self.reactor.stop()
return ret
def parseEb(failure):
self.debug('parse: eb: failure: %r\n%s\n',
failure.getErrorMessage(), failure.getTraceback())
# we can get here even before we run the reactor below;
# so schedule a stop instead of doing it here
# self.reactor.stop()
self.reactor.callLater(0, self.reactor.stop)
if failure.check(command.CommandExited):
self.stderr.write(failure.value.output + '\n')
reason = failure.value.status
self.returnValue = reason
return reason
self.warning('errback: %r', failure.getErrorMessage())
self.stderr.write('Failure: %s\n' % failure.value)
self.returnValue = failure
# we handled it by storing it for reraising, so don't
# return it
return
d.addCallback(parseCb)
d.addErrback(parseEb)
def raiseIfFailure():
if isinstance(self.returnValue, failure.Failure):
raise self.returnValue.value
if self.returnValue is not None:
self.debug('got return value before reactor ran, returning %r' %
self.returnValue)
raiseIfFailure()
return self.returnValue
self.debug('running reactor %r', self.reactor)
self._reactorRunning = True
self.reactor.run()
self.debug('ran reactor, got %r' % self.returnValue)
raiseIfFailure()
self.debug('ran reactor, returning %r' % self.returnValue)
return self.returnValue
| [
"thomas (at) apestaart (dot) org"
] | thomas (at) apestaart (dot) org |
79f50a378ab45f7801f359d695045b821ff47443 | b7125b27e564d2cc80a2ce8d0a6f934aa22c8445 | /.history/sudoku_20201101154742.py | c093972aa62bcc31bf99b51feb72a76950605747 | [] | no_license | JensVL96/Puzzle-solver-for-fun | 4c15dcd570c3705b7ac555efb56b52913e81083c | 6d8a4378a480372213a596a336a4deca727a00fc | refs/heads/master | 2021-07-15T05:19:42.185495 | 2020-11-08T13:59:49 | 2020-11-08T13:59:49 | 224,855,888 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,829 | py |
# -*- coding: utf-8 -*-
from __future__ import print_function
from config import *
from create_board import *
from solve_bloard import *
from display_board import *
from string import *
from math import floor
import pygame as pg
import numpy as np
# For error highlighting
def set_highlight(row, col, blk, lock):
global input_lock
input_lock = lock
global row_index
row_index = row
global col_index
    col_index = col
global blk_index
    blk_index = blk
def get_cord(pos):
global box_index_x
box_index_x = (pos[0] - TOP_LX)//BLOCK_SIZE
global box_index_y
box_index_y = (pos[1] - TOP_LY)//BLOCK_SIZE
def valid(grid, x, y, val, increase):
input_lock = 0
row = col = blk = (0, 0)
for index in range(9):
# Check if value in column
if grid[x][index] == val:
col = (x, index)
input_lock = 1
# Check if value in row
if grid[index][y] == val:
row = (index, y)
input_lock = 1
# Finds the block
index_x = x // 3 # integer division
index_y = y // 3
# Check if value in block
for i in range(index_x * 3, index_x * 3 + 3):
for j in range (index_y * 3, index_y * 3 + 3):
if grid[i][j] == val:
blk = (i, j)
input_lock = 1
if input_lock == 1:
set_highlight(row, col, blk, input_lock)
return False
return True
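
# Example: for cell (x=4, y=7) the block indices are (1, 2), so the loop
# above scans rows 3..5 and columns 6..8 of the grid for a duplicate value.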
class Main():
def __init__(self):
self.board = []
self.run()
def run(self):
pg.init()
self.screen = pg.display.set_mode(SCREEN_RES)
pg.display.set_caption('Sudoku solver')
display = Display_board(self.screen)
flag1 = 0
val = 0
pos = (0, 0)
blink = False
input_lock = 0
get_cord((0, 0))
set_highlight((0, 0), (0, 0), (0, 0), input_lock)
board = create_board().board
while 1:
for event in pg.event.get():
if event.type == pg.QUIT or (event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE):
exit()
if event.type == pg.MOUSEBUTTONDOWN:
flag1 = 1
pos = pg.mouse.get_pos()
get_cord(pos)
blink = True
if event.type == pg.KEYDOWN and input_lock != 1:
if event.key == pg.K_1:
val = 1
if event.key == pg.K_2:
val = 2
if event.key == pg.K_3:
val = 3
if event.key == pg.K_4:
val = 4
if event.key == pg.K_5:
val = 5
if event.key == pg.K_6:
val = 6
if event.key == pg.K_7:
val = 7
if event.key == pg.K_8:
val = 8
if event.key == pg.K_9:
val = 9
elif event.type == pg.KEYDOWN and input_lock == 1:
if event.key == pg.K_BACKSPACE:
val = 0
set_highlight((0, 0), (0, 0), (0, 0), 0)
if val != 0:
display.draw_val(val, box_index_x, box_index_y)
if valid(board, int(box_index_x), int(box_index_y), val, display):
board[int(box_index_x)][int(box_index_y)] = val
else:
board[int(box_index_x)][int(box_index_y)] = 0
val = 0
pg.draw.rect(self.screen, BLACK, (0, 0, self.screen.get_width(), self.screen.get_height()))
self.screen.fill(BEIGE)
display.draw(board)
if blink:
cell = display.find_cell(box_index_x, box_index_y)
alpha = display.blink()
print("start pos x: ", floor(cell[0]), "start pos y: ", floor(cell[1]), "end pos x: ", floor(cell[2]), "end pos y: ", floor(cell[3]))
cell_width = int(cell[2])
cell_height = int(cell[3])
start_pos_X = int(cell[0])
start_pos_y = int(cell[1])
rect = pg.Surface((cell_width, cell_height))
rect.set_alpha(alpha)
# pg.draw.rect(self.screen, GREEN, cell)
                self.screen.blit(rect, (start_pos_X, start_pos_y))  # a Surface has no .x/.y; blit at the cell's top-left
# print(box_index_x, box_index_y)
if input_lock == 1:
display.update(board, row_index, col_index, blk_index)
# display.draw_box()
pg.display.update()
self.solution = solve_board(board)
self.solution.assign_flags(board)
if __name__ == '__main__':
Main()
| [
"[email protected]"
] | |
17154043547b14982f365d99f3c9ecf178e27f2c | 700d4302a675b6aaaa7514a87d87ccd614051712 | /electrum_dash/gui/qt/dash_net_dialog.py | 99e82390ee338bfa47233139df37e07b96842c65 | [
"MIT"
] | permissive | bynicolas/electrum-pac | ce37033c6274f671674bf5d707010d31dab254b5 | 8c61fc2e14fc307f40d1cc785f2a604ab4a2be04 | refs/heads/master | 2023-03-11T14:29:36.624470 | 2021-02-23T23:25:20 | 2021-02-23T23:25:20 | 340,194,875 | 0 | 0 | MIT | 2021-02-24T00:02:54 | 2021-02-18T22:32:44 | Python | false | false | 17,492 | py |
# -*- coding: utf-8 -*-
import time
from enum import IntEnum
from PyQt5.QtCore import Qt, QTimer
from PyQt5.QtWidgets import (QGridLayout, QDialog, QVBoxLayout, QCheckBox,
QTabWidget, QWidget, QLabel, QSpinBox, QLineEdit,
QTreeWidget, QTreeWidgetItem, QMenu, QHeaderView)
from electrum_dash import constants
from electrum_dash.dash_net import MIN_PEERS_LIMIT, MAX_PEERS_LIMIT
from electrum_dash.i18n import _
from electrum_dash.logging import get_logger
from .util import Buttons, CloseButton
_logger = get_logger(__name__)
MATCH_STR_CS = Qt.MatchFixedString | Qt.MatchCaseSensitive
class DashPeersWidget(QTreeWidget):
class Columns(IntEnum):
PEER = 0
UAGENT = 1
PING = 2
READ = 3
WRITE = 4
def __init__(self, parent):
QTreeWidget.__init__(self)
self.parent = parent
self.setHeaderLabels([_('Peer'), _('User Agent'), _('Ping time (ms)'),
_('Received KiB'), _('Sent KiB')])
h = self.header()
mode = QHeaderView.ResizeToContents
h.setSectionResizeMode(self.Columns.PEER, mode)
h.setSectionResizeMode(self.Columns.UAGENT, mode)
h.setSectionResizeMode(self.Columns.PING, mode)
h.setSectionResizeMode(self.Columns.READ, mode)
h.setSectionResizeMode(self.Columns.WRITE, mode)
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.create_menu)
def create_menu(self, position):
item = self.currentItem()
if not item:
return
dash_net = self.parent.network.dash_net
peer = item.text(self.Columns.PEER)
menu = QMenu()
menu.addAction(_('Disconnect'), lambda: self.disconnect(peer))
if not dash_net.use_static_peers:
menu.addAction(_('Ban'),
lambda: self.disconnect(peer, 'ban from gui'))
menu.exec_(self.viewport().mapToGlobal(position))
def disconnect(self, peer, msg=None):
dash_net = self.parent.network.dash_net
dash_peer = dash_net.peers.get(peer)
if dash_peer:
if msg:
dash_peer.ban(msg)
dash_peer.close()
def update(self, event=None, args=None):
dash_net = self.parent.network.dash_net
peers = dash_net.peers
if event is None:
self.clear()
for peer, dash_peer in sorted(list(peers.items())):
self.add_peer(peer, dash_peer)
elif event == 'dash-peers-updated':
action, peer = args
if action == 'added':
dash_peer = peers.get(peer)
if dash_peer:
self.add_peer(peer, dash_peer, insert=True)
elif action == 'removed':
items = self.findItems(peer, MATCH_STR_CS)
if items:
idx = self.indexOfTopLevelItem(items[0])
self.takeTopLevelItem(idx)
elif event == 'dash-net-activity':
for peer, dash_peer in sorted(list(peers.items())):
items = self.findItems(peer, MATCH_STR_CS)
if items:
ping_time = str(dash_peer.ping_time)
read_kbytes = str(round(dash_peer.read_bytes/1024, 1))
write_kbytes = str(round(dash_peer.write_bytes/1024, 1))
for i in items:
i.setText(self.Columns.PING, ping_time)
i.setText(self.Columns.READ, read_kbytes)
i.setText(self.Columns.WRITE, write_kbytes)
super().update()
def add_peer(self, peer, dash_peer, insert=False):
dash_net = self.parent.network.dash_net
peers = dash_net.peers
v = dash_peer.version
user_agent = v.user_agent.decode('utf-8')
ping_time = str(dash_peer.ping_time)
read_kbytes = str(round(dash_peer.read_bytes/1024, 1))
write_kbytes = str(round(dash_peer.write_bytes/1024, 1))
peers_item = QTreeWidgetItem([peer, user_agent, ping_time,
read_kbytes, write_kbytes])
if peers:
sorted_peers = sorted(list(peers.keys()))
if peer in sorted_peers:
idx = sorted_peers.index(peer)
self.insertTopLevelItem(idx, peers_item)
else:
self.addTopLevelItem(peers_item)
else:
self.addTopLevelItem(peers_item)
class SporksWidget(QTreeWidget):
class Columns(IntEnum):
NAME = 0
ACTIVE = 1
VALUE = 2
DEFAULT = 3
def __init__(self, parent):
QTreeWidget.__init__(self)
self.parent = parent
self.setHeaderLabels([_('Spork'), _('Active'), _('Value'), ''])
h = self.header()
mode = QHeaderView.ResizeToContents
h.setSectionResizeMode(self.Columns.NAME, mode)
h.setSectionResizeMode(self.Columns.ACTIVE, mode)
h.setSectionResizeMode(self.Columns.VALUE, mode)
h.setSectionResizeMode(self.Columns.DEFAULT, mode)
def update(self):
dash_net = self.parent.network.dash_net
sporks_dict = dash_net.sporks.as_dict()
self.clear()
for k in sorted(list(sporks_dict.keys())):
name = sporks_dict[k]['name']
active = str(sporks_dict[k]['active'])
value = str(sporks_dict[k]['value'])
default = _('Default') if sporks_dict[k]['default'] else ''
spork_item = QTreeWidgetItem([name, active, value, default])
self.addTopLevelItem(spork_item)
super().update()
class BanlistWidget(QTreeWidget):
class Columns(IntEnum):
PEER = 0
UA = 1
MSG = 2
AT = 3
def __init__(self, parent):
QTreeWidget.__init__(self)
self.parent = parent
self.setHeaderLabels([_('Peer'), _('User Agent'),
_('Message'), _('Ban time')])
h = self.header()
mode = QHeaderView.ResizeToContents
h.setSectionResizeMode(self.Columns.PEER, mode)
h.setSectionResizeMode(self.Columns.UA, mode)
h.setSectionResizeMode(self.Columns.MSG, mode)
h.setSectionResizeMode(self.Columns.AT, mode)
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.create_menu)
def create_menu(self, position):
item = self.currentItem()
if not item:
return
peer = item.text(self.Columns.PEER)
menu = QMenu()
menu.addAction(_('Remove'), lambda: self.unban(peer))
menu.exec_(self.viewport().mapToGlobal(position))
def unban(self, peer):
dash_net = self.parent.network.dash_net
if peer:
dash_net._remove_banned_peer(peer)
def update(self, event=None, args=None):
dash_net = self.parent.network.dash_net
banlist = dash_net.banlist
if event is None:
self.clear()
for peer in sorted(list(banlist.keys())):
self.add_peer(peer)
else:
action, peer = args
if action == 'added':
self.add_peer(peer, insert=True)
elif action == 'removed':
items = self.findItems(peer, MATCH_STR_CS)
if items:
idx = self.indexOfTopLevelItem(items[0])
self.takeTopLevelItem(idx)
super().update()
def add_peer(self, peer, insert=False):
dash_net = self.parent.network.dash_net
banlist = dash_net.banlist
ua = banlist[peer]['ua']
at = str(time.ctime(banlist[peer]['at']))
msg = str(banlist[peer]['msg'])
banlist_item = QTreeWidgetItem([peer, ua, msg, at])
if banlist:
sorted_banlist = sorted(list(banlist.keys()))
if peer in sorted_banlist:
idx = sorted_banlist.index(peer)
self.insertTopLevelItem(idx, banlist_item)
else:
self.addTopLevelItem(banlist_item)
else:
self.addTopLevelItem(banlist_item)
class DashNetDialogLayout(object):
def __init__(self, network, config, parent):
self.parent = parent
self.network = network
self.config = config
self.tabs = tabs = QTabWidget()
dash_net_tab = QWidget()
sporks_tab = QWidget()
banlist_tab = QWidget()
bls_speed_tab = QWidget()
tabs.addTab(dash_net_tab, _('Dash Network'))
tabs.addTab(sporks_tab, _('Sporks'))
tabs.addTab(banlist_tab, _('Banlist'))
if parent.is_testnet:
tabs.addTab(bls_speed_tab, _('BLS Speed'))
self.min_t = 1000
self.max_t = 0
self.n_measures = -1
def min_str():
return _('Min time') + f': {self.min_t}'
def max_str():
return _('Max time') + f': {self.max_t}'
self.min_label = QLabel(min_str())
self.max_label = QLabel(max_str())
vbox = QVBoxLayout(bls_speed_tab)
vbox.addWidget(self.min_label)
vbox.addWidget(self.max_label)
self.timer = QTimer()
self.timer.setInterval(500)
def update_bls_speed():
if self.parent.isVisible() and bls_speed_tab.isVisible():
start_t = time.time()
res = self.network.dash_net.test_bls_speed()
res_t = time.time() - start_t
_logger.info(f'Test BLS Speed: res={res}, time={res_t}')
self.min_t = min(self.min_t, res_t)
self.max_t = max(self.max_t, res_t)
self.min_label.setText(min_str())
self.max_label.setText(max_str())
self.n_measures += 1
if self.n_measures >= 100:
self.timer.stop()
self.timer.timeout.connect(update_bls_speed)
def on_tabs_current_changed(*args):
cur_widget = self.tabs.currentWidget()
if cur_widget == bls_speed_tab and self.n_measures < 0:
self.n_measures = 0
self.timer.start()
tabs.currentChanged.connect(on_tabs_current_changed)
# Dash Network tab
grid = QGridLayout(dash_net_tab)
grid.setSpacing(8)
dash_net = self.network.dash_net
net = self.network
# row 0
self.both_kb = QLabel()
self.read_kb = QLabel()
self.write_kb = QLabel()
grid.addWidget(self.both_kb, 0, 0, 1, 2)
grid.addWidget(self.read_kb, 0, 2, 1, 2)
grid.addWidget(self.write_kb, 0, 4, 1, 2)
self.run_dash_net_cb = QCheckBox(_('Enable Dash Network'))
self.run_dash_net_cb.setChecked(self.config.get('run_dash_net', True))
run_dash_net_modifiable = self.config.is_modifiable('run_dash_net')
self.run_dash_net_cb.setEnabled(run_dash_net_modifiable)
def on_run_dash_net_cb_clicked(run_dash_net):
self.config.set_key('run_dash_net', run_dash_net, True)
net.run_from_another_thread(net.dash_net.set_parameters())
self.run_dash_net_cb.clicked.connect(on_run_dash_net_cb_clicked)
grid.addWidget(self.run_dash_net_cb, 0, 6, 1, 2)
# row 1
is_cmd_dash_peers = dash_net.is_cmd_dash_peers
use_static_peers = dash_net.use_static_peers
static_peers_label = QLabel(_('Static Peers:'))
grid.addWidget(static_peers_label, 1, 0, 1, 1)
self.dash_peers_e = QLineEdit()
self.dash_peers_e.setText(dash_net.dash_peers_as_str())
self.dash_peers_e.setReadOnly(is_cmd_dash_peers)
def on_dash_peers_editing_end():
if is_cmd_dash_peers:
return
res = dash_net.dash_peers_from_str(self.dash_peers_e.text())
if type(res) == str:
self.err_label.setText(f'Error: {res}')
else:
self.config.set_key('dash_peers', res, True)
if dash_net.use_static_peers:
net.run_from_another_thread(net.dash_net.set_parameters())
self.dash_peers_e.editingFinished.connect(on_dash_peers_editing_end)
def on_dash_peers_changed():
self.err_label.setText('')
self.dash_peers_e.textChanged.connect(on_dash_peers_changed)
grid.addWidget(self.dash_peers_e, 1, 1, 1, 5)
self.use_static_cb = QCheckBox(_('Use Static Peers'))
self.use_static_cb.setChecked(use_static_peers)
self.use_static_cb.setEnabled(not is_cmd_dash_peers)
def on_use_static_cb_clicked(use_static):
self.config.set_key('dash_use_static_peers', use_static, True)
net.run_from_another_thread(net.dash_net.set_parameters())
self.use_static_cb.clicked.connect(on_use_static_cb_clicked)
grid.addWidget(self.use_static_cb, 1, 6, 1, 2)
# row 2 with error msg
self.err_label = QLabel('')
self.err_label.setObjectName('err-label')
grid.addWidget(self.err_label, 2, 0, 1, -1)
# row 3
self.status_label = QLabel('')
grid.addWidget(self.status_label, 3, 0, 1, 6)
max_peers_label = _('Max Peers:')
grid.addWidget(QLabel(max_peers_label), 3, 6, 1, 1)
self.max_peers = QSpinBox()
self.max_peers.setValue(dash_net.max_peers)
self.max_peers.setRange(MIN_PEERS_LIMIT, MAX_PEERS_LIMIT)
grid.addWidget(self.max_peers, 3, 7, 1, 1)
def on_change_max_peers(max_peers):
dash_net.max_peers = max_peers
self.max_peers.valueChanged.connect(on_change_max_peers)
# row 4
self.dash_peers_list = DashPeersWidget(self)
grid.addWidget(self.dash_peers_list, 4, 0, 1, -1)
# Dash Sporks tab
vbox = QVBoxLayout(sporks_tab)
sporks_label = QLabel(_('Dash Sporks Values'))
self.sporks_list = SporksWidget(self)
vbox.addWidget(sporks_label)
vbox.addWidget(self.sporks_list)
# Dash Banlist tab
vbox = QVBoxLayout(banlist_tab)
banlist_label = QLabel(_('Banned Dash Peers'))
self.banlist_list = BanlistWidget(self)
vbox.addWidget(banlist_label)
vbox.addWidget(self.banlist_list)
# init layout
vbox = QVBoxLayout()
vbox.addWidget(tabs)
self.layout_ = vbox
self.update()
def update(self, event=None, args=None):
is_visible = self.parent.isVisible()
if event is not None and not is_visible:
return
if event is None:
self.update_dash_net_tab()
self.sporks_list.update()
self.banlist_list.update()
elif event in ['dash-peers-updated', 'dash-net-activity']:
self.update_dash_net_tab(event, args)
elif event == 'sporks-activity':
self.sporks_list.update()
elif event == 'dash-banlist-updated':
self.banlist_list.update(event, args)
def update_dash_net_tab(self, event=None, args=None):
dash_net = self.network.dash_net
self.dash_peers_list.update(event, args)
if event in [None, 'dash-net-activity']:
read_bytes = dash_net.read_bytes
write_bytes = dash_net.write_bytes
both_kb = round((write_bytes + read_bytes)/1024, 1)
read_kb = round(read_bytes/1024, 1)
write_kb = round(write_bytes/1024, 1)
self.both_kb.setText(_('Total') + f': {both_kb} KiB')
self.read_kb.setText(_('Received') + f': {read_kb} KiB')
self.write_kb.setText(_('Sent') + f': {write_kb} KiB')
if event in [None, 'dash-peers-updated']:
status = _('Connected Peers') + f': {len(dash_net.peers)}'
self.status_label.setText(status)
def layout(self):
return self.layout_
class DashNetDialog(QDialog):
def __init__(self, network, config, dash_net_sobj):
QDialog.__init__(self)
self.setWindowTitle(_('Dash Network'))
self.setMinimumSize(700, 400)
self.is_testnet = constants.net.TESTNET
self.network = network
self.dnlayout = DashNetDialogLayout(network, config, self)
self.dash_net_sobj = dash_net_sobj
vbox = QVBoxLayout(self)
vbox.addLayout(self.dnlayout.layout())
vbox.addLayout(Buttons(CloseButton(self)))
self.dash_net_sobj.dlg.connect(self.on_updated)
def show(self):
super(DashNetDialog, self).show()
if self.network:
self.network.dash_net.register_callback(self.on_dash_net,
['dash-peers-updated',
'dash-net-activity',
'sporks-activity',
'dash-banlist-updated'])
def closeEvent(self, e):
if self.dnlayout.err_label.text():
            e.ignore()
            return  # dialog stays open; keep the dash_net callbacks registered
if self.network:
self.network.dash_net.unregister_callback(self.on_dash_net)
def on_dash_net(self, event, *args):
self.dash_net_sobj.dlg.emit(event, args)
def on_updated(self, event=None, args=None):
self.dnlayout.update(event, args)
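# Minimal usage sketch (assumption: `network`, `config` and a QObject exposing
# a `dlg` signal are provided by the wallet window; the names below are
# illustrative, not part of this file):
#
#   dlg = DashNetDialog(network, config, dash_net_sobj)
#   dlg.show()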
| [
"[email protected]"
] | |
d4533f4cdf53a8a902ef0e5e52f13d6ae690bf32 | cfc3fa658f826d02308453e557d82758895399c2 | /datasets/id_newspapers_2018/id_newspapers_2018.py | 96a294e8fc22502654396c9ba5f85efe68734ddd | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | meehawk/datasets | cac530ec0e17514c01cdff30302521d6303ed93b | b70141e3c5149430951773aaa0155555c5fb3e76 | refs/heads/master | 2023-03-29T12:51:54.700891 | 2021-04-08T17:22:53 | 2021-04-08T17:22:53 | 355,996,122 | 9 | 0 | Apache-2.0 | 2021-04-08T17:31:03 | 2021-04-08T17:31:02 | null | UTF-8 | Python | false | false | 4,123 | py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Indonesian Newspapers 2018"""
from __future__ import absolute_import, division, print_function
import glob
import json
import os
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{id_newspapers_2018,
author = {},
title = {Indonesian Newspapers 2018},
year = {2019},
url = {https://github.com/feryandi/Dataset-Artikel},
}
"""
_DESCRIPTION = """\
The dataset contains around 500K articles (136M of words) from 7 Indonesian newspapers: Detik, Kompas, Tempo,
CNN Indonesia, Sindo, Republika and Poskota. The articles are dated between 1st January 2018 and 20th August 2018
(with few exceptions dated earlier). The size of uncompressed 500K json files (newspapers-json.tgz) is around 2.2GB,
and the cleaned uncompressed in a big text file (newspapers.txt.gz) is about 1GB. The original source in Google Drive
contains also a dataset in html format which include raw data (pictures, css, javascript, ...)
from the online news website
"""
_HOMEPAGE = "https://github.com/feryandi/Dataset-Artikel"
_LICENSE = "Creative Commons Attribution-ShareAlike 4.0 International Public License"
_URLs = ["http://cloud.uncool.ai/index.php/s/kF83dQHfGeS2LX2/download"]
class IdNewspapers2018Config(datasets.BuilderConfig):
"""BuilderConfig for IdNewspapers2018"""
def __init__(self, **kwargs):
"""BuilderConfig for IdNewspapers2018.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(IdNewspapers2018Config, self).__init__(**kwargs)
class IdNewspapers2018(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
IdNewspapers2018Config(
name="id_newspapers_2018",
version=VERSION,
description="IdNewspapers2018 dataset",
),
]
def _info(self):
features = datasets.Features(
{
"id": datasets.Value("string"),
"url": datasets.Value("string"),
"date": datasets.Value("string"),
"title": datasets.Value("string"),
"content": datasets.Value("string"),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
my_urls = _URLs[0]
data_dir = dl_manager.download_and_extract(my_urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"article_dir": os.path.join(data_dir, "newspapers"),
"split": "train",
},
)
]
def _generate_examples(self, article_dir, split):
logger.info("⏳ Generating %s examples from = %s", split, article_dir)
id = 0
for path in sorted(glob.glob(os.path.join(article_dir, "**/*.json"), recursive=True)):
with open(path, encoding="utf-8") as f:
data = json.load(f)
yield id, {
"id": str(id),
"url": data["url"],
"date": data["date"],
"title": data["title"],
"content": data["content"],
}
id += 1
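# Minimal usage sketch (assumes the `datasets` library is installed and this
# script is resolvable under the name "id_newspapers_2018"):
#
#   from datasets import load_dataset
#   ds = load_dataset("id_newspapers_2018", split="train")
#   print(ds[0]["title"], ds[0]["url"])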
| [
"[email protected]"
] | |
94e2c2a401b125a43cee98d701cd7ec13826b551 | 773dc03117f8b0d51f7a10e2a4577229c8be6ba3 | /migrations/models/36_20230108160220_update.py | 825e7924ebf0640cde169d23190cb1cc5555254b | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | tcprescott/sahasrahbot | 382cdff058d63feb5f42dbbd7729eb4b08c4d1bd | 64a125d948873d0faa5ea3f2d306075ad9e013be | refs/heads/master | 2023-08-31T15:33:01.533206 | 2023-08-31T01:58:48 | 2023-08-31T01:58:48 | 178,310,225 | 22 | 43 | MIT | 2023-09-01T08:45:52 | 2019-03-29T01:34:45 | Python | UTF-8 | Python | false | false | 380 | py | from tortoise import BaseDBAsyncClient
async def upgrade(db: BaseDBAsyncClient) -> str:
return """
ALTER TABLE `ranked_choice_election` ADD `private` BOOL NOT NULL DEFAULT 0;
DROP TABLE IF EXISTS `twitch_channels`;"""
async def downgrade(db: BaseDBAsyncClient) -> str:
return """
ALTER TABLE `ranked_choice_election` DROP COLUMN `private`;"""
| [
"[email protected]"
] | |
c2ee27335ec1db4df52d38e9bcdabfb39e334cc2 | 8239e45b6b031839dcd464bc80a6c8d17ed2f7b7 | /cloudarmy/contrib/conditions/environment.py | 53177af7d135ab208dc6fbe1359908ca766b4a45 | [] | no_license | geeknam/cloudarmy | 401efaee8c8e5e916ddff757edcc657698d9687f | 4363d5bdf8719a8f8bab8104c8ea7d2247d15746 | refs/heads/master | 2021-07-11T19:44:41.769661 | 2016-03-14T12:43:47 | 2016-03-14T12:43:47 | 52,852,867 | 3 | 1 | null | 2021-03-25T21:40:17 | 2016-03-01T06:11:43 | Python | UTF-8 | Python | false | false | 278 | py | from troposphere import Ref, Equals
class EnvironmentCondition(object):
conditions = {
"IsProduction": Equals(
Ref("EnvironmentType"), "production"
),
"IsStaging": Equals(
Ref("EnvironmentType"), "staging"
),
}
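# Minimal usage sketch (assumption: the surrounding framework merges
# `conditions` into a troposphere Template; the variable names are
# illustrative):
#
#   from troposphere import Template
#   t = Template()
#   for name, cond in EnvironmentCondition.conditions.items():
#       t.add_condition(name, cond)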
| [
"[email protected]"
] | |
3fe21eecf2598ae7479b29ddce155256c9cd28be | 225543bcaa194360aa66c738a99b7ad5c291434b | /main_210610.py | 2a8f1f54bfe2a777202b6b4753473944607604b4 | [] | no_license | m0100434/zendlijsten | f0eecf12ab3fc90c1db9b5c22f1163a92dcdf6f7 | 171e1c427db71dad01408072081c85035c57a2b2 | refs/heads/main | 2023-06-19T05:04:31.619139 | 2021-07-17T07:51:46 | 2021-07-17T07:51:46 | 349,770,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,358 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 6 11:56:59 2021
@author: ArxXi
"""
from selenium import webdriver
import time
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import NoSuchElementException
import pickle
from datetime import date
def save_cookie(driver, path):
with open(path, 'wb') as filehandler:
pickle.dump(driver.get_cookies(), filehandler)
def load_cookie(driver, path):
with open(path, 'rb') as cookiesfile:
cookies = pickle.load(cookiesfile)
for cookie in cookies:
driver.add_cookie(cookie)
def remove_entry(index):
ourtime.pop(index-entries_deleted)
# print("time which is going to be deleted = "+ ourtime[index])
# ourtime[index] = "-"
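# remove_entry() keeps `ourtime` aligned with `links` when an uninteresting
# channel is skipped: `entries_deleted` compensates for the indices shifting
# left after each pop.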
"""
Een v
VTM v
Vier v
Canvas v
Vitaya = vtm 4 v
Q2 v
Vijf v
CAZ = vtm 3 v
Zes v
Ketnet v
La Une v
RTL-TVI v
AB3 ?
La Deux v
Club RTL v
Plug RTL ?
La Trois v
Nickelodeon FR ?
"""
def channel_identifier(anchor_link):
tmp = anchor_link.split("/")
if(tmp[4] == "een"):
return "een"
if (tmp[4] == "canvas"):
return "canvas"
if (tmp[4] == "vtm"):
return "vtm"
if (tmp[4] == "vier"):
return "vier"
if (tmp[4] == "vijf"):
return "vijf"
if (tmp[4] == "zes"):
return "zes"
if (tmp[4] == "rtl-tvi-hd"):
return "RTI TVI HD"
if (tmp[4] == "la-une"):
return "LA UNE"
if (tmp[4] == "la-deux"):
return "LA DEUX"
if (tmp[4] == "ketnet"):
return "KETNET"
if (tmp[4] == "vtm2"):
return "vtm2"
if (tmp[4] == "vtm3"):
return "vtm3"
if (tmp[4] == "club-rtl"):
return "club-rtl"
if (tmp[4] == "vtm4"):
return "vtm4"
if (tmp[4] == "caz-2"):
return "caz-2"
if (tmp[4] == "la-trois"):
return "la-trois"
return "null"
# from selenium.webdriver.firefox.options import Options as FirefoxOptions
# options = FirefoxOptions()
# options.add_argument("--headless")
# driver = webdriver.Firefox(options=options)
#0 click een, canvas,vtm, vier
#1 click vjtf
#2 click zes
#9 click la une , la deux, ketnet, la trois
#14 click
date_of_movie = ""
links_traveresed = 0
default_link = "https://www.demorgen.be/tv-gids/dag/10-06-2021"
if(len(default_link.split("/")) ==6):
date_of_movie =default_link.split("/")[5]
print("got true")
else:
date_of_movie = date.today()
date_of_movie = date_of_movie.strftime('%d/%m/%y')
driver = webdriver.Firefox()
driver.maximize_window()
driver.get(default_link)
# driver.implicitly_wait(15)
delay = 10 # seconds
try:
myElem = WebDriverWait(driver, delay).until(EC.presence_of_element_located((By.ID, 'sp_message_iframe_404503')))
print("Iframe element ready")
except TimeoutException:
print("Iframe not loaded issue")
a = driver.find_element_by_tag_name("iframe")
driver.switch_to.frame(1)
print("switching to iframe done")
green_button = driver.find_element_by_xpath('//button[text()="Akkoord"]')
green_button.click()
time.sleep(10)
print("It will be on schedule website")
driver.switch_to.default_content()
#declarration
iteration = 0
ourtime = []
channel_names = []
ad_index = 82
associated_channel_name = []
production_date = []
show_title = []
current_episode = []
total_episode = []
season_number = []
myepisode_number = ""
description = []
genre = []
series_movie = []
actors = []
episode_text = " "
entries_deleted = 0
number_of_clicks = [0,1,2,6,9,14]
links = []
while (iteration != (len(number_of_clicks))):
try:
myElem = WebDriverWait(driver, delay).until(EC.presence_of_element_located((By.XPATH, '/html/body/main/div/div/div[2]/div/div/div[1]/div[2]/button[2]')))
next_button = driver.find_element_by_xpath("/html/body/main/div/div/div[2]/div/div/div[1]/div[2]/button[2]")
for i in range(0, number_of_clicks[iteration]):
print("next button should be clicked")
next_button.click()
driver.implicitly_wait(2)
print("Next Button located")
except TimeoutException:
print("Next Button Not Located")
a = driver.find_elements_by_class_name("tvgm-channel__logo-placeholder")
#Getting channel names on current page
for i in range(0,len(a)):
ourlink = a[i].get_property("href")
distributed = ourlink.split("/")
channel = distributed[4]
channel_names.append(channel)
#time of shows
b = driver.find_elements_by_class_name("tvgm-broadcast-teaser__time")
for i in range(0,len(b)):
ourtime.append(b[i].text)
c = driver.find_elements_by_class_name("tvgm-broadcast-teaser__link")
for i in range(0,len(c)):
if((c[i].get_property("href")) not in links):
links.append(c[i].get_property("href"))
#getting link
for i in range(links_traveresed,len(links)):
tmp = links[i]
episode_text = " "
if(channel_identifier(tmp) != "null"):
associated_channel_name.append(channel_identifier(tmp))
driver.get(tmp)
#Page visited
try:
production_date.append(driver.find_element_by_class_name("tvgm-broadcast-detail__productionyear").text)
except NoSuchElementException:
print("Production Date not found")
production_date.append("-")
try:
show_title.append(driver.find_element_by_class_name("tvgm-broadcast-detail__title").text)
except NoSuchElementException:
print("Show title not found")
show_title.append("-")
try:
description.append(driver.find_element_by_class_name("tvgm-broadcast-detail__description").text)
except NoSuchElementException:
print("Description not found")
description.append("-")
try:
actors.append(driver.find_element_by_class_name("tvgm-broadcast-detail__castandcrew").text)
except NoSuchElementException:
print("Actors not found")
actors.append("-")
try:
temp = driver.find_element_by_class_name("tvgm-broadcast-detail__info-playable").text
temp = temp.split(",")
if(len(temp) == 2):
series_movie.append(temp[0])
genre.append(temp[1])
print("This got executed (Genre)")
if (len(temp) == 1):
series_movie.append(temp[0])
genre.append("-")
except NoSuchElementException:
print("Series/Movie not found")
series_movie.append("-")
genre.append("-")
try:
driver.find_element_by_class_name("tvgm-broadcast-detail__episode-numbers")
myepisode_number = driver.find_element_by_class_name("tvgm-broadcast-detail__episode-numbers").text
tmp = myepisode_number.split(" ")
season_number.append(tmp[1])
#changing done
if(len(tmp)>2):
combined_episode_number = tmp[3].split("/")
if(len(combined_episode_number) ==2):
current_episode.append(combined_episode_number[0])
total_episode.append(combined_episode_number[1])
print("This got executed (Episodes)")
if (len(combined_episode_number) == 1):
current_episode.append(combined_episode_number[0])
total_episode.append("-")
else:
#if both not available
total_episode.append("-")
current_episode.append("-")
print("Epsisode starting and ending exist ")
except NoSuchElementException:
print("Starting ending Episode not exist")
season_number.append("-")
current_episode.append("-")
total_episode.append("-")
#tester
#break
else:
#not interested in this channel
remove_entry(i)
entries_deleted = entries_deleted +1
print("****** ENTRY SKIPPED ********")
links_traveresed = len(links)
#tester
# if(i == ad_index):
# break
driver.get(default_link)
iteration = iteration+1
driver.close()
# print("Starting time = " + ourtime[ad_index])
# print("Actors = " + actors[ad_index])
# print("Associated Channel Name = " + associated_channel_name[ad_index])
# print("Production Date = " + production_date[ad_index])
# print("Show title = " + show_title[ad_index])
# print("Current Episode = " + current_episode[ad_index])
# print("Total Episode = " + total_episode[ad_index])
# print("Genre = " + genre[ad_index])
# print("Series_Movie = " + series_movie[ad_index])
# print("Season Number = " + season_number[ad_index])
# for i in range(0,len(ourtime)):
# if(ourtime[i] == "-"):
# del(ourtime[i])
print(ourtime)
print(actors)
print(associated_channel_name)
print(production_date)
print(show_title)
print(current_episode)
print(total_episode)
print(genre)
print(series_movie)
print(season_number)
print(len(ourtime))
print(len(actors))
print(len(associated_channel_name))
print(len(production_date))
print(len(show_title))
print(len(current_episode))
print(len(total_episode))
print(len(genre))
print(len(series_movie))
print(len(season_number))
import csv
with open('channel_data_210610.csv', mode='w',newline='') as employee_file:
employee_writer = csv.writer(employee_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for i in range(0,len(ourtime)):
if(i==0):
employee_writer.writerow(["Date of Movie","Starting Time","Actors","Channel Name","Production Date","Title of Show","Current Episode","Total Episodes","Genre","Series/Movie","Season Number"])
employee_writer.writerow([date_of_movie,ourtime[i],actors[i],associated_channel_name[i],production_date[i],show_title[i],current_episode[i],total_episode[i],genre[i],series_movie[i],season_number[i]])
| [
"[email protected]"
] | |
95907f7c9ac9ff8ba364dcae91b64148eeed71a5 | 53649e3ecb7023935d612a37ecf5ad45568bbb8d | /Aplikace_1_0/Source/ewitis/gui/DEF_COLUMN.py | e47296468af7ab7e9831c98858c5e460564ed47d | [] | no_license | liuqingchn/ew_aplikace | 157fbc7e0564b29ffe4035724c63d8fc3861512f | efaea537385f9fa90e7f4b4bec430a842c9f7ef6 | refs/heads/master | 2021-01-13T07:20:08.738298 | 2016-04-26T18:54:51 | 2016-04-26T18:54:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,556 | py | # -*- coding: utf-8 -*-
'''
Created on 27.12.2011
@author: Meloun
'''
""" WIDTHS """
WIDTH_NUMBER_4DIGIT = 40
WIDTH_NUMBER_3DIGIT = 35
"""
RUNS
"""
RUNS = {}
""" table collumns """
RUNS['table'] = {
"id" : {"index": 0, "name": "id", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write": False},
#"date" : {"index": 1, "name": "date", "default": "0.0. 2000 00:00:00", "width": 70, "write": True},
#"description" : {"index": 2, "name": "description", "default": "", "width": 10, "write": True}
}
"""
TIMES
"""
TIMES = {}
""" table collumn for times, mode race """
TIMES['table'] = {
"id" : {"index": 0, "name": "id", "name_cz": u"id", "type":"number", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":False },
"nr" : {"index": 1, "name": "nr", "name_cz": u"Číslo", "type":"number", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":True },
"cell" : {"index": 2, "name": "cell", "name_cz": u"Buňka", "default": 250, "width": 35, "write":True },
"status" : {"index": 3, "name": "status", "name_cz": u"Status", "default": "race", "width": 60, "write":True },
"time1" : {"index": 4, "name": "time1", "name_cz": u"Čas1", "default": "", "width": 80, "write":False },
"lap1" : {"index": 5, "name": "lap1", "name_cz": u"Okruhy1", "default": "", "width": 50, "write":False },
"time2" : {"index": 6, "name": "time2", "name_cz": u"Čas2", "default": "", "width": 80, "write":False },
"lap2" : {"index": 7, "name": "lap2", "name_cz": u"Okruhy2", "default": "", "width": 50, "write":False },
"time3" : {"index": 8, "name": "time3", "name_cz": u"Čas3", "default": "", "width": 80, "write":False },
"lap3" : {"index": 9, "name": "lap3", "name_cz": u"Okruhy3", "default": "", "width": 50, "write":False },
"time4" : {"index": 10, "name": "time4", "name_cz": u"Čas4", "default": "", "width": 80, "write":False },
"lap4" : {"index": 11, "name": "lap4", "name_cz": u"Okruhy4", "default": "", "width": 50, "write":False },
"name" : {"index": 12, "name": "name", "name_cz": u"Jméno", "default": "unknow", "width": 150, "write":False },
"category" : {"index": 13, "name": "category", "name_cz": u"Kategorie", "default": "unknown", "width": 100, "write":False },
"order1" : {"index": 14, "name": "order1", "name_cz": u"Pořadí1", "type":"number", "default": "", "width": 60, "write":False },
"order2" : {"index": 15, "name": "order2", "name_cz": u"Pořadí2", "type":"number", "default": "", "width": 60, "write":False },
"order3" : {"index": 16, "name": "order3", "name_cz": u"Pořadí3", "type":"number", "default": "", "width": 60, "write":False },
"start_nr" : {"index": 17, "name": "start", "name_cz": u"Start", "default": 1, "width": 50, "write":False },
"points1" : {"index": 18, "name": "points1", "name_cz": u"Body", "type":"number", "default": "", "width": 60, "write":False },
"points2" : {"index": 19, "name": "points2", "name_cz": u"Body", "type":"number", "default": "", "width": 60, "write":False },
"points3" : {"index": 20, "name": "points3", "name_cz": u"Body", "type":"number", "default": "", "width": 60, "write":False },
"points4" : {"index": 21, "name": "points4", "name_cz": u"Body", "type":"number", "default": "", "width": 60, "write":False },
"points5" : {"index": 22, "name": "points5", "name_cz": u"Body", "type":"number", "default": "", "width": 60, "write":False },
"un1" : {"index": 23, "name": "un1", "name_cz": u"un1", "default": "", "width": WIDTH_NUMBER_3DIGIT, "write":True },
"un2" : {"index": 24, "name": "un2", "name_cz": u"un2", "default": "", "width": WIDTH_NUMBER_3DIGIT, "write":True },
"un3" : {"index": 25, "name": "un3", "name_cz": u"un3", "default": "", "width": WIDTH_NUMBER_3DIGIT, "write":True },
"us1" : {"index": 26, "name": "us1", "name_cz": u"us1", "default": "", "width": 80, "write":True },
    #!! do not use 'time_raw' => same name as in the table and it would be shadowed
"timeraw" : {"index": 27, "name": "timeraw", "name_cz": u"Čas Raw", "default": 161, "width": 100, "write":True },
}
"""
USERS
"""
USERS = {}
""" table collumns """
USERS['table'] = { "id" : {"index": 0, "name": "id", "name_cz": u"id", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":False },
"nr" : {"index": 1, "name": "nr", "name_cz": u"Číslo", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":True },
"status" : {"index": 2, "name": "status", "name_cz": u"Status", "default": "race", "width": WIDTH_NUMBER_4DIGIT, "write":True },
"name" : {"index": 3, "name": "name", "name_cz": u"Jméno", "default": "unknown", "width": 100, "write":True },
"first_name" : {"index": 4, "name": "first_name", "name_cz": u"Nevím", "default": "unknown", "width": 100, "write":True },
"category" : {"index": 5, "name": "category", "name_cz": u"Kategorie", "default": "unknown", "width": 100, "write":True },
"club" : {"index": 6, "name": "club", "name_cz": u"Klub", "default": "", "width": 200, "write":True },
"year" : {"index": 7, "name": "year", "name_cz":u"Ročník", "default": "", "width": 70, "write":True },
"sex" : {"index": 8, "name": "sex", "name_cz":u"Pohlaví", "default": "", "width": None, "write":True },
"email" : {"index": 9, "name": "email", "name_cz": u"Email", "default": "", "width": None, "write":True },
"symbol" : {"index": 10, "name": "symbol", "name_cz": u"Nevím", "default": "", "width": None, "write":True },
"paid" : {"index": 11, "name": "paid", "name_cz": u"Nevím", "default": "", "width": None, "write":True },
"note" : {"index": 12, "name": "note", "name_cz": u"Nevím", "default": "", "width": None, "write":True },
"o1" : {"index": 13, "name": "o1", "name_cz":u"#1", "default": "", "width": None, "write":True },
"o2" : {"index": 14, "name": "o2", "name_cz":u"#2", "default": "", "width": None, "write":True },
"o3" : {"index": 15, "name": "o3", "name_cz":u"#3", "default": "", "width": None, "write":True },
"o4" : {"index": 16, "name": "o4", "name_cz":u"#4", "default": "", "width": 10, "write":True },
}
"""
CATEGORIES
"""
CATEGORIES = {}
""" table collumns """
CATEGORIES['table'] = {
"id" : {"index": 0, "name": "id", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":False },
"name" : {"index": 1, "name": "name", "default": "unknown", "width": 200, "write":True },
"description" : {"index": 2, "name": "description", "default": "", "width": 350, "write":True },
"start_nr" : {"index": 3, "name": "start_nr", "default": 1, "width": WIDTH_NUMBER_4DIGIT, "write":True },
"g1" : {"index": 4, "name": "g1", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":True },
"g2" : {"index": 5, "name": "g2", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":True },
"g3" : {"index": 6, "name": "g3", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":True },
"g4" : {"index": 7, "name": "g4", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":True },
"g5" : {"index": 8, "name": "g5", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":True },
"g6" : {"index": 9, "name": "g6", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":True },
"g7" : {"index": 10, "name": "g7", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":True },
"g8" : {"index": 11, "name": "g8", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":True },
"g9" : {"index": 12, "name": "g9", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":True },
"g10" : {"index": 13, "name": "g10", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":True },
#"#" : {"index": 14, "name": "#", "width":0},
}
"""
CATEGORY GROUPS
"""
CGROUPS = {}
""" table collumns """
CGROUPS['table'] = {
"id" : {"index": 0, "name": "id", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":False },
"label" : {"index": 1, "name": "label", "default": "gx", "width": 300, "write":True },
"name" : {"index": 2, "name": "name", "default": "", "width": 300, "write":True },
"description" : {"index": 3, "name": "description", "default": "", "width": 300, "write":True },
}
"""
TAGS
"""
TAGS = {}
""" table collumns """
TAGS['table'] = {
"id" : {"index": 0, "name": "id", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":False },
"tag_id" : {"index": 1, "name": "tag_id", "default": 0, "width": 160, "write":True },
"printed_nr" : {"index": 2, "name": "printed_nr", "default": 0, "width": 80, "write":True },
"user_nr" : {"index": 3, "name": "user_nr", "default": 0, "width": 80, "write":True },
#"#1" : {"index": 4, "name": "", "width":80},
}
"""
ALLTAGS
"""
ALLTAGS = {}
""" database columns """
ALLTAGS['database'] = {
"id" : {"index": 0, "name": "id", "default": 0},
"tag_id" : {"index": 1, "name": "tag_id", "default": 0},
"printed_nr" : {"index": 2, "name": "printed_nr", "default": 0},
"description" : {"index": 3, "name": "description", "default": ""}
}
""" table collumns """
ALLTAGS['table'] = {
"id" : {"index": 0, "name": "id", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":False },
"tag_id" : {"index": 1, "name": "tag_id", "default": 0, "width": 160, "write":True },
"printed_nr" : {"index": 2, "name": "printed_nr", "default": 0, "width": 100, "write":True },
"description" : {"index": 3, "name": "description", "default": "", "width": 300, "write":True }
}
"""
POINTS
"""
POINTS = {}
""" table collumns """
POINTS['table'] = {
"id" : {"index": 0, "name": "id", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":False},
"order_" : {"index": 1, "name": "order", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":True},
"points" : {"index": 2, "name": "points", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":True},
"description" : {"index": 3, "name": "description", "default": "", "width": 160, "write":True},
}
"""
RACE INFO
"""
RACEINFO = {}
""" table collumns """
RACEINFO['table'] = {
"id" : {"index": 0, "name": "id", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":False},
"name" : {"index": 1, "name": "id", "default": "unknown", "width": 300, "write":False},
"startlist" : {"index": 2, "name": "startlist", "default": 0, "width": 2*WIDTH_NUMBER_4DIGIT, "write":False},
"dns" : {"index": 3, "name": "dns" , "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":False},
"finished" : {"index": 4, "name": "finished", "default": 0, "width": 2*WIDTH_NUMBER_4DIGIT, "write":False},
"dnf" : {"index": 5, "name": "dnf", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":False},
"dq" : {"index": 6, "name": "dq", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":False},
"race" : {"index": 7, "name": "race", "default": 0, "width": 2*WIDTH_NUMBER_4DIGIT, "write":False},
"check" : {"index": 8, "name": "check", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":False},
"-" : {"index": 9, "name": "-", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":False},
}
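# Minimal consumer sketch (assumption: table builders order the columns by
# their "index" key; the helper name is illustrative, not from this project):
#
#   def ordered_column_names(table_def):
#       cols = sorted(table_def.values(), key=lambda c: c["index"])
#       return [c["name"] for c in cols]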
| [
"[email protected]"
] | |
04c39588a75c7d1646fb96aeb656bbb9548a976f | c1b56d50c68bf32e900349cbab4bfd043a79a237 | /Pythagorean Triplet.py | 231f1b5449311249ea7648796d95434b151ff9d6 | [] | no_license | divanshu79/GeeksForGeeks-solutions | c7a5f0be04e8376e72f933c35fb2d09641fe7130 | caf77aad9c53d5d05c87318806097d750864a6e3 | refs/heads/master | 2020-03-25T07:56:14.997786 | 2018-08-05T06:37:22 | 2018-08-05T06:37:22 | 143,589,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | from collections import defaultdict
for _ in range(int(input())):
n = int(input())
arr = list(map(int, input().split()))
def_dict = defaultdict(int)
sq_list = []
for i in arr:
def_dict[i*i] = 1
sq_list.append(i*i)
sum_list = []
flag = 0
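    # For every pair (a, b), check whether a*a + b*b is also among the squared
    # values; if so, (a, b) are the legs of a Pythagorean triplet. This is the
    # O(n^2) hash-lookup approach. (`sum_list` above is never used.)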
for i in range(n-1):
for j in range(i+1, n):
if def_dict[sq_list[i] + sq_list[j]] == 1:
flag = 1
print(arr[i], arr[j])
break
if flag == 1:
break
if flag == 1:
print('Yes')
else:
print('No') | [
"[email protected]"
] | |
189638b913ac8e4f95628be830208ded60454bf1 | 994e5b7156a8c1429238facc1463ad1846f1a89a | /models/official/nlp/xlnet/xlnet_config.py | 95ab092442ef4f4b96e61d91ed391051469e8441 | [
"Apache-2.0"
] | permissive | TrellixVulnTeam/Felect_M46O | f0c2a9a6c48695705e0b68c92c3a414bacfaa599 | 6d8b80e216c40233d2c1b9e51fe6f605a3b5ef4b | refs/heads/main | 2023-04-22T11:33:59.448117 | 2021-05-06T13:01:12 | 2021-05-06T13:01:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,317 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions used in XLNet model."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import json
import os
import tensorflow as tf
def create_run_config(is_training, is_finetune, flags):
"""Helper function for creating RunConfig."""
kwargs = dict(
is_training=is_training,
use_tpu=flags.use_tpu,
dropout=flags.dropout,
dropout_att=flags.dropout_att,
init_method=flags.init_method,
init_range=flags.init_range,
init_std=flags.init_std,
clamp_len=flags.clamp_len)
if not is_finetune:
kwargs.update(
dict(
mem_len=flags.mem_len,
reuse_len=flags.reuse_len,
bi_data=flags.bi_data,
clamp_len=flags.clamp_len,
same_length=flags.same_length))
return RunConfig(**kwargs)
# TODO(hongkuny): refactor XLNetConfig and RunConfig.
class XLNetConfig(object):
"""Configs for XLNet model.
XLNetConfig contains hyperparameters that are specific to a model checkpoint;
i.e., these hyperparameters should be the same between
pretraining and finetuning.
The following hyperparameters are defined:
n_layer: int, the number of layers.
d_model: int, the hidden size.
n_head: int, the number of attention heads.
d_head: int, the dimension size of each attention head.
d_inner: int, the hidden size in feed-forward layers.
ff_activation: str, "relu" or "gelu".
untie_r: bool, whether to untie the biases in attention.
n_token: int, the vocab size.
"""
def __init__(self, FLAGS=None, json_path=None, args_dict=None):
"""Constructing an XLNetConfig.
One of FLAGS or json_path should be provided.
Args:
FLAGS: An FLAGS instance.
json_path: A path to a json config file.
args_dict: A dict for args.
"""
assert FLAGS is not None or json_path is not None or args_dict is not None
self.keys = [
'n_layer', 'd_model', 'n_head', 'd_head', 'd_inner', 'ff_activation',
'untie_r', 'n_token'
]
if FLAGS is not None:
self.init_from_flags(FLAGS)
if json_path is not None:
self.init_from_json(json_path)
if args_dict is not None:
self.init_from_dict(args_dict)
def init_from_dict(self, args_dict):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
for key in self.keys:
setattr(self, key, args_dict[key])
def init_from_flags(self, flags):
for key in self.keys:
setattr(self, key, getattr(flags, key))
def init_from_json(self, json_path):
with tf.io.gfile.GFile(json_path) as f:
json_data = json.load(f)
self.init_from_dict(json_data)
def to_json(self, json_path):
"""Save XLNetConfig to a json file."""
json_data = {}
for key in self.keys:
json_data[key] = getattr(self, key)
json_dir = os.path.dirname(json_path)
if not tf.io.gfile.exists(json_dir):
tf.io.gfile.makedirs(json_dir)
with tf.io.gfile.GFile(json_path, 'w') as f:
json.dump(json_data, f, indent=4, sort_keys=True)
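# Minimal usage sketch (file paths are illustrative):
#
#   config = XLNetConfig(json_path="pretrained/xlnet_config.json")
#   config.to_json("/tmp/xlnet_config_copy.json")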
class RunConfig(object):
"""Class of RunConfig.
RunConfig contains hyperparameters that could be different
between pretraining and finetuning.
These hyperparameters can also be changed from run to run.
We store them separately from XLNetConfig for flexibility.
"""
def __init__(self,
is_training,
use_tpu,
dropout,
dropout_att,
init_method='normal',
init_range=0.1,
init_std=0.02,
mem_len=None,
reuse_len=None,
bi_data=False,
clamp_len=-1,
same_length=False,
use_cls_mask=True):
"""Initializes RunConfig.
Args:
is_training: bool, whether in training mode.
use_tpu: bool, whether TPUs are used.
dropout: float, dropout rate.
dropout_att: float, dropout rate on attention probabilities.
init_method: str, the initialization scheme, either "normal" or "uniform".
init_range: float, initialize the parameters with a uniform distribution
in [-init_range, init_range]. Only effective when init="uniform".
init_std: float, initialize the parameters with a normal distribution with
mean 0 and stddev init_std. Only effective when init="normal".
mem_len: int, the number of tokens to cache.
reuse_len: int, the number of tokens in the currect batch to be cached and
reused in the future.
bi_data: bool, whether to use bidirectional input pipeline. Usually set to
True during pretraining and False during finetuning.
clamp_len: int, clamp all relative distances larger than clamp_len. -1
means no clamping.
same_length: bool, whether to use the same attention length for each
token.
use_cls_mask: bool, whether to introduce cls mask.
"""
self.init_method = init_method
self.init_range = init_range
self.init_std = init_std
self.is_training = is_training
self.dropout = dropout
self.dropout_att = dropout_att
self.use_tpu = use_tpu
self.mem_len = mem_len
self.reuse_len = reuse_len
self.bi_data = bi_data
self.clamp_len = clamp_len
self.same_length = same_length
self.use_cls_mask = use_cls_mask
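  # Example (values are placeholders, not recommended settings): a
  # pretraining-style configuration:
  #
  #   run_config = RunConfig(is_training=True, use_tpu=False,
  #                          dropout=0.1, dropout_att=0.1,
  #                          mem_len=384, reuse_len=256, bi_data=True)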
| [
"[email protected]"
] | |
bc2ec15906048fc42b645664a4552aa614fffaec | 4cbe0eef8694a7f5443e6d276577d3ca08d15456 | /cpt1/noneLenDemo.py | a713e854c6074bac6033c4576a506fd818583169 | [] | no_license | GSIL-Monitor/PythonLearning | 2bf313e366e395df1d27164fe79e16e948094583 | 3f20f9cdff1cef368baa6a2374e6b2cbe3871aa4 | refs/heads/master | 2020-04-19T09:11:45.169704 | 2018-11-28T09:55:01 | 2018-11-28T09:55:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | s=None
s1 = ''
s2 = ' '
print(len(s1))            # 0 - empty string
print(len(s2))            # 1 - a single space
print(len(s2.strip()))    # 0 - strip() removes the space
# print(len(s))           # would raise TypeError: object of type 'NoneType' has no len()
t1 = t2 = t3 = None
print(t1, t2, t3)         # prints: None None None
| [
"[email protected]"
] | |
29eaf7dca764f8db0e109f82e350645c5ee1f812 | c741f04141784a2571d2d27d95e0d994e4584ab1 | /learning/py3/连接mysql/PyMySQL/test3.py | f72ccb6eb48887eb51cf2b269456a0e175b90e48 | [] | no_license | haodonghui/python | bbdece136620bc6f787b4942d6e1760ed808afd4 | 365062ba54297c81093b7f378742e76d438658b7 | refs/heads/master | 2022-02-03T23:52:37.288503 | 2022-01-27T05:23:25 | 2022-01-27T05:23:25 | 191,729,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | from pythonmysql3 import DB
if __name__ == '__main__':
with DB(host='59.110.228.110', port=3306, database='test_tea_uc_0', user='test_tea_uc_0',
passwd='L~+SJ*F^kon[t+10l6') as db:
db.execute('select * from uc_user limit 0,10')
print(db)
for i in db:
print(i)
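# Note: DB is assumed to be a context-manager wrapper from the local
# `pythonmysql3` module that connects on __enter__, exposes execute(), and is
# iterable over the fetched rows; it is not part of PyMySQL itself.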
| [
"[email protected]"
] | |
3ccbf8883c86965571f090c36bced556f00efdd1 | f60ec2c12c6d56be853bec9c222b8ea91b170130 | /apps/pig/src/pig/models.py | a38ff955d4c0be321ef26bdb2d085598b63d858f | [
"Apache-2.0"
] | permissive | jackerxff/hue | b33911f62129cc949096dd48b3fdcf0584bbba69 | 2418050cafd75aab043900c28a867f5c13bc1c0e | refs/heads/master | 2020-12-29T02:54:39.947205 | 2013-04-05T21:25:07 | 2013-04-05T21:26:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,071 | py | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import json
except ImportError:
import simplejson as json
import posixpath
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _, ugettext_lazy as _t
from desktop.lib.exceptions_renderable import PopupException
from hadoop.fs.hadoopfs import Hdfs
from oozie.models import Workflow
class Document(models.Model):
owner = models.ForeignKey(User, db_index=True, verbose_name=_t('Owner'), help_text=_t('User who can modify the job.'))
is_design = models.BooleanField(default=True, db_index=True, verbose_name=_t('Is a user document, not a document submission.'),
help_text=_t('If the document is not a submitted job but a real query, script, workflow.'))
def is_editable(self, user):
return user.is_superuser or self.owner == user
def can_edit_or_exception(self, user, exception_class=PopupException):
if self.is_editable(user):
return True
else:
raise exception_class(_('Only superusers and %s are allowed to modify this document.') % user)
class PigScript(Document):
_ATTRIBUTES = ['script', 'name', 'properties', 'job_id', 'parameters', 'resources']
data = models.TextField(default=json.dumps({
'script': '',
'name': '',
'properties': [],
'job_id': None,
'parameters': [],
'resources': []
}))
def update_from_dict(self, attrs):
data_dict = self.dict
for attr in PigScript._ATTRIBUTES:
if attrs.get(attr) is not None:
data_dict[attr] = attrs[attr]
self.data = json.dumps(data_dict)
@property
def dict(self):
return json.loads(self.data)
class Submission(models.Model):
script = models.ForeignKey(PigScript)
workflow = models.ForeignKey(Workflow)
def create_or_update_script(id, name, script, user, parameters, resources, is_design=True):
"""This take care of security"""
try:
pig_script = PigScript.objects.get(id=id)
pig_script.can_edit_or_exception(user)
except:
pig_script = PigScript.objects.create(owner=user, is_design=is_design)
pig_script.update_from_dict({
'name': name,
'script': script,
'parameters': parameters,
'resources': resources
})
return pig_script
def get_scripts(user, max_count=200):
scripts = []
for script in PigScript.objects.filter(owner=user).order_by('-id')[:max_count]:
data = script.dict
massaged_script = {
'id': script.id,
'name': data['name'],
'script': data['script'],
'parameters': data['parameters'],
'resources': data['resources'],
'isDesign': script.is_design,
}
scripts.append(massaged_script)
return scripts
def get_workflow_output(oozie_workflow, fs):
# TODO: guess from the STORE or parameters
output = None
if 'workflowRoot' in oozie_workflow.conf_dict:
output = oozie_workflow.conf_dict.get('workflowRoot')
if output and not fs.exists(output):
output = None
return output
def hdfs_link(url):
if url:
path = Hdfs.urlsplit(url)[2]
if path:
if path.startswith(posixpath.sep):
return "/filebrowser/view" + path
else:
return "/filebrowser/home_relative_view/" + path
else:
return url
else:
return url
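# Examples (paths are illustrative):
#   hdfs_link('hdfs://nn:8020/user/test/out')  ->  '/filebrowser/view/user/test/out'
#   hdfs_link('some/relative/dir')             ->  '/filebrowser/home_relative_view/some/relative/dir'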
| [
"[email protected]"
] | |
1d1c6159d39366e7b2130cca2ed83d36fab067c6 | c96c79bb7ca3e71d609eab20ed8d68cff8ee7fe7 | /DataStructurePrograms/bankingCashCounter.py | 0049a83ecb8431b52e5fdb75741a8707cd5863a8 | [] | no_license | NikhilDusane222/Python | 25c9eb50bcd5e0e8679ece41d97129b9100e9a91 | 0183c4211a28bbddb6792978cf55da89a682f67a | refs/heads/master | 2021-05-18T13:07:07.059428 | 2020-04-12T17:23:57 | 2020-04-12T17:23:57 | 251,254,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,721 | py | #Class Queue
class Queue:
def __init__(self):
self.balance = 0
print("Welcome to the Bank Cash Counter..")
print("This is a Banking portal")
    # Function for depositing an amount
def enqueue_deposit(self):
amount = int(input("Enter amount to be Deposited: "))
self.balance += amount
print("\nAmount Deposited:", amount)
    # Function for withdrawing an amount
def dequeue_withdraw(self):
amount = int(input("Enter amount to be Withdrawn: "))
if self.balance >= amount:
self.balance -= amount
print("\nYou Withdrew:", amount)
else:
print("\nInsufficient balance ")
    # Function for displaying the current balance
def queue_display(self):
print("\nNet Available Balance=", self.balance)
#Function for exit
def queue_exit(self):
exit()
#Main function
if __name__ == '__main__':
q = Queue()
    while True:
        print("Please Enter the option that you want to make a transaction:")
        # Choices for deposit, withdrawal, balance display and exit
        try:
            choiceNo = int(input(
                " 1. Deposit Amount to the account \n 2. Withdraw Amount from the account \n "
                "3. Display the amount \n 4. Cancel Transaction \n"))
        except ValueError:
            # Non-numeric input: warn and ask again instead of ending the program
            print("Invalid Choice...!! Press the Correct choice")
            continue
        if choiceNo == 1:
            q.enqueue_deposit()
        elif choiceNo == 2:
            q.dequeue_withdraw()
        elif choiceNo == 3:
            q.queue_display()
        elif choiceNo == 4:
            q.queue_exit()
        else:
            print("Invalid Choice...!! Press the Correct choice")
| [
"[email protected]"
] | |
fb9d2de4608618a90483dce7880ec25859319581 | eb4070d3dda38df8b6d4118343db59d559e58df6 | /week-1/Examples/plot_bostonjuly2012temps.py | 7106e6e834e9c292ae22013b1fc5392a53e0f201 | [] | no_license | RaviTezu/MITx-6.00.2x | df767115085e4f28cfaac20ec90c18453517ed5a | 6effafa89e15e1d59c9302c4a3c9f6ce96da0faa | refs/heads/master | 2021-01-10T16:15:03.999778 | 2016-04-20T11:40:46 | 2016-04-20T11:40:46 | 53,061,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,099 | py | from __future__ import print_function
import os
import pylab
# It is assumed that the 'julyTemps.txt' file is present along the side of this script and this script is
# executed at the root.
PWD = os.getcwd()
FILE_NAME = 'julyTemps.txt'
FILE = PWD + '/' + FILE_NAME
HIGH = []
LOW = []
def load_file(inFile=FILE):
return open(inFile, 'r')
def read_data(fd=None):
    # a default argument of load_file() would open the file once at
    # definition time; open it lazily instead
    if fd is None:
        fd = load_file()
for line in fd.readlines():
fields = line.split()
if len(fields) < 3 or not fields[0].isdigit():
pass
else:
HIGH.append(fields[1])
LOW.append(fields[2])
def calculate_diff(high=HIGH, low=LOW):
diff_temps = [int(h) - int(l) for h, l in zip(high, low)]
return diff_temps
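# e.g. with HIGH = ['85'] and LOW = ['70'], calculate_diff() returns [15]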
def plotting(diff_temps):
length = len(diff_temps)
print(length)
pylab.figure(1)
pylab.title('Day by Day Ranges in Temperature in Boston in July 2012')
pylab.xlabel('Days')
pylab.ylabel('Temperature Ranges')
pylab.plot(range(1, length + 1), diff_temps)
pylab.show()
if __name__ == "__main__":
read_data()
plotting(calculate_diff())
| [
"[email protected]"
] | |
03cdb1d4773ac7b2357bc6f611f33df1c00e995b | d5eb2fe5d49b581562ae2bc660d08ca80a03d331 | /PythonSandbox/src/leetcode/lc235_lowest_common_ancestor_bst.py | 8a95af9b55836848b2011fec66cdb18da8f848ba | [] | no_license | mcxu/code-sandbox | fd5aa2e593057901d281a0e74db8957777b06cf3 | a785231582bda8578f79982e2dcddd2f2ab559b4 | refs/heads/master | 2023-07-10T02:07:24.180947 | 2023-07-08T03:31:48 | 2023-07-08T03:31:48 | 130,493,607 | 4 | 2 | null | 2023-01-15T22:53:29 | 2018-04-21T16:49:40 | Python | UTF-8 | Python | false | false | 1,624 | py | class Solution:
def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
        if root is None:
            return None
deepestValidDepthSoFar = 0
validNode = root
# iterative dfs
stack = [(root, 0)] # (node, depth)
while stack:
currItem = stack.pop(-1)
currNode, currDepth = currItem[0], currItem[1]
# print("==== Outer DFS from currNode: ", currNode.val if currNode != None else None)
if currNode != None:
seenValues = set()
# print("Running inner dfs on currNode: ", currNode.val)
self.verifyPandQExistFromRoot(currNode, p, q, seenValues)
# print("seenValues: after: ", seenValues)
pqExistsFromRoot = (p.val in seenValues) and (q.val in seenValues)
# print("pqExistsFromRoot: ", pqExistsFromRoot)
if pqExistsFromRoot and currDepth > deepestValidDepthSoFar:
deepestValidDepthSoFar = currDepth
validNode = currNode
stack.append((currNode.right, currDepth+1))
stack.append((currNode.left, currDepth+1))
return validNode
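    # Note: the approach above re-verifies p and q from every node, which is
    # O(n^2) in the worst case and never exploits the BST ordering. A common
    # O(h) alternative for a BST (sketch, assuming unique node values):
    #
    #   def lowestCommonAncestorBST(self, root, p, q):
    #       while root:
    #           if p.val < root.val and q.val < root.val:
    #               root = root.left
    #           elif p.val > root.val and q.val > root.val:
    #               root = root.right
    #           else:
    #               return root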
def verifyPandQExistFromRoot(self, root, p, q, seenValues):
if root == None:
return
if p.val in seenValues and q.val in seenValues:
return
seenValues.add(root.val)
self.verifyPandQExistFromRoot(root.left, p, q, seenValues)
self.verifyPandQExistFromRoot(root.right, p, q, seenValues) | [
"[email protected]"
] | |
fe6e9af8a31baddb7805d28634bc057f5808ce14 | 353def93fa77384ee3a5e3de98cfed318c480634 | .history/week01/hoework01/gettop10frommaoyam01_20200626132705.py | 8d63e58baefe42cbcb636035be65bad77d03b90f | [] | no_license | ydbB/Python001-class01 | d680abc3ea1ccaeb610751e3488421417d381156 | ad80037ccfc68d39125fa94d2747ab7394ac1be8 | refs/heads/master | 2022-11-25T11:27:45.077139 | 2020-07-19T12:35:12 | 2020-07-19T12:35:12 | 272,783,233 | 0 | 0 | null | 2020-06-16T18:28:15 | 2020-06-16T18:28:15 | null | UTF-8 | Python | false | false | 4,579 | py | # Use the requests and bs4 libraries to scrape the titles, genres and release dates of the Maoyan top-10 movies, and save them to a CSV file in the UTF-8 character set
import requests
from bs4 import BeautifulSoup as bs
maoyanUrl = "https://maoyan.com/board/4"
user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'
header = {
'Content-Type': 'text/plain; charset=UTF-8',
'Cookie' : '__mta=251934006.1593072991075.1593100662316.1593100664951.15; uuid_n_v=v1; uuid=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; _csrf=8557626db9b655cf9050ae7e5b2aab69278c8061c21eca95e1c3cf2130b0b64c; _lxsdk_cuid=172ea8cb247c8-0a73066b1c0a8b-4353760-100200-172ea8cb248c8; _lxsdk=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; mojo-uuid=c457eacb7c1eb59d3d2f6c1f8d75b9c9; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1593072989,1593073002; _lx_utm=utm_source%3Dgoogle%26utm_medium%3Dorganic; __mta=251934006.1593072991075.1593075275703.1593078726963.7; mojo-session-id={"id":"435818e6a726415f46defffa27f7abc6","time":1593100221937}; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1593100665; mojo-trace-id=17; _lxsdk_s=172ec2bff67-0c2-e9f-c64%7C%7C24__mta=251934006.1593072991075.1593100690175.1593100868002.17; uuid_n_v=v1; uuid=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; _csrf=8557626db9b655cf9050ae7e5b2aab69278c8061c21eca95e1c3cf2130b0b64c; _lxsdk_cuid=172ea8cb247c8-0a73066b1c0a8b-4353760-100200-172ea8cb248c8; _lxsdk=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; mojo-uuid=c457eacb7c1eb59d3d2f6c1f8d75b9c9; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1593072989,1593073002; _lx_utm=utm_source%3Dgoogle%26utm_medium%3Dorganic; __mta=251934006.1593072991075.1593075275703.1593078726963.7; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1593100868; _lxsdk_s=172ee2f4a3e-1c2-3a1-5a4%7C%7C1__mta=251934006.1593072991075.1593133988033.1593140260525.19; uuid_n_v=v1; uuid=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; _csrf=8557626db9b655cf9050ae7e5b2aab69278c8061c21eca95e1c3cf2130b0b64c; _lxsdk_cuid=172ea8cb247c8-0a73066b1c0a8b-4353760-100200-172ea8cb248c8; _lxsdk=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; mojo-uuid=c457eacb7c1eb59d3d2f6c1f8d75b9c9; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1593072989,1593073002; _lx_utm=utm_source%3Dgoogle%26utm_medium%3Dorganic; __mta=251934006.1593072991075.1593134712257.1593134712989.9; mojo-session-id={"id":"b78cc9fcb57a627220ec165f84d9d5a9","time":1593140260318}; mojo-trace-id=1; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1593140260; _lxsdk_s=172ee8f28d1-560-08-4aa%7C%7C3',
# 'Host' : 'http://www.baidu.com',
'Origin': 'https://maoyan.com',
'Referer': 'https://maoyan.com/board/4',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
}
def get_urls(url, headers):
    response = requests.get(url, headers=headers)  # use the headers passed in, not the global
bs_info = bs(response.text,"html.parser")
import re
films_url = []
    for tag in bs_info.find_all('div'):
for tag_p in tag.find_all('a',href=re.compile('/films/')) :
            # collect the detail-page links for the top-10 movies
films_url.append(url + tag_p.get('href'))
urls = set(films_url)
return urls
import pandas as pd
# fetch every detail page
def get_page_info(urls, header):
    films_content = []
    for url in urls:
        content = get_page_brief(url, header)
        films_content.append(content)
    return films_content
# fetch the details of a single movie
def get_page_brief(url,header):
import re
response = requests.get(url, headers=header)
bs_info = bs(response.text,'html.parser')
# print(response.text)
atag = bs_info.find('div',attrs={'class':'banner'})
film_name = atag.find('h1').text +" "+ atag.find('div',attrs = {'class' : 'ename ellipsis'}).text
film_type = ""
for type in atag.find_all('a',attrs={'target':'_blank'}):
film_type = film_type + type.text
tags = atag.find_all('li')
online_time = tags[-1].text
brief = [film_name,film_type,online_time]
return brief
def save_movies(movies):
movies_data = pd.DataFrame(data=movies)
    movies_data.to_csv('./top', encoding='utf-8')  # UTF-8, as the module comment requires
def main():
    # urls = get_urls(maoyanUrl, header)
    # contents = get_page_info(urls, header)
    # print(urls)
    page_1 = 'https://maoyan.com/films/1375'
    brief = get_page_brief(page_1, header)
    save_movies([brief])  # wrap the single row in a list for DataFrame
    print(brief)
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
d21a1e0fda886e68b04b7b6fb2aae7d62a280eea | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit1046.py | 63eee4eecc3a2149468ba16560b7bb2f0123e5f6 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,157 | py | # qubit number=5
# total number=51
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
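# Grover-style search benchmark: build_oracle constructs the phase oracle Z_f for a
# point function, and make_circuit applies roughly floor(sqrt(2**n) * pi / 4) rounds
# of oracle-plus-diffusion gates before measuring every qubit.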
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[0]) # number=38
prog.cz(input_qubit[1],input_qubit[0]) # number=39
prog.h(input_qubit[0]) # number=40
prog.cx(input_qubit[1],input_qubit[0]) # number=45
prog.z(input_qubit[1]) # number=46
prog.h(input_qubit[0]) # number=48
prog.cz(input_qubit[1],input_qubit[0]) # number=49
prog.h(input_qubit[0]) # number=50
prog.h(input_qubit[0]) # number=32
prog.cz(input_qubit[1],input_qubit[0]) # number=33
prog.h(input_qubit[0]) # number=34
prog.h(input_qubit[4]) # number=21
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.cx(input_qubit[3],input_qubit[0]) # number=41
prog.z(input_qubit[3]) # number=42
prog.cx(input_qubit[3],input_qubit[0]) # number=43
prog.cx(input_qubit[1],input_qubit[3]) # number=44
prog.x(input_qubit[0]) # number=9
prog.x(input_qubit[1]) # number=10
prog.x(input_qubit[2]) # number=11
prog.cx(input_qubit[0],input_qubit[3]) # number=35
prog.x(input_qubit[3]) # number=36
prog.cx(input_qubit[0],input_qubit[3]) # number=37
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.cx(input_qubit[1],input_qubit[0]) # number=24
prog.x(input_qubit[0]) # number=25
prog.cx(input_qubit[1],input_qubit[0]) # number=26
prog.x(input_qubit[1]) # number=14
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
prog.x(input_qubit[1]) # number=22
prog.x(input_qubit[1]) # number=23
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit1046.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"[email protected]"
] | |
d178dcc2efeab0bd3fdac83c2c00a2998ac26b5e | 0cb970785a746a30f9b44b3e5234157818688197 | /Dpython/datatypes/listdatatype/dictionary.py | fdca2679ba3ae6247d7957ec1b3a8f77e78d2354 | [] | no_license | charan2108/pythonprojectsNew | 4255bbb81b6cf0d47c51c131ed93a0bb331a669c | b2f273d44937ec576daa0235d0d0326ff5149bf8 | refs/heads/main | 2023-05-03T16:55:33.242693 | 2021-05-26T11:18:17 | 2021-05-26T11:18:17 | 371,001,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | d = {101:'q', 102:'w', 103:'r'}
print(d)
print(type(d))
d[101] = 'alfa'
print(d)
e = {}
e['a']='apple'
e['b']='gold'
print(e)
| [
"[email protected]"
] | |
98b24527a49dde6f431800e65ba9394fb4c3a89e | 503d2f8f5f5f547acb82f7299d86886691966ca5 | atcoder/abc288_e.py | 70c4f614907f574c7e19042d8ed2d2ab4cc3fcdb | [] | no_license | Hironobu-Kawaguchi/atcoder | 3fcb649cb920dd837a1ced6713bbb939ecc090a9 | df4b55cc7d557bf61607ffde8bda8655cf129017 | refs/heads/master | 2023-08-21T14:13:13.856604 | 2023-08-12T14:53:03 | 2023-08-12T14:53:03 | 197,216,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,214 | py | # https://atcoder.jp/contests/abc288/tasks/abc288_e
# from numba import njit
# from functools import lru_cache
import sys
input = sys.stdin.buffer.readline
INF = 1001001001001001
N, M = map(int, input().split())
A = list(map(int, (input().split())))
C = list(map(int, (input().split())))
X = list(map(int, (input().split())))
dp = [[INF]*(N+1) for _ in range(N+1)]
dp[0][0] = 0
# for i in range(N+1):
# dp[i][0] = 0
cost = [[0]*N for _ in range(N)]
for i in range(N):
for j in range(i+1):
if j==0:
cost[i][j] = C[i]
else:
cost[i][j] = min(cost[i][j-1], C[i-j])
# for i in range(N):
# print(cost[i])
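# cost[i][j] = cheapest purchase cost usable for item i when it is taken as the
# (j+1)-th item, i.e. min(C[i-j], ..., C[i]).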
idx = 0
for i in range(N):
for j in range(i+1):
dp[i+1][j+1] = min(dp[i+1][j+1], dp[i][j] + A[i] + cost[i][j])
if idx<M and i==X[idx]-1: continue
dp[i+1][j] = min(dp[i+1][j], dp[i][j])
if idx<M and i==X[idx]-1:
idx += 1
# for i in range(N+1):
# print(dp[i])
ans = INF
for j in range(M, N+1):
ans = min(ans, dp[N][j])
# for i in range(M):
# ans += A[X[i]-1]
print(ans)
# WA
# import sys
# input = sys.stdin.buffer.readline
# # def input(): return sys.stdin.readline().rstrip()
# # sys.setrecursionlimit(10 ** 7)
# import copy
# N, M = map(int, input().split())
# A = list(map(int, (input().split())))
# C = list(map(int, (input().split())))
# X = list(map(int, (input().split())))
# ans = 0
# for i in range(M):
# ans += A[X[i]-1]
# pre = [[]]
# idx = 0
# for i in range(N):
# jj = 0
# if i==X[idx]-1:
# v = C[X[idx]-1]
# u = X[idx] - 1
# for j in range(idx):
# if C[X[idx]-1-j]<v:
# v = C[X[idx]-1-j]
# u = X[idx] - 1
# for j in range(len(pre[u])):
# # print(u, j, pre[u])
# if j<jj:
# if C[u-j-1]: break
# v = C[u-j-1]
# else:
# if v<pre[u][j]+C[u-j-1]: break
# v = pre[u][j]+C[u-j-1]
# jj = max(jj, j+1)
# ans += v
# print(ans, idx, v, u)
# idx += 1
# pre.append(copy.copy(pre[-1]))
# pre[-1].append(A[i] + C[i])
# pre[-1].sort()
# # print(pre)
# print(ans)
| [
"[email protected]"
] | |
89938fbcb47e0b7757adcf91ed9a35f11cc37eeb | a27e43d263375f1ea42d496e18af01f5ad46990e | /modules/initialize.py | d7767bbf8a118b8f1b6dc24808d627c54abdcc1f | [] | no_license | Klim314/Quetzalcoatl | 74565556a26d548f28118137e81866f7dc7a4e7a | 0d78183235207bc9c44c7c099722f5a7203e1d9c | refs/heads/master | 2016-08-06T08:57:19.802511 | 2015-06-24T08:29:53 | 2015-06-24T08:29:53 | 36,220,505 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | #!/usr/bin/env python3
"""
initialize.py
loads the pubdip.ini file
returns a dictionary containing all terms
"""
def execute(target):
res = dict()
with open(target) as f:
for i in f:
if i[0] == '#':
continue
temp = i.split('=')
res[temp[0]] = temp[1].strip()
return res
if __name__ == "__main__":
path = "../pubdip.ini"
print(execute(path))
| [
"[email protected]"
] | |
91a5b6e81692b41a2ffffebed1fa5a58a9cc4ca7 | 2097293065bb28452b221a5f635bac63c69a3e80 | /pizza.py | 60599550eb351267a25b0b28131179907e104ba8 | [
"MIT"
] | permissive | kafkoders/hashcode-pizza | eb2ca3944f62c9c21853b8d0dc2cd34a984984bf | 513452f35299885f396a49113264523a0a6cceae | refs/heads/master | 2020-04-23T22:16:53.542636 | 2019-02-24T19:46:38 | 2019-02-24T19:46:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,483 | py | import pandas as pd
import numpy as np
import math
input_files = ['d_big']
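# Greedy sweep: walk the pizza cell by cell; at each free cell, try every slice
# shape that still fits, keep the candidate that covers the most cells while
# using the fewest of the scarcer ingredient, then mark its cells as used (5).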
def create_pizza_dataset(file_):
flag = False
elements_ = []
with open(file_ + '.in') as input_:
for line in input_:
if flag is False:
rows, cols, min_ingredients, max_cells = line.split(' ')
flag = True
else:
elements_.append(np.array(list(line.rstrip())))
df = pd.DataFrame(elements_)
pizza_ = df.replace(['M', 'T'], [1, 0])
total_tomatoes = len(pizza_[pizza_.values == 0])
total_mushrooms = len(pizza_[pizza_.values == 1])
less_ingredient = 'tomatoes' if total_tomatoes < total_mushrooms else 'mushrooms'
return pizza_, rows, cols, min_ingredients, max_cells, less_ingredient
def maximize_cuts(max_):
possible_cuts = list()
for j in range(max_, (int(min_ingredients) * 2) - 1, -1):
for i in range(j, 0, -1):
if (j % i) == 0:
item_x = [int(j / i), i]
item_y = [i, int(j / i)]
if item_x not in possible_cuts:
possible_cuts.append(item_x)
if item_y not in possible_cuts:
possible_cuts.append(item_y)
return possible_cuts
class pizzaSlice:
slice_ = None
value_ = 0
def __init__(self, slice_):
self.slice_ = slice_
self.value_ = self.calc_value()
def calc_value(self):
mushrooms = 0
tomatoes = 0
for val in self.slice_:
if pizza_.at[val[0], val[1]] == 1:
mushrooms += 1
elif pizza_.at[val[0], val[1]] == 0:
tomatoes += 1
if less_ingredient == 'tomatoes':
return tomatoes
else:
return mushrooms
def matches_condition(pizza_, pizza_slices):
if not pizza_slices:
return None
else:
min_slice = None
max_cells = 0
for pizza_slice in pizza_slices:
tomatoes = 0
mushrooms = 0
for cell_slice in pizza_slice.slice_:
if pizza_.at[cell_slice[0], cell_slice[1]] == 1:
mushrooms += 1
elif pizza_.at[cell_slice[0], cell_slice[1]] == 0:
tomatoes += 1
if mushrooms >= int(min_ingredients) and tomatoes >= int(min_ingredients):
if min_slice is None:
min_slice = pizza_slice
if min_slice.value_ > pizza_slice.value_ and max_cells < len(pizza_slice.slice_):
max_cells = len(pizza_slice.slice_)
min_slice = pizza_slice
if min_slice is not None:
return min_slice.slice_
else:
return None
def check_cuts(x, y, min_, max_, cuts_):
slices_ = list()
for cut in cuts_:
slice_ = list()
invalid = False
for i in range(cut[0]):
for j in range(cut[1]):
if x + i < pizza_.shape[0] and y + j < pizza_.shape[1] and pizza_.at[x + i, y + j] != 5:
slice_.append([x + i, y + j])
else:
invalid = True
if invalid is False:
slices_.append(pizzaSlice(slice_))
return slices_
if __name__ == '__main__':
for file_ in input_files:
pizza_, rows, cols, min_ingredients, max_cells, less_ingredient = create_pizza_dataset(file_)
good_slices = list()
possible_cuts = maximize_cuts(int(max_cells))
for row_ in range(pizza_.shape[0]):
for col_ in range(pizza_.shape[1]):
if pizza_.at[row_, col_] != 5:
slices_ = check_cuts(row_, col_, int(min_ingredients), int(max_cells), possible_cuts)
slice_ = matches_condition(pizza_, slices_)
if slice_ is not None:
col_final = len(slice_)
good_slices.append([row_, slice_[col_final - 1][0], col_, slice_[col_final - 1][1]])
for element in slice_:
pizza_.at[element[0], element[1]] = 5
with open(file_ + '.out', 'w') as f_:
f_.write(str(len(good_slices)) + "\n")
for value_ in good_slices:
f_.write(str(value_[0]) + " " + str(value_[2]) + " " + str(value_[1]) + " " + str(value_[3]) + "\n") | [
"[email protected]"
] | |
34a80c8dab37022c77f53a2aea2077a2f51aa81b | a0e33f22ed416429e5ed003896d410ab0e82d3eb | /polymodels/managers.py | a08e4ba298a2da0d63b9bcbbeaadcc69656423fd | [
"MIT"
] | permissive | fusionbox/django-polymodels | 37982506c6ea58ae85f44da676cd990b4babc6fd | 0e6caf3932b2d8337d15f9755983c94743317e12 | refs/heads/master | 2020-12-25T10:59:02.520899 | 2016-01-22T00:13:22 | 2016-01-22T00:13:22 | 50,145,841 | 0 | 0 | null | 2016-01-22T00:13:14 | 2016-01-22T00:13:14 | null | UTF-8 | Python | false | false | 2,968 | py | from __future__ import unicode_literals
from django.core.exceptions import ImproperlyConfigured
from django.db import models
class PolymorphicQuerySet(models.query.QuerySet):
def select_subclasses(self, *models):
self.type_cast = True
relateds = set()
accessors = self.model.subclass_accessors
if models:
subclasses = set()
for model in models:
if not issubclass(model, self.model):
raise TypeError(
"%r is not a subclass of %r" % (model, self.model)
)
subclasses.update(model.subclass_accessors)
# Collect all `select_related` required lookups
for subclass in subclasses:
# Avoid collecting ourself and proxy subclasses
related = accessors[subclass][2]
if related:
relateds.add(related)
queryset = self.filter(
**self.model.content_type_lookup(*tuple(subclasses))
)
else:
# Collect all `select_related` required relateds
for accessor in accessors.values():
# Avoid collecting ourself and proxy subclasses
related = accessor[2]
if accessor[2]:
relateds.add(related)
queryset = self
if relateds:
queryset = queryset.select_related(*relateds)
return queryset
def exclude_subclasses(self):
return self.filter(**self.model.content_type_lookup())
def _clone(self, *args, **kwargs):
kwargs.update(type_cast=getattr(self, 'type_cast', False))
return super(PolymorphicQuerySet, self)._clone(*args, **kwargs)
def iterator(self):
iterator = super(PolymorphicQuerySet, self).iterator()
if getattr(self, 'type_cast', False):
for obj in iterator:
yield obj.type_cast()
else:
# yield from iterator
for obj in iterator:
yield obj
class PolymorphicManager(models.Manager.from_queryset(PolymorphicQuerySet)):
use_for_related_fields = True
def contribute_to_class(self, model, name):
# Avoid circular reference
from .models import BasePolymorphicModel
if not issubclass(model, BasePolymorphicModel):
raise ImproperlyConfigured(
'`%s` can only be used on '
'`BasePolymorphicModel` subclasses.' % self.__class__.__name__
)
return super(PolymorphicManager, self).contribute_to_class(model, name)
def get_queryset(self):
queryset = super(PolymorphicManager, self).get_queryset()
model = self.model
opts = model._meta
if opts.proxy:
# Select only associated model and its subclasses.
queryset = queryset.filter(**self.model.subclasses_lookup())
return queryset
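# Minimal usage sketch (illustrative model, not part of this module):
#
#     class Animal(BasePolymorphicModel):
#         content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
#         objects = PolymorphicManager()
#
#     Animal.objects.select_subclasses()  # rows come back type-cast to their subclass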
| [
"[email protected]"
] | |
2ee2dcf3dcd8da231a4ddae3d99e7792e2185f23 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/0/3o.py | 6d08125d0b91f6c1763e4b9719945ab4e63276fb | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
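# Toy interpreter: each input line starting with the keyword '3O' prints its
# double-quoted argument; any other line is reported as an error.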
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == '3O':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
90b417bedd17743c79571e8607da6f6a022d1f12 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03024/s808391195.py | bd7e39cfcdbb36cf608ec3f0dbb696430bf5c305 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | s = list(input())
counter = 0
k = len(s)
for i in range(k):
if s[i] == "o":
counter += 1
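# With k of the 15 days already fought, 8 wins remain reachable iff the current
# win count plus all (15 - k) remaining days totals at least 8.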
if counter+(15-k) >= 8:
print("YES")
else:
print("NO") | [
"[email protected]"
] | |
6e69b5b44498b70dbb7ec604c2bc824d7cd54d73 | 31e10d5f9bbdf768a2b6aae68af0c2105b43120c | /web+多线程/miniweb框架/web服务器/application/utils.py | fc5a398094a52747dd43aa00a08d209b8d724c5b | [] | no_license | 664120817/python-test | 6d0ce82923b3e7974f393fc8590c5e47e4117781 | 418085378ca0db8019e4fa3b5564daebed0e6163 | refs/heads/master | 2023-02-16T13:10:55.403774 | 2022-08-02T17:01:52 | 2022-08-02T17:01:52 | 200,843,808 | 8 | 11 | null | 2023-02-15T16:53:44 | 2019-08-06T12:08:19 | Python | UTF-8 | Python | false | false | 435 | py | def create_http_response(status,response_body):
    # assemble the HTTP response
    request_line = "HTTP/1.1 {}\r\n".format(status)  # status line
    request_header = "Server:python80WS/2.1;charset=UTF-8 \r\n"  # response headers
    request_header += "Content-Type:text/html\r\n"
    request_blank = "\r\n"  # blank line separating headers from body
    request_data = (request_line + request_header + request_blank).encode() + response_body  # join the pieces
return request_data | [
"[email protected]"
] | |
ee9ea4d11f545f46aa88dcf699a6500010c37f2d | c6d9e353d19e0b92da72602ce274493dbb179525 | /Setup_custom.py | ca095135168082bb68b2205c98650d75d777c9fc | [
"BSL-1.0",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | davidbrownell/Common_cpp_Common | a337f0d589316f28950e93acd518d4e82b7cc14a | 7346273b79628514af1c584c447003a638def15d | refs/heads/master | 2022-03-01T19:31:12.571884 | 2022-01-03T17:56:37 | 2022-01-03T17:56:37 | 187,749,343 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,579 | py | # ----------------------------------------------------------------------
# |
# | Setup_custom.py
# |
# | David Brownell <[email protected]>
# | 2018-05-03 22:12:13
# |
# ----------------------------------------------------------------------
# |
# | Copyright David Brownell 2018-22.
# | Distributed under the Boost Software License, Version 1.0.
# | (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# |
# ----------------------------------------------------------------------
"""Performs repository-specific setup activities."""
# ----------------------------------------------------------------------
# |
# | To setup an environment, run:
# |
# | Setup(.cmd|.ps1|.sh) [/debug] [/verbose] [/configuration=<config_name>]*
# |
# ----------------------------------------------------------------------
import os
import shutil
import sys
from collections import OrderedDict
import CommonEnvironment
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
# <Missing function docstring> pylint: disable = C0111
# <Line too long> pylint: disable = C0301
# <Wrong hanging indentation> pylint: disable = C0330
# <Class '<name>' has no '<attr>' member> pylint: disable = E1103
# <Unreachable code> pylint: disable = W0101
# <Wildcard import> pylint: disable = W0401
# <Unused argument> pylint: disable = W0613
fundamental_repo = os.getenv("DEVELOPMENT_ENVIRONMENT_FUNDAMENTAL")
assert os.path.isdir(fundamental_repo), fundamental_repo
sys.path.insert(0, fundamental_repo)
from RepositoryBootstrap import * # <Unused import> pylint: disable = W0614
from RepositoryBootstrap.SetupAndActivate import CurrentShell # <Unused import> pylint: disable = W0614
from RepositoryBootstrap.SetupAndActivate.Configuration import * # <Unused import> pylint: disable = W0614
del sys.path[0]
from _custom_data import _CUSTOM_DATA
# ----------------------------------------------------------------------
# There are two types of repositories: Standard and Mixin. Only one standard
# repository may be activated within an environment at a time while any number
# of mixin repositories can be activated within a standard repository environment.
# Standard repositories may be dependent on other repositories (thereby inheriting
# their functionality), support multiple configurations, and specify version
# information for tools and libraries in themselves or its dependencies.
#
# Mixin repositories are designed to augment other repositories. They cannot
# have configurations or dependencies and may not be activated on their own.
#
# These differences are summarized in this table:
#
# Standard Mixin
# -------- -----
# Can be activated in isolation X
# Supports configurations X
# Supports VersionSpecs X
# Can be dependent upon other repositories X
# Can be activated within any other Standard X
# repository
#
# Consider a script that wraps common Git commands. This functionality is useful
# across a number of different repositories, yet doesn't have functionality that
# is useful on its own; it provides functionality that augments other repositories.
# This functionality should be included within a repository that is classified
# as a mixin repository.
#
# To classify a repository as a Mixin repository, decorate the GetDependencies method
# with the MixinRepository decorator.
#
# @MixinRepository # <-- Uncomment this line to classify this repository as a mixin repository
def GetDependencies():
"""
Returns information about the dependencies required by this repository.
The return value should be an OrderedDict if the repository supports multiple configurations
(aka is configurable) or a single Configuration if not.
"""
d = OrderedDict()
if CurrentShell.CategoryName == "Windows":
architectures = ["x64", "x86"]
else:
        # Cross compiling is much more difficult on Linux than it is on
        # Windows. Only support the current architecture.
architectures = [CurrentShell.Architecture]
for architecture in architectures:
d[architecture] = Configuration(
architecture,
[
Dependency(
"0EAA1DCF22804F90AD9F5A3B85A5D706",
"Common_Environment",
"python36",
"https://github.com/davidbrownell/Common_Environment_v3.git",
)
],
)
return d
# ----------------------------------------------------------------------
def GetCustomActions(debug, verbose, explicit_configurations):
"""
Returns an action or list of actions that should be invoked as part of the setup process.
Actions are generic command line statements defined in
<Common_Environment>/Libraries/Python/CommonEnvironment/v1.0/CommonEnvironment/Shell/Commands/__init__.py
that are converted into statements appropriate for the current scripting language (in most
    cases, this is Bash on Linux systems and Batch or PowerShell on Windows systems).
"""
actions = []
for tool, version_infos in _CUSTOM_DATA:
for version, operating_system_infos in version_infos:
for operating_system, hash in operating_system_infos:
if CurrentShell.CategoryName != operating_system:
continue
tool_dir = os.path.join(
_script_dir,
"Tools",
tool,
version,
operating_system,
)
assert os.path.isdir(tool_dir), tool_dir
actions += [
CurrentShell.Commands.Execute(
'python "{script}" Install "{tool} - {version}" "{uri}" "{dir}" "/unique_id={hash}" /unique_id_is_hash'.format(
script=os.path.join(
os.getenv("DEVELOPMENT_ENVIRONMENT_FUNDAMENTAL"),
"RepositoryBootstrap",
"SetupAndActivate",
"AcquireBinaries.py",
),
tool=tool,
version=version,
uri=CommonEnvironmentImports.FileSystem.FilenameToUri(
os.path.join(tool_dir, "Install.7z"),
),
dir=tool_dir,
hash=hash,
),
),
]
# Perform actions that must be completed after all other actions have completed
actions.append(
CurrentShell.Commands.Execute(
'python "{}"'.format(os.path.join(_script_dir, "Setup_epilogue.py")),
),
)
return actions
| [
"[email protected]"
] | |
24053881224fa4eeef0ad6eded09146927976cc0 | 5b3d8f56f4d18dc8809f9f5aa7d2a7089cdbf489 | /.c9/metadata/workspace/FrequenciesMain/PMchecksSPINT.py | 3fc1d903ca16a8e76e8844d6096538a62339ba3c | [] | no_license | heyliljill/edpsych-cloned | 89ba1a827ed66651b7387b25bc2c188ff344e8d1 | ba02e4789e390bb6488b11608b994ee5678a4b30 | refs/heads/master | 2020-07-26T00:51:41.004018 | 2019-09-14T17:26:45 | 2019-09-14T17:26:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,118 | py | {"filter":false,"title":"PMchecksSPINT.py","tooltip":"/FrequenciesMain/PMchecksSPINT.py","undoManager":{"mark":18,"position":18,"stack":[[{"start":{"row":2,"column":18},"end":{"row":2,"column":19},"action":"insert","lines":["S"],"id":2}],[{"start":{"row":2,"column":19},"end":{"row":2,"column":20},"action":"insert","lines":["P"],"id":3}],[{"start":{"row":2,"column":20},"end":{"row":2,"column":21},"action":"insert","lines":["I"],"id":4}],[{"start":{"row":2,"column":21},"end":{"row":2,"column":22},"action":"insert","lines":["N"],"id":5}],[{"start":{"row":2,"column":22},"end":{"row":2,"column":23},"action":"insert","lines":["T"],"id":6}],[{"start":{"row":6,"column":0},"end":{"row":7,"column":0},"action":"insert","lines":["",""],"id":7}],[{"start":{"row":7,"column":0},"end":{"row":8,"column":0},"action":"insert","lines":["",""],"id":8}],[{"start":{"row":8,"column":0},"end":{"row":19,"column":4},"action":"insert","lines":["for i in range(0,2):"," ## SELECT HIGH INTEREST ## "," filtersign = [\"~=\",\"=\"]"," if mathread == \"m\":"," spint = \"mathspint\""," elif mathread == \"r\":"," spint = \"readspint\""," "," filterText = \"\"\"USE ALL. \\nCOMPUTE filter_$=(\"\"\"+spint+filtersign[i]+ \"2\"+\"\"\"). \\nVARIABLE LABELS filter_$ '\"\"\" + spint+filtersign[i]+ \"2\" +\"\"\"(FILTER)'. \\nVALUE LABELS filter_$ 0 'Not Selected' 1 'Selected'. \\nFORMATS filter_$ (f1.0). \\nFILTER BY filter_$. 
\\nEXECUTE.\\n\\n\"\"\""," "," f.write(filterText)"," "],"id":9}],[{"start":{"row":19,"column":0},"end":{"row":19,"column":4},"action":"insert","lines":[" "],"id":10},{"start":{"row":20,"column":0},"end":{"row":20,"column":4},"action":"insert","lines":[" "]},{"start":{"row":21,"column":0},"end":{"row":21,"column":4},"action":"insert","lines":[" "]},{"start":{"row":22,"column":0},"end":{"row":22,"column":4},"action":"insert","lines":[" "]},{"start":{"row":23,"column":0},"end":{"row":23,"column":4},"action":"insert","lines":[" "]},{"start":{"row":24,"column":0},"end":{"row":24,"column":4},"action":"insert","lines":[" "]},{"start":{"row":25,"column":0},"end":{"row":25,"column":4},"action":"insert","lines":[" "]},{"start":{"row":26,"column":0},"end":{"row":26,"column":4},"action":"insert","lines":[" "]},{"start":{"row":27,"column":0},"end":{"row":27,"column":4},"action":"insert","lines":[" "]},{"start":{"row":28,"column":0},"end":{"row":28,"column":4},"action":"insert","lines":[" "]},{"start":{"row":29,"column":0},"end":{"row":29,"column":4},"action":"insert","lines":[" "]},{"start":{"row":30,"column":0},"end":{"row":30,"column":4},"action":"insert","lines":[" "]},{"start":{"row":31,"column":0},"end":{"row":31,"column":4},"action":"insert","lines":[" "]},{"start":{"row":32,"column":0},"end":{"row":32,"column":4},"action":"insert","lines":[" "]},{"start":{"row":33,"column":0},"end":{"row":33,"column":4},"action":"insert","lines":[" "]},{"start":{"row":34,"column":0},"end":{"row":34,"column":4},"action":"insert","lines":[" "]},{"start":{"row":35,"column":0},"end":{"row":35,"column":4},"action":"insert","lines":[" "]},{"start":{"row":36,"column":0},"end":{"row":36,"column":4},"action":"insert","lines":[" "]},{"start":{"row":37,"column":0},"end":{"row":37,"column":4},"action":"insert","lines":[" "]},{"start":{"row":38,"column":0},"end":{"row":38,"column":4},"action":"insert","lines":[" "]},{"start":{"row":39,"column":0},"end":{"row":39,"column":4},"action":"insert","lines":[" "]},{"start":{"row":40,"column":0},"end":{"row":40,"column":4},"action":"insert","lines":[" "]},{"start":{"row":41,"column":0},"end":{"row":41,"column":4},"action":"insert","lines":[" "]},{"start":{"row":42,"column":0},"end":{"row":42,"column":4},"action":"insert","lines":[" "]},{"start":{"row":43,"column":0},"end":{"row":43,"column":4},"action":"insert","lines":[" "]},{"start":{"row":44,"column":0},"end":{"row":44,"column":4},"action":"insert","lines":[" "]},{"start":{"row":45,"column":0},"end":{"row":45,"column":4},"action":"insert","lines":[" "]},{"start":{"row":46,"column":0},"end":{"row":46,"column":4},"action":"insert","lines":[" "]},{"start":{"row":47,"column":0},"end":{"row":47,"column":4},"action":"insert","lines":[" "]},{"start":{"row":48,"column":0},"end":{"row":48,"column":4},"action":"insert","lines":[" "]},{"start":{"row":49,"column":0},"end":{"row":49,"column":4},"action":"insert","lines":[" "]},{"start":{"row":50,"column":0},"end":{"row":50,"column":4},"action":"insert","lines":[" "]},{"start":{"row":51,"column":0},"end":{"row":51,"column":4},"action":"insert","lines":[" "]},{"start":{"row":52,"column":0},"end":{"row":52,"column":4},"action":"insert","lines":[" "]},{"start":{"row":53,"column":0},"end":{"row":53,"column":4},"action":"insert","lines":[" "]},{"start":{"row":54,"column":0},"end":{"row":54,"column":4},"action":"insert","lines":[" "]},{"start":{"row":55,"column":0},"end":{"row":55,"column":4},"action":"insert","lines":[" 
"]},{"start":{"row":56,"column":0},"end":{"row":56,"column":4},"action":"insert","lines":[" "]},{"start":{"row":57,"column":0},"end":{"row":57,"column":4},"action":"insert","lines":[" "]},{"start":{"row":58,"column":0},"end":{"row":58,"column":4},"action":"insert","lines":[" "]},{"start":{"row":59,"column":0},"end":{"row":59,"column":4},"action":"insert","lines":[" "]}],[{"start":{"row":59,"column":62},"end":{"row":60,"column":0},"action":"insert","lines":["",""],"id":11},{"start":{"row":60,"column":0},"end":{"row":60,"column":8},"action":"insert","lines":[" "]}],[{"start":{"row":60,"column":8},"end":{"row":61,"column":0},"action":"insert","lines":["",""],"id":12},{"start":{"row":61,"column":0},"end":{"row":61,"column":8},"action":"insert","lines":[" "]}],[{"start":{"row":61,"column":8},"end":{"row":62,"column":0},"action":"insert","lines":["",""],"id":13},{"start":{"row":62,"column":0},"end":{"row":62,"column":8},"action":"insert","lines":[" "]}],[{"start":{"row":62,"column":4},"end":{"row":62,"column":8},"action":"remove","lines":[" "],"id":14}],[{"start":{"row":62,"column":0},"end":{"row":62,"column":4},"action":"remove","lines":[" "],"id":15}],[{"start":{"row":62,"column":0},"end":{"row":64,"column":12},"action":"insert","lines":["f.write(\"\"\"FILTER OFF.","USE ALL.","EXECUTE.\"\"\")"],"id":16}],[{"start":{"row":7,"column":0},"end":{"row":8,"column":0},"action":"insert","lines":["",""],"id":17}],[{"start":{"row":8,"column":0},"end":{"row":9,"column":0},"action":"insert","lines":["",""],"id":18}],[{"start":{"row":8,"column":0},"end":{"row":8,"column":15},"action":"insert","lines":["mathread == \"m\""],"id":19}],[{"start":{"row":8,"column":10},"end":{"row":8,"column":11},"action":"remove","lines":["="],"id":20}]]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":8,"column":0},"end":{"row":20,"column":23},"isBackwards":true},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1456345978891,"hash":"4ea48e1405f9aea3952e15cbcbc5d2706399170d"} | [
"[email protected]"
] | |
be89e3bb2bcbb432edbcf5ef7805532ee5823d5d | 30dc32fd39cf71c76fc24d53b68a8393adcac149 | /OWDTestToolkit/apps/Marketplace/__main.py | a24d1a6405e92cfdd242bbf8fe55cd7389288a89 | [] | no_license | carlosmartineztoral/OWD_TEST_TOOLKIT | 448caefdc95bc3e54aad97df0bff7046ffb37be1 | 50768f79488735eba8355824f5aa3686a71d560a | refs/heads/master | 2021-01-15T17:14:03.614981 | 2013-06-11T12:48:18 | 2013-06-11T12:48:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,357 | py | from OWDTestToolkit.global_imports import *
import installApp, \
       searchForApp, \
       selectSearchResultApp
class Marketplace (
installApp.main,
searchForApp.main,
selectSearchResultApp.main):
def __init__(self, p_parent):
self.apps = p_parent.apps
self.data_layer = p_parent.data_layer
self.parent = p_parent
self.marionette = p_parent.marionette
self.UTILS = p_parent.UTILS
def launch(self):
#
# Launch the app.
#
self.apps.kill_all()
# WARNING: Marketplace is in a weird place - you need to use "Marketplace Dev"!!
# self.app = self.apps.launch(self.__class__.__name__)
self.UTILS.logResult("info",
"About to launch the marketplace app from the dev server. " + \
"If it's \"not found\" then either try again later, or contact #marketplace mozilla irc channel.")
self.app = self.apps.launch("Marketplace Dev")
self.UTILS.waitForNotElements(DOM.Market.market_loading_icon,
self.__class__.__name__ + " app - loading icon",
True,
30)
| [
"[email protected]"
] | |
6d2f69de2487fa86a348999f7695b0190ce4b725 | 78d7d7aeb78a8cea6d0e10b89fc4aa6c46c95227 | /3995.py | a3eefaf7f66a32547cbdcc5db18db51791b52a02 | [] | no_license | GenryEden/kpolyakovName | 97db13ef93061a8c2afc6cc5acd91337f79063f1 | c5d7f631ae7ec8770e56170574b82ea2b7d8a4d9 | refs/heads/master | 2023-05-23T21:22:51.983756 | 2021-06-21T08:56:49 | 2021-06-21T08:56:49 | 350,466,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | def check(x):
if sum([int(s) for s in oct(x)[2:]]) % 19 != 0:
return False
m = 1
for s in oct(x)[2:]:
m *= int(s)
return m % 5 == 0
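# Scan 12345..67890 for numbers whose octal digit sum is divisible by 19 and
# whose octal digit product is divisible by 5; track the count and the smallest hit.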
cnt = 0
minimal = 0
for x in range(12345, 67890+1):
if check(x):
cnt += 1
if cnt == 1:
minimal = x
print(cnt, minimal) | [
"[email protected]"
] | |
4220d040287852ff2cb51884d1f88a13f9e80009 | af9268e1ead8cdb491868c14a2240d9e44fb3b56 | /last-minute-env/lib/python2.7/site-packages/django/contrib/admin/templatetags/admin_static.py | 62b8691f9c135756c86c3975ad0fb508ab08de89 | [] | no_license | frosqh/Cousinade2017 | d5154c24c93ca8089eeba26b53c594e92cb6bd82 | c34d5707af02402bf2bb7405eddc91297da399ff | refs/heads/master | 2021-01-20T07:57:34.586476 | 2017-10-22T18:42:45 | 2017-10-22T18:42:45 | 90,074,802 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | from django.template import Library
from django.templatetags.static import static as _static
register = Library()
@register.simple_tag
def static(path):
# Backwards compatibility alias for django.templatetags.static.static().
# Deprecation should start in Django 2.0.
return _static(path)
| [
"[email protected]"
] | |
558a6dcac84f11a72034f4701f4143645c0414fd | 63b864deda44120067eff632bbb4969ef56dd573 | /object_detection/fast rcnn/roi.py | f7f8c76fbc257a5e40c8450b8615c8b335e4a852 | [] | no_license | lizhe960118/Deep-Learning | d134592c327decc1db12cbe19d9a1c85a5056086 | 7d2c4f3a0512ce4bd2f86c9f455da9866d16dc3b | refs/heads/master | 2021-10-29T06:15:04.749917 | 2019-07-19T15:27:25 | 2019-07-19T15:27:25 | 152,355,392 | 5 | 2 | null | 2021-10-12T22:19:33 | 2018-10-10T03:06:44 | Jupyter Notebook | UTF-8 | Python | false | false | 1,310 | py | import numpy as np
import torch
import torch.nn as nn
class ROIPool(nn.Module):
def __init__(self, output_size):
super().__init__()
self.maxpool = nn.AdaptiveMaxPool2d(output_size)
self.size = output_size
def forward(self, images, rois, roi_idx):
        # images: feature maps, batch_size * channels * h * w
        # rois: [[x1,y1,x2,y2], ...], n * 4
        # roi_idx: e.g. [4,5,8,7], length n; roi_idx[i] records which feature map rois[i] belongs to
        n = rois.shape[0]  # number of proposal boxes
h = images.size(2)
w = images.size(3)
        x1 = rois[:,0]  # box coordinates, given normalized to the (0, 1) range
y1 = rois[:,1]
x2 = rois[:,2]
y2 = rois[:,3]
        x1 = np.floor(x1 * w).astype(int)  # map back to positions on the feature map
x2 = np.ceil(x2 * w).astype(int)
y1 = np.floor(y1 * h).astype(int)
y2 = np.ceil(y2 * h).astype(int)
res = []
for i in range(n):
img = images[roi_idx[i]].unsqueeze(0)
img = img[:, :, y1[i]:y2[i], x1[i]:x2[i]]
            img = self.maxpool(img)  # self.maxpool directly emits an output_size * output_size map
res.append(img)
        res = torch.cat(res, dim=0)  # n * channels * output_size * output_size
return res
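# Minimal usage sketch (made-up shapes): pool 7x7 features for two normalized
# boxes drawn from a batch of feature maps.
#     feats = torch.randn(2, 256, 32, 32)
#     rois = np.array([[0.1, 0.1, 0.5, 0.5], [0.2, 0.3, 0.9, 0.8]])
#     pooled = ROIPool((7, 7))(feats, rois, [0, 1])  # -> 2 x 256 x 7 x 7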
| [
"[email protected]"
] | |
bd6d1d5a395d1a59e39358b8164d34d56dbcb1cb | 82e78f606f8c203cb77b1e3e8fd3b13158f31af8 | /thenewboston/transactions/validation.py | 8612d11da9a8ba93fe0d80accb79bbd627413987 | [
"MIT"
] | permissive | rajat4665/thenewboston-python | 1f0b8aea02fb8dbfb2eea60cd1ef07ac12fad667 | df842c793fe7bfd8731fd8746abf25747c9e569e | refs/heads/master | 2022-11-26T00:46:54.848608 | 2020-07-26T00:12:06 | 2020-07-26T00:12:06 | 283,263,021 | 0 | 0 | MIT | 2020-07-28T16:07:08 | 2020-07-28T16:07:07 | null | UTF-8 | Python | false | false | 403 | py | def validate_transaction_exists(*, amount, error, recipient, txs):
"""
Check for the existence of a Tx
"""
tx = next((tx for tx in txs if tx.get('amount') == amount and tx.get('recipient') == recipient), None)
if not tx:
raise error({
'error_message': 'Tx not found',
'expected_amount': amount,
'expected_recipient': recipient
})
| [
"[email protected]"
] | |
df9384d60dcde3fb318a9b646d98debfab15d79a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03578/s404612965.py | ac57b84158a4259a926ce398a0358c3c359d58d5 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | # -*- coding: utf-8 -*-
import sys
from collections import deque, defaultdict
from math import sqrt, factorial
# def input(): return sys.stdin.readline()[:-1] # warning not \n
# def input(): return sys.stdin.buffer.readline().strip() # warning bytes
# def input(): return sys.stdin.buffer.readline().decode('utf-8')
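# Multiset check: count the N parts on hand, then consume one count per required
# part; print NO as soon as a required part is unavailable.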
def solve():
n = int(input())
d = defaultdict(int)
a = [int(x) for x in input().split()]
for e in a:
d[e] += 1
m = int(input())
t = [int(x) for x in input().split()]
for e in t:
if d[e]:
d[e] -= 1
else:
print("NO")
return
print("YES")
t = 1
# t = int(input())
for case in range(1,t+1):
ans = solve()
"""
1 + k
"""
| [
"[email protected]"
] | |
69c5f69164eed21cf0ed953345f5fed4d702daf5 | 1633258aff76252b660534eae6d70a9e95a468ec | /cost_management/urls.py | 4443c8d4c8e5ca1952519e7048671ed5a7cfe38d | [] | no_license | kxplorer/banglai-django | 7077117f66128cb2bbaa8d50c1a28c076b303987 | 0d764f744ef165b078e856eb9374dba93cb614e8 | refs/heads/master | 2021-09-24T20:27:14.726832 | 2018-09-16T08:50:42 | 2018-09-16T08:50:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | from django.urls import path
from . import views
urlpatterns = [
path('list/', views.my_expense, name='cost-list'),
path('add/', views.add_expense, name='add-expense'),
path('edit/<int:expense_id>/', views.edit_expense, name='edit-expense'),
path('delete/<int:expense_id>/', views.delete_expense, name='delete-expense'),
]
| [
"[email protected]"
] | |
ed531ac39f4e836f0ef9223d8913f55327376982 | 8c825730f6fd253e58902b150a9800de8f766943 | /capture/noworkflow/now/cmd/cmd_history.py | 2e5d8da0a5ca9eab5b4e964956f6cef37e97c90f | [
"MIT"
] | permissive | rmparanhos/noworkflow | aeb92695c34e65edf9cc4d4dc31d80467b085773 | 8f703a14503345568e91957659b43654036f8154 | refs/heads/master | 2020-05-17T12:39:04.231204 | 2019-06-21T03:42:49 | 2019-06-21T03:42:49 | 183,716,529 | 0 | 0 | null | 2019-04-27T01:58:31 | 2019-04-27T01:58:31 | null | UTF-8 | Python | false | false | 1,849 | py | # Copyright (c) 2016 Universidade Federal Fluminense (UFF)
# Copyright (c) 2016 Polytechnic Institute of New York University.
# This file is part of noWorkflow.
# Please, consult the license terms in the LICENSE file.
"""'now history' command"""
from __future__ import (absolute_import, print_function,
division, unicode_literals)
import os
from ..ipython.converter import create_ipynb
from ..persistence.models.history import History as HistoryModel
from ..persistence import persistence_config
from .command import NotebookCommand
class History(NotebookCommand):
"""Show project history"""
def add_arguments(self):
add_arg = self.add_argument
add_arg("-s", "--script", type=str, default="*",
help="show history of specific script")
add_arg("-e", "--status", type=str, default="*",
choices=["*", "finished", "unfinished", "backup"],
help="show only trials in a specific status")
add_arg("--dir", type=str,
help="set demo path. Default to CWD/demo<number>"
"where <number> is the demo identification")
def execute(self, args):
persistence_config.connect_existing(args.dir or os.getcwd())
history = HistoryModel(script=args.script, status=args.status)
print(history)
def execute_export(self, args):
code = ("%load_ext noworkflow\n"
"import noworkflow.now.ipython as nip\n"
"# <codecell>\n"
"history = nip.History()\n"
"# history.graph.width = 700\n"
"# history.graph.height = 300\n"
"# history.script = '*'\n"
"# history.status = '*'\n"
"# <codecell>\n"
"history")
create_ipynb("History.ipynb", code)
| [
"[email protected]"
] | |
cb0abb7803753d6eb75cdac081833a6020167949 | 821f403a3afc9055d40893eca033c369a4c3831e | /Easy/No206.py | c2200da24597a13f4e107a7fd6caac6856ee93e2 | [] | no_license | kikihiter/LeetCode2 | 29f91b6992a01ba23e7da04b2b2c862410cc563b | 7167f1a7c6cb16cca63675c80037682752ee2a7d | refs/heads/master | 2023-05-01T03:45:44.482932 | 2021-05-19T13:12:16 | 2021-05-19T13:12:16 | 277,283,525 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def reverseList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
        temp = None  # starts as None; stores the previous node
while head != None:
nextNode = head.next
head.next = temp
temp = head
head = nextNode
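        # when the loop ends, temp points at the new head of the reversed list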
return temp | [
"[email protected]"
] | |
dff0eb2acf4da0a475156ff795a327f9c89bcde3 | a5ada23f0c9d429cd7afa2351368e46bc23255e4 | /meta_models/meta_layers/conv3d_meta_layer.py | b3542d2d43a0dafcd10873c1e253f60dafba31d8 | [
"MIT"
] | permissive | AnacletoLAB/meta_models | ef6df0205f88832897e7ebdcd8057635b90024a9 | 9c70eb0bf080f0ec4bd24b7764f0f71d92d467d5 | refs/heads/master | 2023-04-11T14:01:47.678710 | 2021-04-27T08:25:53 | 2021-04-27T08:25:53 | 286,005,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,515 | py | """Class implementing meta-model for a Conv3D Layer."""
from typing import Dict
from tensorflow.keras.layers import (Activation, BatchNormalization, Conv3D,
Layer)
from .regularized_meta_layer import RegularizedMetaLayer
from ..utils import distributions
class Conv3DMetaLayer(RegularizedMetaLayer):
"""Class implementing meta-layer for tri-dimensional convolutional layers.
Private members
------------------------
_min_filters: int,
Minimum number of filters to use for the layer.
_max_filters: int,
Maximum number of filters to use for the layer.
_min_x_kernel_size: int,
Minimum size of the kernel on the lenght axis.
_max_x_kernel_size: int,
Maximum size of the kernel on the lenght axis.
_min_y_kernel_size: int,
Minimum size of the kernel on the depth axis.
_max_y_kernel_size: int,
Maximum size of the kernel on the depth axis.
_min_z_kernel_size: int,
Minimum size of the kernel on the height axis.
_max_z_kernel_size: int,
Maximum size of the kernel on the height axis.
_activation: str,
The activation function to use for the layer.
"""
def __init__(
self,
min_filters: int = 0,
max_filters: int = 256,
min_x_kernel_size: int = 1,
max_x_kernel_size: int = 5,
min_y_kernel_size: int = 1,
max_y_kernel_size: int = 5,
min_z_kernel_size: int = 1,
max_z_kernel_size: int = 5,
activation: str = "relu",
**kwargs: Dict
):
"""Create new Conv3DResidualLayer meta-model object.
Parameters
----------------------
min_filters: int = 0,
Minimum number of filters (neurons) in each layer.
If the tuning process passes 0, then the layer is skipped.
max_filters: int = 256,
Maximum number of filters (neurons) in each layer.
min_x_kernel_size: int = 1,
Minimum size of the kernel on the lenght axis.
max_x_kernel_size: int = 5,
Maximum size of the kernel on the lenght axis.
min_y_kernel_size: int = 1,
Minimum size of the kernel on the depth axis.
max_y_kernel_size: int = 5,
Maximum size of the kernel on the depth axis.
min_z_kernel_size: int = 1,
Minimum size of the kernel on the height axis.
max_z_kernel_size: int = 5,
Maximum size of the kernel on the height axis.
activation: str = "relu",
The activation function to use for the layer.
**kwargs: Dict,
Dictionary of keyword parameters to be passed to parent class.
"""
super().__init__(**kwargs)
self._min_filters = min_filters
self._max_filters = max_filters
self._min_x_kernel_size = min_x_kernel_size
self._max_x_kernel_size = max_x_kernel_size
self._min_y_kernel_size = min_y_kernel_size
self._max_y_kernel_size = max_y_kernel_size
self._min_z_kernel_size = min_z_kernel_size
self._max_z_kernel_size = max_z_kernel_size
self._activation = activation
def _space(self) -> Dict:
"""Return hyper parameters of the layer."""
return {
"filters": (distributions.integer, self._min_filters, self._max_filters),
"x_kernel_size": (distributions.integer, self._min_x_kernel_size, self._max_x_kernel_size),
"y_kernel_size": (distributions.integer, self._min_y_kernel_size, self._max_y_kernel_size),
"z_kernel_size": (distributions.integer, self._min_z_kernel_size, self._max_z_kernel_size),
**super()._space()
}
def _build(
self,
input_layers: Layer,
filters: int,
x_kernel_size: int,
y_kernel_size: int,
z_kernel_size: int,
strides: int = (1, 1, 1),
**kwargs: Dict
) -> Layer:
"""Return built Conv3D layer block.
If the given filters number is equal to 0, the layer is skipped.
Parameters
--------------------------
input_layers: Layer,
The input layer of the current layer.
filters: int,
The number of neurons of the layer.
x_kernel_size: int,
The dimension of the kernel for the layer, on the length axis.
y_kernel_size: int,
The dimension of the kernel for the layer, on the depth axis.
z_kernel_size: int,
The dimension of the kernel for the layer, on the height axis.
        strides: int = (1, 1, 1),
Strides for the convolutional layer.
**kwargs: Dict,
The kwargs to pass to the kernel regularizers.
Returns
--------------------------
Output layer of the block.
"""
filters = round(filters)
x_kernel_size = round(x_kernel_size)
y_kernel_size = round(y_kernel_size)
z_kernel_size = round(z_kernel_size)
if filters == 0:
return input_layers
layer = Conv3D(
filters=filters,
kernel_size=(x_kernel_size, y_kernel_size, z_kernel_size),
strides=strides,
padding="same",
**self._build_regularizers(**kwargs)
)(input_layers)
if self._batch_normalization:
layer = BatchNormalization()(layer)
activation = Activation(self._activation)(layer)
return activation
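    # Usage sketch (hypothetical values, not from this library's docs):
    #     meta = Conv3DMetaLayer(max_filters=128)
    #     out = meta._build(prev_layer, filters=64, x_kernel_size=3,
    #                       y_kernel_size=3, z_kernel_size=3)
    # returns the activated Conv3D block, or prev_layer unchanged when
    # filters rounds to 0.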
| [
"[email protected]"
] | |
04747c7c8266e99f1a85acf17f1ae88fef5da79d | 03d68f032ab0e8cf269413d0309fc6d36281504f | /src/l2hmc/utils/tensorflow/history.py | d66fe35509b67f88da6d0b9dd0b405dac0889a21 | [
"Apache-2.0"
] | permissive | saforem2/l2hmc-qcd | 560026cd4d63f786247170a2b8641a7402b7e81e | 46ada488bc5c8b0a31be0bf23ea11b95b3b06767 | refs/heads/main | 2023-09-06T03:20:19.577196 | 2023-08-23T19:26:58 | 2023-08-23T19:26:58 | 176,870,361 | 57 | 8 | Apache-2.0 | 2023-08-23T18:56:02 | 2019-03-21T04:32:54 | Jupyter Notebook | UTF-8 | Python | false | false | 1,812 | py | """
tensorflow/history.py
Implements tfHistory, containing minor modifications from base History class.
"""
from __future__ import absolute_import, print_function, division, annotations
from typing import Any
import tensorflow as tf
import numpy as np
from l2hmc.utils.history import BaseHistory
class History(BaseHistory):
def update(self, metrics: dict) -> dict:
avgs = {}
era = metrics.get('era', 0)
for key, val in metrics.items():
avg = None
if isinstance(val, (float, int)):
avg = val
else:
                if isinstance(val, dict):
                    base_key = key
                    for k, v in val.items():
                        # Rebuild from base_key so the prefix does not compound
                        # across iterations of this loop.
                        key = f'{base_key}/{k}'
                        try:
                            avg = self._update(key=key, val=v)
                        # TODO: Figure out how to deal with exception
                        except tf.errors.InvalidArgumentError:
                            continue
else:
avg = self._update(key=key, val=val)
if avg is not None:
avgs[key] = avg
try:
self.era_metrics[str(era)][key].append(avg)
except KeyError:
self.era_metrics[str(era)][key] = [avg]
return avgs
def _update(self, key: str, val: Any) -> float:
if val is None:
raise ValueError(f'None encountered: {key}: {val}')
if isinstance(val, list):
val = np.array(val)
try:
self.history[key].append(val)
except KeyError:
self.history[key] = [val]
if isinstance(val, (float, int)):
return val
try:
return tf.reduce_mean(val)
except Exception:
return val
| [
"[email protected]"
] | |
2bc39bcc1beee6e7c11228aeb0f44298cf6663e7 | 50fb142226d75ed4a9d991555b9ee266f02260e5 | /include/HydrusThreading.py | bb9db952a7e15cd4f0b5f668c9b9a4383f881b68 | [
"WTFPL"
] | permissive | codelizard42/hydrus | 85a4ee0f90f96de01e2fcc0336d8bc57dcf418fe | 4bb6c317040819c87bf6085f74620441587ef2d1 | refs/heads/master | 2020-04-05T22:48:40.514900 | 2018-12-12T22:15:46 | 2018-12-12T22:15:46 | 30,299,239 | 1 | 0 | null | 2015-02-04T13:09:35 | 2015-02-04T13:09:34 | null | UTF-8 | Python | false | false | 17,836 | py | import bisect
import collections
import HydrusExceptions
import Queue
import random
import threading
import time
import traceback
import HydrusData
import HydrusGlobals as HG
import os
NEXT_THREAD_CLEAROUT = 0
THREADS_TO_THREAD_INFO = {}
THREAD_INFO_LOCK = threading.Lock()
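# Registry of per-thread info dicts for live threads; guarded by THREAD_INFO_LOCK
# and pruned periodically via ClearOutDeadThreads().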
def ClearOutDeadThreads():
with THREAD_INFO_LOCK:
all_threads = list( THREADS_TO_THREAD_INFO.keys() )
for thread in all_threads:
if not thread.is_alive():
del THREADS_TO_THREAD_INFO[ thread ]
def GetThreadInfo( thread = None ):
global NEXT_THREAD_CLEAROUT
if HydrusData.TimeHasPassed( NEXT_THREAD_CLEAROUT ):
ClearOutDeadThreads()
NEXT_THREAD_CLEAROUT = HydrusData.GetNow() + 600
if thread is None:
thread = threading.current_thread()
with THREAD_INFO_LOCK:
if thread not in THREADS_TO_THREAD_INFO:
thread_info = {}
thread_info[ 'shutting_down' ] = False
THREADS_TO_THREAD_INFO[ thread ] = thread_info
return THREADS_TO_THREAD_INFO[ thread ]
def IsThreadShuttingDown():
me = threading.current_thread()
if isinstance( me, DAEMON ):
if HG.view_shutdown:
return True
else:
if HG.model_shutdown:
return True
thread_info = GetThreadInfo()
return thread_info[ 'shutting_down' ]
def ShutdownThread( thread ):
thread_info = GetThreadInfo( thread )
thread_info[ 'shutting_down' ] = True
class DAEMON( threading.Thread ):
def __init__( self, controller, name ):
threading.Thread.__init__( self, name = name )
self._controller = controller
self._name = name
self._event = threading.Event()
self._controller.sub( self, 'wake', 'wake_daemons' )
self._controller.sub( self, 'shutdown', 'shutdown' )
def _DoPreCall( self ):
if HG.daemon_report_mode:
HydrusData.ShowText( self._name + ' doing a job.' )
def GetCurrentJobSummary( self ):
return 'unknown job'
def GetName( self ):
return self._name
def shutdown( self ):
ShutdownThread( self )
self.wake()
def wake( self ):
self._event.set()
class DAEMONWorker( DAEMON ):
def __init__( self, controller, name, callable, topics = None, period = 3600, init_wait = 3, pre_call_wait = 0 ):
if topics is None:
topics = []
DAEMON.__init__( self, controller, name )
self._callable = callable
self._topics = topics
self._period = period
self._init_wait = init_wait
self._pre_call_wait = pre_call_wait
for topic in topics:
self._controller.sub( self, 'set', topic )
self.start()
def _CanStart( self, time_started_waiting ):
return self._PreCallWaitIsDone( time_started_waiting ) and self._ControllerIsOKWithIt()
def _ControllerIsOKWithIt( self ):
return True
def _PreCallWaitIsDone( self, time_started_waiting ):
        # just shave a bit off so things that don't have any wait won't somehow have to wait a single accidental extra cycle
time_to_start = ( float( time_started_waiting ) - 0.1 ) + self._pre_call_wait
return HydrusData.TimeHasPassed( time_to_start )
def GetCurrentJobSummary( self ):
return self._callable
def run( self ):
self._event.wait( self._init_wait )
while True:
if IsThreadShuttingDown():
return
time_started_waiting = HydrusData.GetNow()
while not self._CanStart( time_started_waiting ):
time.sleep( 1 )
if IsThreadShuttingDown():
return
self._DoPreCall()
try:
self._callable( self._controller )
except HydrusExceptions.ShutdownException:
return
except Exception as e:
HydrusData.ShowText( 'Daemon ' + self._name + ' encountered an exception:' )
HydrusData.ShowException( e )
if IsThreadShuttingDown(): return
self._event.wait( self._period )
self._event.clear()
def set( self, *args, **kwargs ): self._event.set()
# Big stuff like DB maintenance that we don't want to run while other important stuff is going on, like user interaction or vidya on another process
class DAEMONBackgroundWorker( DAEMONWorker ):
def _ControllerIsOKWithIt( self ):
return self._controller.GoodTimeToDoBackgroundWork()
# Big stuff that we want to run when the user sees, but not at the expense of something else, like laggy session load
class DAEMONForegroundWorker( DAEMONWorker ):
def _ControllerIsOKWithIt( self ):
return self._controller.GoodTimeToDoForegroundWork()
class THREADCallToThread( DAEMON ):
def __init__( self, controller, name ):
DAEMON.__init__( self, controller, name )
self._callable = None
self._queue = Queue.Queue()
self._currently_working = True # start off true so new threads aren't used twice by two quick successive calls
def CurrentlyWorking( self ):
return self._currently_working
def GetCurrentJobSummary( self ):
return self._callable
def put( self, callable, *args, **kwargs ):
self._currently_working = True
self._queue.put( ( callable, args, kwargs ) )
self._event.set()
def run( self ):
while True:
try:
while self._queue.empty():
if IsThreadShuttingDown():
return
self._event.wait( 1200 )
self._event.clear()
self._DoPreCall()
( callable, args, kwargs ) = self._queue.get()
self._callable = ( callable, args, kwargs )
callable( *args, **kwargs )
self._callable = None
del callable
except HydrusExceptions.ShutdownException:
return
except Exception as e:
HydrusData.Print( traceback.format_exc() )
HydrusData.ShowException( e )
finally:
self._currently_working = False
time.sleep( 0.00001 )
class JobScheduler( threading.Thread ):
def __init__( self, controller ):
threading.Thread.__init__( self, name = 'Job Scheduler' )
self._controller = controller
self._waiting = []
self._waiting_lock = threading.Lock()
self._new_job_arrived = threading.Event()
self._current_job = None
self._cancel_filter_needed = threading.Event()
self._sort_needed = threading.Event()
self._controller.sub( self, 'shutdown', 'shutdown' )
def _FilterCancelled( self ):
with self._waiting_lock:
self._waiting = [ job for job in self._waiting if not job.IsCancelled() ]
def _GetLoopWaitTime( self ):
with self._waiting_lock:
if len( self._waiting ) == 0:
return 0.2
next_job = self._waiting[0]
time_delta_until_due = next_job.GetTimeDeltaUntilDue()
return min( 1.0, time_delta_until_due )
def _NoWorkToStart( self ):
with self._waiting_lock:
if len( self._waiting ) == 0:
return True
next_job = self._waiting[0]
if next_job.IsDue():
return False
else:
return True
def _SortWaiting( self ):
        # keep the waiting jobs in ascending order of next work time
with self._waiting_lock: # this uses __lt__ to sort
self._waiting.sort()
def _StartWork( self ):
jobs_started = 0
while True:
with self._waiting_lock:
if len( self._waiting ) == 0:
break
if jobs_started >= 10: # try to avoid spikes
break
next_job = self._waiting[0]
if next_job.IsDue():
next_job = self._waiting.pop( 0 )
next_job.StartWork()
jobs_started += 1
else:
break # all the rest in the queue are not due
def AddJob( self, job ):
with self._waiting_lock:
bisect.insort( self._waiting, job )
self._new_job_arrived.set()
def ClearOutDead( self ):
with self._waiting_lock:
self._waiting = [ job for job in self._waiting if not job.IsDead() ]
def GetName( self ):
return 'Job Scheduler'
def GetCurrentJobSummary( self ):
with self._waiting_lock:
return HydrusData.ToHumanInt( len( self._waiting ) ) + ' jobs'
def GetPrettyJobSummary( self ):
with self._waiting_lock:
num_jobs = len( self._waiting )
job_lines = [ repr( job ) for job in self._waiting ]
lines = [ HydrusData.ToHumanInt( num_jobs ) + ' jobs:' ] + job_lines
text = os.linesep.join( lines )
return text
def JobCancelled( self ):
self._cancel_filter_needed.set()
def shutdown( self ):
ShutdownThread( self )
def WorkTimesHaveChanged( self ):
self._sort_needed.set()
def run( self ):
while True:
try:
while self._NoWorkToStart():
if IsThreadShuttingDown():
return
#
if self._cancel_filter_needed.is_set():
self._FilterCancelled()
self._cancel_filter_needed.clear()
if self._sort_needed.is_set():
self._SortWaiting()
self._sort_needed.clear()
continue # if some work is now due, let's do it!
#
wait_time = self._GetLoopWaitTime()
self._new_job_arrived.wait( wait_time )
self._new_job_arrived.clear()
self._StartWork()
except HydrusExceptions.ShutdownException:
return
except Exception as e:
HydrusData.Print( traceback.format_exc() )
HydrusData.ShowException( e )
time.sleep( 0.00001 )
class SchedulableJob( object ):
def __init__( self, controller, scheduler, initial_delay, work_callable ):
self._controller = controller
self._scheduler = scheduler
self._work_callable = work_callable
self._next_work_time = HydrusData.GetNowFloat() + initial_delay
self._work_lock = threading.Lock()
self._currently_working = threading.Event()
self._is_cancelled = threading.Event()
def __lt__( self, other ): # for the scheduler to do bisect.insort noice
return self._next_work_time < other._next_work_time
def __repr__( self ):
return repr( self.__class__ ) + ': ' + repr( self._work_callable ) + ' next in ' + HydrusData.TimeDeltaToPrettyTimeDelta( self._next_work_time - HydrusData.GetNowFloat() )
def _BootWorker( self ):
self._controller.CallToThread( self.Work )
def Cancel( self ):
self._is_cancelled.set()
self._scheduler.JobCancelled()
def CurrentlyWorking( self ):
return self._currently_working.is_set()
def GetTimeDeltaUntilDue( self ):
return HydrusData.GetTimeDeltaUntilTimeFloat( self._next_work_time )
def IsCancelled( self ):
return self._is_cancelled.is_set()
def IsDead( self ):
return False
def IsDue( self ):
return HydrusData.TimeHasPassedFloat( self._next_work_time )
def StartWork( self ):
if self._is_cancelled.is_set():
return
self._currently_working.set()
self._BootWorker()
def Wake( self, next_work_time = None ):
if next_work_time is None:
next_work_time = HydrusData.GetNowFloat()
self._next_work_time = next_work_time
self._scheduler.WorkTimesHaveChanged()
def Work( self ):
try:
with self._work_lock:
self._work_callable()
finally:
self._currently_working.clear()
class RepeatingJob( SchedulableJob ):
def __init__( self, controller, scheduler, initial_delay, period, work_callable ):
SchedulableJob.__init__( self, controller, scheduler, initial_delay, work_callable )
self._period = period
self._stop_repeating = threading.Event()
def Cancel( self ):
SchedulableJob.Cancel( self )
self._stop_repeating.set()
def Delay( self, delay ):
self._next_work_time = HydrusData.GetNowFloat() + delay
self._scheduler.WorkTimesHaveChanged()
def IsFinishedWorking( self ):
return self._stop_repeating.is_set()
def SetPeriod( self, period ):
if period > 10.0:
period += random.random() # smooth out future spikes if ten of these all fire at the same time
self._period = period
def StartWork( self ):
if self._stop_repeating.is_set():
return
SchedulableJob.StartWork( self )
def Work( self ):
SchedulableJob.Work( self )
if not self._stop_repeating.is_set():
self._next_work_time = HydrusData.GetNowFloat() + self._period
self._scheduler.AddJob( self )
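if __name__ == '__main__':
    # Illustrative sketch only, not part of Hydrus: shows how the scheduler's
    # waiting list stays ordered by next work time via __lt__ and bisect.insort
    # (bisect is assumed imported at the top of this module, as AddJob uses it).
    class _FakeJob( object ):
        def __init__( self, next_work_time ):
            self._next_work_time = next_work_time
        def __lt__( self, other ):
            return self._next_work_time < other._next_work_time
    waiting = []
    for t in ( 5.0, 1.0, 3.0 ):
        bisect.insort( waiting, _FakeJob( t ) )
    print [ job._next_work_time for job in waiting ] # prints [1.0, 3.0, 5.0]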
| [
"[email protected]"
] | |
ef89ebbee0f0db544ff5bf1b817aff77405ecae0 | 7d274ce8dae971228a23157a409b561020c22f66 | /tools/packages/SCons/Tool/sunc++.py | 00fb8c85284d59226fd62f3cfb8e577783661690 | [] | no_license | Eigenlabs/EigenD-Contrib | a212884d4fdf9ae0e1aeb73f6311606212e02f94 | 586fe17471571802295c792697f255e6cab51b17 | refs/heads/master | 2020-05-17T07:54:48.668925 | 2013-02-05T10:20:56 | 2013-02-05T10:20:56 | 3,239,072 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,744 | py | """SCons.Tool.sunc++
Tool-specific initialization for C++ on SunOS / Solaris.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sunc++.py 4577 2009/12/27 19:43:56 scons"
import SCons
import os
import re
import subprocess
cplusplus = __import__('c++', globals(), locals(), [])
package_info = {}
def get_package_info(package_name, pkginfo, pkgchk):
try:
return package_info[package_name]
except KeyError:
version = None
pathname = None
try:
sadm_contents = open('/var/sadm/install/contents', 'r').read()
except EnvironmentError:
pass
else:
sadm_re = re.compile('^(\S*/bin/CC)(=\S*)? %s$' % package_name, re.M)
sadm_match = sadm_re.search(sadm_contents)
if sadm_match:
pathname = os.path.dirname(sadm_match.group(1))
try:
p = subprocess.Popen([pkginfo, '-l', package_name],
stdout=subprocess.PIPE,
stderr=open('/dev/null', 'w'))
except EnvironmentError:
pass
else:
pkginfo_contents = p.communicate()[0]
version_re = re.compile('^ *VERSION:\s*(.*)$', re.M)
version_match = version_re.search(pkginfo_contents)
if version_match:
version = version_match.group(1)
if pathname is None:
try:
p = subprocess.Popen([pkgchk, '-l', package_name],
stdout=subprocess.PIPE,
stderr=open('/dev/null', 'w'))
except EnvironmentError:
pass
else:
pkgchk_contents = p.communicate()[0]
pathname_re = re.compile(r'^Pathname:\s*(.*/bin/CC)$', re.M)
pathname_match = pathname_re.search(pkgchk_contents)
if pathname_match:
pathname = os.path.dirname(pathname_match.group(1))
package_info[package_name] = (pathname, version)
return package_info[package_name]
# use the Solaris package tools pkginfo and pkgchk to figure out where the
# CC compiler is installed and what version of it is installed
def get_cppc(env):
cxx = env.subst('$CXX')
if cxx:
cppcPath = os.path.dirname(cxx)
else:
cppcPath = None
cppcVersion = None
pkginfo = env.subst('$PKGINFO')
pkgchk = env.subst('$PKGCHK')
for package in ['SPROcpl']:
path, version = get_package_info(package, pkginfo, pkgchk)
if path and version:
cppcPath, cppcVersion = path, version
break
return (cppcPath, 'CC', 'CC', cppcVersion)
def generate(env):
"""Add Builders and construction variables for SunPRO C++."""
path, cxx, shcxx, version = get_cppc(env)
if path:
cxx = os.path.join(path, cxx)
shcxx = os.path.join(path, shcxx)
cplusplus.generate(env)
env['CXX'] = cxx
env['SHCXX'] = shcxx
env['CXXVERSION'] = version
env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS -KPIC')
env['SHOBJPREFIX'] = 'so_'
env['SHOBJSUFFIX'] = '.o'
def exists(env):
path, cxx, shcxx, version = get_cppc(env)
if path and cxx:
cppc = os.path.join(path, cxx)
if os.path.exists(cppc):
return cppc
return None
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| [
"[email protected]"
] | |
541e437fb4ea37f3049fb19b4f1687b8576a3ff7 | b9ffd9b9e88d497ee904e42dfd825080ee7713a9 | /files_from_working_server/waterscan-api/venv/bin/virtualenv | a6ba8866dbc0c5dce7c6083a719ce94eacc20629 | [] | no_license | naturalis/waterscan-ecosoft | a3d8e91d6634108b585a71c051f15216c8c3fdf4 | a2bcc3e656bbfb6ca08cd7e8ef7f119f0004d049 | refs/heads/master | 2021-06-15T05:26:15.457593 | 2019-06-21T09:39:22 | 2019-06-21T09:39:22 | 191,738,087 | 0 | 0 | null | 2021-05-06T19:36:36 | 2019-06-13T10:01:34 | Python | UTF-8 | Python | false | false | 239 | #!/home/ubuntu/waterscan-api/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from virtualenv import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
74aeddee7276ced1388155ecfd993003fe1085f4 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /NhPYFqfQcFXWvdH8t_5.py | f7d2eb52db8c7a1424e591f89c82b393d52cea0d | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,614 | py | """
A positive integer multiplied times its inverse is always equal to 1:
`17*(1/17)==1`. Modular arithmetic has a similar inverse function, although,
for modulus `m`, we are confined to integers from 0 to m-1. The modular
multiplicative inverse of 3 modulus 5 is equal to 2 because `(3*2)%5==1`.
Another example: the modular inverse of 17 modulus 1000007 is equal to 58824
because `(17*58824)%1000007==1`. The modular inverse, if it exists, must
always be in the range 0 to m-1.
Create a function that has arguments integer `n` and modulus `m`. The function
will return the modular inverse of `n` mod `m`. If the modular inverse does
not exist, return `False`.
### Examples
mod_inv(2, 3) ➞ 2
mod_inv(12, 47) ➞ 4
mod_inv(11, 33) ➞ False
mod_inv(55, 678) ➞ 37
mod_inv(81, 3455) ➞ 2346
### Notes
* Some of the test cases have rather large integers, so if you attempt to do a brute force search of the entire modular field, you may not be successful due to the 12 second time limit imposed by the server. See **Resources** for a more efficient approach.
* The modular inverse of a number `n` modulus `m` exists only if `n` and `m` are coprime (i.e. they have no common factors other than 1).
* One practical use of modular inverse is in public-key cryptography like RSA where it can be used to determine the value of the private key.
"""
def egcd(j, k):
if j == 0:
return (k, 0, 1)
h, y, x = egcd(k%j,j)
return (h, x - (k//j) * y, y)
def mod_inv(j, m):
h, x, y = egcd(j, m)
if h != 1:
return False
return x%m
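if __name__ == '__main__':
    # Minimal usage sketch (assumed driver, not in the original file) checking
    # the examples from the docstring above.
    print(mod_inv(2, 3))         # 2
    print(mod_inv(12, 47))       # 4
    print(mod_inv(11, 33))       # False: gcd(11, 33) = 11, so no inverse exists
    print(mod_inv(17, 1000007))  # 58824, since (17 * 58824) % 1000007 == 1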
| [
"[email protected]"
] | |
654671700188a0cf97b551f4f3716dcebb0ade85 | 48832d27da16256ee62c364add45f21b968ee669 | /res/scripts/client/gui/wgnc/events.py | 7291b9a2e8cb59d82254603badc1df9740d57f17 | [] | no_license | webiumsk/WOT-0.9.15.1 | 0752d5bbd7c6fafdd7f714af939ae7bcf654faf7 | 17ca3550fef25e430534d079876a14fbbcccb9b4 | refs/heads/master | 2021-01-20T18:24:10.349144 | 2016-08-04T18:08:34 | 2016-08-04T18:08:34 | 64,955,694 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 957 | py | # 2016.08.04 19:53:34 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/wgnc/events.py
import Event
class _WGNCEvents(object):
__slots__ = ('__eManager', 'onItemShowByDefault', 'onItemShowByAction', 'onItemUpdatedByAction', 'onProxyDataItemShowByDefault')
def __init__(self):
super(_WGNCEvents, self).__init__()
self.__eManager = Event.EventManager()
self.onItemShowByDefault = Event.Event(self.__eManager)
self.onItemShowByAction = Event.Event(self.__eManager)
self.onItemUpdatedByAction = Event.Event(self.__eManager)
self.onProxyDataItemShowByDefault = Event.Event(self.__eManager)
def clear(self):
self.__eManager.clear()
g_wgncEvents = _WGNCEvents()
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\wgnc\events.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.08.04 19:53:34 Central Europe (Daylight Saving Time)
| [
"[email protected]"
] | |
7dd5572eb2f7345c5c19117511b278a267f52dbb | 0cf7dd2c3c0b28b52f1273e8fe2ea0a87cacc6af | /calc_area_circum.py | 2bd6a681d10b6c96d0890ec76ce91d3e8c64ef23 | [] | no_license | EngrDevDom/Everyday-Coding-in-Python | 61b0e4fcbc6c7f399587deab2fa55763c9d519b5 | 93329ad485a25e7c6afa81d7229147044344736c | refs/heads/master | 2023-02-25T05:04:50.051111 | 2021-01-30T02:43:40 | 2021-01-30T02:43:40 | 274,971,215 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | R = 7
PI = 3.141592654
print("The area is", R**2 * PI)
print("The circumference is", 2*R*PI)
| [
"[email protected]"
] | |
66c71b03c28c724553f740d6e72d6d54448e2888 | 9b64f0f04707a3a18968fd8f8a3ace718cd597bc | /huaweicloud-sdk-bcs/huaweicloudsdkbcs/v2/model/show_blockchain_detail_request.py | 0799bf411b855abd953b527d517b0231e35885cf | [
"Apache-2.0"
] | permissive | jaminGH/huaweicloud-sdk-python-v3 | eeecb3fb0f3396a475995df36d17095038615fba | 83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b | refs/heads/master | 2023-06-18T11:49:13.958677 | 2021-07-16T07:57:47 | 2021-07-16T07:57:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,836 | py | # coding: utf-8
import re
import six
class ShowBlockchainDetailRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'blockchain_id': 'str'
}
attribute_map = {
'blockchain_id': 'blockchain_id'
}
def __init__(self, blockchain_id=None):
"""ShowBlockchainDetailRequest - a model defined in huaweicloud sdk"""
self._blockchain_id = None
self.discriminator = None
self.blockchain_id = blockchain_id
@property
def blockchain_id(self):
"""Gets the blockchain_id of this ShowBlockchainDetailRequest.
blockchainID
:return: The blockchain_id of this ShowBlockchainDetailRequest.
:rtype: str
"""
return self._blockchain_id
@blockchain_id.setter
def blockchain_id(self, blockchain_id):
"""Sets the blockchain_id of this ShowBlockchainDetailRequest.
blockchainID
:param blockchain_id: The blockchain_id of this ShowBlockchainDetailRequest.
:type: str
"""
self._blockchain_id = blockchain_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
import simplejson as json
return json.dumps(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowBlockchainDetailRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
1345771bc1b47bd1670f09a40a36343b34214e39 | f3110c8d0d1a232a0511ec559695882c1eb8594e | /DJANGO/quiz/views.py | 884b75847a4fa12c352577e3ce03ff8523cc36d7 | [] | no_license | SeungWookHan/Flutter-DRF | feb1394d52961824eac2a6e88c667a0e03375c47 | c793ccdacee1a4053a33471c226ff2ce8c5797dc | refs/heads/master | 2023-02-04T23:15:45.945139 | 2020-12-29T15:50:02 | 2020-12-29T15:50:02 | 324,955,539 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | from rest_framework.response import Response
from rest_framework.decorators import api_view
from .models import Quiz
from .serializers import QuizSerializer
import random
# Create your views here.
@api_view(['GET'])
def helloAPI(request):
return Response("hello world!")
@api_view(['GET'])
def randomQuiz(request, id):
totalQuizs = Quiz.objects.all()
randomQuizs = random.sample(list(totalQuizs), id)
    serializer = QuizSerializer(randomQuizs, many=True)  # many=True serializes the whole collection of quiz objects at once
return Response(serializer.data) | [
"[email protected]"
] | |
ba1076a3246d6802d9ea52c4729fe3b0503f4722 | 60448d1467b5a2531bab91e8bc721294a397e754 | /nmrpyschedule/generator.py | 6ae0f758186b260b88d0c9eb2eb10be36e7e9cae | [] | no_license | mattfenwick/NMRPySchedule | 8c7fda460b32f09138f08f15d302df4096075fb9 | 22e3399e9964137cb3e382b5805d457bb82e751f | refs/heads/master | 2021-01-18T16:32:23.013635 | 2013-06-11T15:36:29 | 2013-06-11T15:36:29 | 10,610,035 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,688 | py | '''
@author: matt
'''
import itertools
import math
def uniform(ranges):
'''
Generate a table of n-dimensional points containing all grid points within the given ranges.
Includes both boundaries.
'''
theNums = [range(low, high + 1) for (low, high) in ranges]
return itertools.product(*theNums)
_primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
def _haltonNumber(index, base):
result = 0
f = 1. / base
i = index
while i > 0:
result = result + f * (i % base)
i = int(i / base)
f = f / base
return result
def _scaledHaltonNumber(factor, shift, index, prime):
return int(factor * _haltonNumber(index, prime)) + shift
def halton(ranges):
'''
Generate subrandom sequence of n-dimensional points according to the Halton sequence.
Returns a generator of an infinite sequence.
'''
scalingFactors = [max(x) - min(x) for x in ranges]
shifts = [min(x) for x in ranges]
if len(ranges) > len(_primes):
raise ValueError("not enough primes defined: please define more or reduce the dimensionality")
ix = 0
while True:
pt = []
for (sf, s, p) in zip(scalingFactors, shifts, _primes):
pt.append(_scaledHaltonNumber(sf, s, ix, p))
yield pt
ix += 1
def _distance(pt, origin):
zipped = zip(pt, origin)
sumSquares = sum([abs(a - b) ** 2 for (a, b) in zipped])
dist = math.sqrt(sumSquares)
return dist
def _myDist(pt, origin, width, maxDeviation):
dist = _distance(pt, origin)
ratio = dist / width
return abs(ratio - round(ratio)) * width <= maxDeviation
def concentricShell(ranges, shellSpacing, maxDeviation):
'''
Generate all points whose distance from the origin is close to a multiple
of an arbitrary number. The origin is defined as the point whose coordinates
are the low end of each dimension's range.
'''
points = uniform(ranges)
origin = [r[0] for r in ranges]
return [pt for pt in points if _myDist(pt, origin, shellSpacing, maxDeviation)]
def _myFilter(pt, origin, offsetAngle, degreeGap, tolerance):
y,x = pt[0] - origin[0], pt[1] - origin[1]
    theta = math.atan2(x, y) * 180. / math.pi # angle in degrees
ratio = (theta + offsetAngle) / degreeGap
return abs(ratio - round(ratio)) * degreeGap < tolerance
def radial(ranges, offsetAngle, gapAngle, maximumDeviation):
'''
Generate coordinates of points, where the points lie along 'spokes' radiating out from the origin.
'''
allPoints = uniform(ranges)
origin = [r[0] for r in ranges]
return [pt for pt in allPoints if _myFilter(pt, origin, offsetAngle, gapAngle, maximumDeviation)]
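if __name__ == '__main__':
    # Illustrative usage only (this driver block is an assumption, not part of
    # the original module): a small 2-D range, the uniform grid size, the
    # first few subrandom Halton points, and a few concentric-shell points.
    ranges = [(0, 4), (0, 4)]
    print(len(list(uniform(ranges))))                 # 25 grid points
    print(list(itertools.islice(halton(ranges), 3)))  # first 3 Halton points
    print(concentricShell(ranges, 2.0, 0.25)[:3])     # points near shells of spacing 2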
| [
"[email protected]"
] | |
3b4f8d5f9825913e31189eddb81b7034aebe454f | 46669c775591b38f71382f690cb93a4879366595 | /src/020_create_xml.py | 2cdeed0319fdbdfe0862a1c99e4fb20e25ad7850 | [
"CC-BY-4.0"
] | permissive | kouigenjimonogatari/kouigenjimonogatari.github.io | e234abe0e4145bbe879756f6af19a546c01a2ff4 | c0ec798d550bda5670d8af15c4028ff925e6495d | refs/heads/master | 2022-10-12T19:52:05.229525 | 2022-10-04T09:34:51 | 2022-10-04T09:34:51 | 223,747,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,077 | py | import sys
import urllib
import json
import argparse
import urllib.request
import unicodedata
import collections
import os
import xml.etree.ElementTree as ET
import csv
import glob
import urllib.parse
def get_mdata(manifest):
print(manifest)
res = urllib.request.urlopen(manifest)
    # convert the JSON response to a Python object with json.loads()
data = json.loads(res.read().decode('utf-8'))
canvases = data["sequences"][0]["canvases"]
map = {}
for i in range(len(canvases)):
canvas = canvases[i]
canvas_id = canvas["@id"]
width = canvas["width"]
height = canvas["height"]
url = canvas["images"][0]["resource"]["@id"]
map[canvas_id] = {
"width": width,
"height": height,
"url": url
}
return map
vols = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12 ,13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54]
m_map = {}
for vol in vols:
prefix = ".//{http://www.tei-c.org/ns/1.0}"
xml = ".//{http://www.w3.org/XML/1998/namespace}"
tmp_path = "data/template.xml"
tree = ET.parse(tmp_path)
ET.register_namespace('', "http://www.tei-c.org/ns/1.0")
ET.register_namespace('xml', "http://www.w3.org/XML/1998/namespace")
root = tree.getroot()
para = root.find(prefix + "body").find(prefix + "p")
files = glob.glob("../api/items/*.json")
surfaceGrp = root.find(prefix+"surfaceGrp")
with open("../api/item_sets/"+str(vol).zfill(2)+".json", 'r') as f:
rdf_collection = json.load(f)
manifest = rdf_collection[0]["http://www.w3.org/2000/01/rdf-schema#seeAlso"][0]["@id"]
title = rdf_collection[0]["http://www.w3.org/2000/01/rdf-schema#label"][0]["@value"]
surfaceGrp.set("facs", manifest)
if manifest not in m_map:
m_map[manifest] = get_mdata(manifest)
canvas_data = m_map[manifest]
prev_page = -1
canvas_map = {}
for file in sorted(files):
with open(file, 'r') as f:
data = json.load(f)
# print(file)
value = data[0]["http://www.w3.org/2000/01/rdf-schema#label"][0]["@value"]
# if "http://example.org/冊数名" not in data[0]:
# continue
vol_ = int(data[0]["http://purl.org/dc/terms/isPartOf"][0]["@id"].split("/")[-1].split(".")[0])
if vol != vol_:
continue
root.find(prefix + "title").text = "校異源氏物語・"+ title
id = data[0]["@id"]
page = data[0]["https://w3id.org/kouigenjimonogatari/api/property/page"][0]["@value"]
        # a new page begins here
if page != prev_page:
prev_page = page
lb = ET.Element(
"{http://www.tei-c.org/ns/1.0}lb")
para.append(lb)
pb = ET.Element(
"{http://www.tei-c.org/ns/1.0}pb")
pb.set("n", str(page))
pb.set("facs", "#zone_"+str(page).zfill(4))
para.append(pb)
relation = data[0]["http://purl.org/dc/terms/relation"][0]["@id"]
relation = urllib.parse.unquote(relation)
canvas_id = relation.split("canvas=")[1]
obj = canvas_data[canvas_id]
if canvas_id not in canvas_map:
canvas_map[canvas_id] = {
"url": obj["url"],
"zones": []
}
if page % 2 == 0:
lrx = obj["width"]
ulx = int(lrx / 2)
else:
lrx = int(obj["width"] / 2)
ulx = 0
zone = ET.Element(
"{http://www.tei-c.org/ns/1.0}zone")
zone.set("xml:id", "zone_"+str(page).zfill(4))
zone.set("lrx", str(lrx))
zone.set("lry", str(obj["height"]))
zone.set("ulx", str(ulx))
zone.set("uly", str(0))
canvas_map[canvas_id]["zones"].append(zone)
lb = ET.Element(
"{http://www.tei-c.org/ns/1.0}lb")
para.append(lb)
line = ET.Element(
"{http://www.tei-c.org/ns/1.0}seg")
line.set("corresp", id)
line.text = value
# para.append(line)
para.append(line)
for canvas_id in canvas_map:
obj = canvas_map[canvas_id]
surface = ET.Element(
"{http://www.tei-c.org/ns/1.0}surface")
surfaceGrp.append(surface)
graphic = ET.Element(
"{http://www.tei-c.org/ns/1.0}graphic")
graphic.set("n", canvas_id)
graphic.set("url", obj["url"])
surface.append(graphic)
for zone in obj["zones"]:
surface.append(zone)
tree.write("../tei/"+str(vol).zfill(2)+".xml", encoding="utf-8")
| [
"[email protected]"
] | |
7920769fb9df2c743760034190be86dff1f1947a | 65c0ef56c2e2c3e1646a610f49e6dd06f2c6102d | /src/libs/cmd/implement/emulator/fastboot.py | e8c8437bb490f5e1cb28f6289ccb8449e2873cad | [
"MIT"
] | permissive | VirtualVFix/AndroidTestFramework | d3411f328a793ee7b007c4736983204aae81b739 | 1feb769c6aca39a78e6daefd6face0a1e4d62cd4 | refs/heads/master | 2020-08-11T14:48:12.454415 | 2019-10-12T10:20:43 | 2019-10-12T10:20:43 | 214,582,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py | # All rights reserved by forest fairy.
# You cannot modify or share anything without sacrifice.
# If you don't agree, keep calm and don't look at the code below!
__author__ = "VirtualV <https://github.com/virtualvfix>"
__date__ = "09/22/17 14:27"
from .cmd import Cmd
from libs.cmd.implement.base.fastboot import Fastboot
from libs.cmd.implement.base.cmd import Cmd as CmdBase
#: Replace :class:`implement.base.cmd.Cmd` class by :class:`implement.emulator.cmd.Cmd`
#: After the class replacement the emulator Fastboot class has the same signature as the base Fastboot
Fastboot.__bases__ = tuple([x if not issubclass(x, CmdBase) else Cmd for x in Fastboot.__bases__])
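if __name__ == '__main__':
    # Toy illustration (assumed example, not part of the framework) of the same
    # __bases__-swap trick used above: C initially derives from A, and after the
    # swap its base is B, just as the emulator Cmd replaces the base Cmd.
    class A(object): pass
    class B(object): pass
    class C(A): pass
    C.__bases__ = tuple([B if x is A else x for x in C.__bases__])
    print(C.__bases__)  # (<class '__main__.B'>,)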
| [
"github.com/virtualvfix"
] | github.com/virtualvfix |
1926722da71183f936fd15d9c412fe2e5f789af4 | 35fb71dd7b67fcee5e01e090e5f2a04dbbf30a15 | /network_base/week01/day02/lqueue.py | a2b102948e76f64e135371e6dfc924f57c1832a7 | [] | no_license | zlz2013/zlz | 3119795848ed9cc43708482a2aa3e764c1312394 | 228d04a30b0782d859323e507ddd0c7459635bfb | refs/heads/master | 2020-06-05T17:44:47.975328 | 2019-09-10T11:57:23 | 2019-09-10T11:57:23 | 192,500,784 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | """
lqueue.py  linked queue
key code

Approach:
1. build the linked queue on top of a linked-list model
2. the head of the list is the queue front, the tail end is the rear
"""
class LQueueError(Exception):
pass
class Node:
def __init__(self,data,next=None):
self.data=data
self.next=next
# linked queue class
class LQueue:
def __init__(self):
        # initially, front and rear both point to a dummy node carrying no real data
self.front=self.rear=Node(None)
def is_empty(self):
return self.front==self.rear
    # enqueue: the rear pointer moves
def enqueue(self,elem):
self.rear.next=Node(elem)
self.rear=self.rear.next
    # dequeue: the front pointer moves
def dequeue(self):
if self.front==self.rear:
raise LQueueError("Queue is empty")
self.front=self.front.next
return self.front.data
if __name__=="__main__":
lq=LQueue()
lq.enqueue(10)
lq.enqueue(20)
lq.enqueue(30)
while not lq.is_empty():
print(lq.dequeue()) | [
"[email protected]"
] | |
1e738f57abaf2f4bade3d418917aad39cbae070f | 7649278f4bda14aaf4ec02b7ae58094e16d98618 | /Project/scripts/cartpole_eval.py | 61018275f50b4f2d739e06cf8596805d284be6f9 | [] | no_license | peng00bo00/optlearningcontrol | 1877381ca749f17caf75ede02a5cb263cbddaa79 | 44eff6d17e4da0b0adc85e5e84cf4b8edb8a1bb8 | refs/heads/master | 2021-01-06T18:44:58.981575 | 2020-05-19T17:44:34 | 2020-05-19T17:44:34 | 241,445,231 | 0 | 0 | null | 2020-02-18T19:11:08 | 2020-02-18T19:11:08 | null | UTF-8 | Python | false | false | 1,121 | py | import numpy as np
import tensorflow as tf
import gym
from gym import wrappers
import os
import matplotlib.pyplot as plt
## environment
env = gym.make('CartPole-v0')
env = wrappers.Monitor(env, '../animations/', force=True)
env.reset()
## GPU configuration
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
def play(env, policy):
x = env.reset()
terminal = False
rewards = []
while not terminal:
env.render()
u = policy.predict(x.reshape([1, -1]))
u = np.argmax(u)
x, r, terminal, _ = env.step(u)
rewards.append(r)
return np.sum(rewards)
# DQN
policy = tf.keras.models.load_model("../models/DQN_q_network.h5")
play(env, policy)
## Double DQN
policy = tf.keras.models.load_model("../models/DoubleDQN_q_network.h5")
play(env, policy)
## Prioritized Experience Replay
policy = tf.keras.models.load_model("../models/PrioritizedDQN_q_network.h5")
play(env, policy)
## Dueling DQN
policy = tf.keras.models.load_model("../models/DeulDQN_q_network.h5")
play(env, policy) | [
"[email protected]"
] | |
874a69d989a964f5f0210a7eafbf994cd3c38d0c | 6ddcdda679089b228d55ef098addfe8193287d88 | /py/lpthw/test.py | 500c610d2d9010ee315cb403153222d93a1680c9 | [
"MIT"
] | permissive | danyfang/SourceCode | 518e4715a062ed1ad071dea023ff4785ce03b068 | 8168f6058648f2a330a7354daf3a73a4d8a4e730 | refs/heads/master | 2021-06-06T16:36:50.999324 | 2021-04-23T08:52:20 | 2021-04-23T08:52:20 | 120,310,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | #!/usr/bin/python
from __future__ import division
# lambda function to calculate the factorial of x
x = int(raw_input("Please input an integer\n > "))
print reduce(lambda x,y: x*y, range(1,x+1))
def add(x,y):
return x+y
def sub(x,y):
return x-y
def mul(x,y):
return x*y
def div(x,y):
return x/y
operator = {"+":add, "-":sub, "*":mul, "/":div}
if __name__ == "__main__":
    x = raw_input("Please input a number\n > ")
    o = raw_input("Please input an operator\n > ")
    y = raw_input("Please input a number\n > ")
print operator.get(o)(int(x), int(y))
| [
"[email protected]"
] | |
1672bbe239bcd2a87c3e5ce0491dc2f82dea0321 | 24d8cf871b092b2d60fc85d5320e1bc761a7cbe2 | /BitPim/rev3177-3275/right-branch-3275/phones/com_lgvx3200.py | 70be5ade0e3af9b75b64ff382802597ba2e16621 | [] | no_license | joliebig/featurehouse_fstmerge_examples | af1b963537839d13e834f829cf51f8ad5e6ffe76 | 1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad | refs/heads/master | 2016-09-05T10:24:50.974902 | 2013-03-28T16:28:47 | 2013-03-28T16:28:47 | 9,080,611 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 18,894 | py | """Communicate with the LG VX3200 cell phone
The VX3200 is somewhat similar to the VX4400
"""
import time
import cStringIO
import sha
import re
import common
import copy
import p_lgvx3200
import com_lgvx4400
import com_brew
import com_phone
import com_lg
import prototypes
import phone_media_codec
import conversions
media_codec=phone_media_codec.codec_name
class Phone(com_lgvx4400.Phone):
"Talk to the LG VX3200 cell phone"
desc="LG-VX3200"
wallpaperindexfilename="download/dloadindex/brewImageIndex.map"
ringerindexfilename="download/dloadindex/brewRingerIndex.map"
protocolclass=p_lgvx3200
serialsname='lgvx3200'
imagelocations=(
( 11, "download/dloadindex/brewImageIndex.map", "download", "images", 3) ,
)
ringtonelocations=(
( 27, "download/dloadindex/brewRingerIndex.map", "user/sound/ringer", "ringers", 30),
)
builtinimages= ('Sport 1', 'Sport 2', 'Nature 1', 'Nature 2',
'Animal', 'Martini', 'Goldfish', 'Umbrellas',
'Mountain climb', 'Country road')
builtinringtones= ('Ring 1', 'Ring 2', 'Ring 3', 'Ring 4', 'Ring 5', 'Ring 6',
'Ring 7', 'Ring 8', 'Annen Polka', 'Pachelbel Canon',
'Hallelujah', 'La Traviata', 'Leichte Kavallerie Overture',
'Mozart Symphony No.40', 'Bach Minuet', 'Farewell',
'Mozart Piano Sonata', 'Sting', 'O solemio',
'Pizzicata Polka', 'Stars and Stripes Forever',
'Pineapple Rag', 'When the Saints Go Marching In', 'Latin',
'Carol 1', 'Carol 2')
def __init__(self, logtarget, commport):
com_lgvx4400.Phone.__init__(self,logtarget,commport)
self.mode=self.MODENONE
self.mediacache=self.DirCache(self)
def makeentry(self, counter, entry, dict):
e=com_lgvx4400.Phone.makeentry(self, counter, entry, dict)
e.entrysize=0x202
return e
def getindex(self, indexfile):
"Read an index file"
index={}
if re.search("ImageIndex", indexfile) is not None:
ind=0
for ifile in 'wallpaper', 'poweron', 'poweroff':
ifilefull="download/"+ifile+".bit"
try:
mediafiledata=self.mediacache.readfile(ifilefull)
if len(mediafiledata)!=0:
index[ind]=ifile
ind = ind + 1
self.log("Index file "+indexfile+" entry added: "+ifile)
except:
pass
else:
try:
buf=prototypes.buffer(self.getfilecontents(indexfile))
except com_brew.BrewNoSuchFileException:
return index
g=self.protocolclass.indexfile()
g.readfrombuffer(buf, logtitle="Read index file "+indexfile)
for i in g.items:
if i.index!=0xffff:
ifile=re.sub("\.mid|\.MID", "", i.name)
self.log("Index file "+indexfile+" entry added: "+ifile)
index[i.index]=ifile
return index
def getmedia(self, maps, result, key):
"""Returns the contents of media as a dict where the key is a name as returned
by getindex, and the value is the contents of the media"""
media={}
type=None
for offset,indexfile,location,type,maxentries in maps:
index=self.getindex(indexfile)
for i in index:
if type=="images":
mediafilename=index[i]+".bit"
else:
mediafilename=index[i]+".mid"
try:
media[index[i]]=self.mediacache.readfile(location+"/"+mediafilename)
except com_brew.BrewNoSuchFileException:
self.log("Missing index file: "+location+"/"+mediafilename)
result[key]=media
return result
def savemedia(self, mediakey, mediaindexkey, maps, results, merge, reindexfunction):
"""Actually saves out the media
@param mediakey: key of the media (eg 'wallpapers' or 'ringtone')
@param mediaindexkey: index key (eg 'wallpaper-index')
@param maps: list index files and locations
@param results: results dict
@param merge: are we merging or overwriting what is there?
@param reindexfunction: the media is re-indexed at the end. this function is called to do it
"""
print results.keys()
wp=results[mediakey].copy()
wpi=results[mediaindexkey].copy()
for k in wp.keys():
wp[k]['name']=re.sub("\....$", "", wp[k]['name'])
for k in wpi.keys():
if wpi[k]['origin']=='builtin':
del wpi[k]
init={}
for offset,indexfile,location,type,maxentries in maps:
init[type]={}
for k in wpi.keys():
if wpi[k]['origin']==type:
index=k-offset
name=wpi[k]['name']
data=None
del wpi[k]
for w in wp.keys():
if wp[w]['name']==name and wp[w]['origin']==type:
data=wp[w]['data']
del wp[w]
if not merge and data is None:
continue
init[type][index]={'name': name, 'data': data}
print init.keys()
for w in wp.keys():
o=wp[w].get("origin", "")
if o is not None and len(o) and o in init:
idx=-1
while idx in init[o]:
idx-=1
init[o][idx]=wp[w]
del wp[w]
for offset,indexfile,location,type,maxentries in maps:
if type=="camera": break
index=init[type]
try:
dirlisting=self.getfilesystem(location)
except com_brew.BrewNoSuchDirectoryException:
self.mkdirs(location)
dirlisting={}
for i in dirlisting.keys():
dirlisting[i[len(location)+1:]]=dirlisting[i]
del dirlisting[i]
dellist=[]
if not merge:
wpi=results[mediaindexkey]
for i in wpi:
entry=wpi[i]
if entry['origin']==type:
delit=True
for idx in index:
if index[idx]['name']==entry['name']:
delit=False
break
if delit:
if type=="ringers":
entryname=entry['name']+".mid"
else:
entryname=entry['name']+".bit"
if entryname in dirlisting:
dellist.append(entryname)
else:
self.log("%s in %s index but not filesystem" % (entryname, type))
print "deleting",dellist
for f in dellist:
self.mediacache.rmfile(location+"/"+f)
if type=="images":
losem=[]
wpi=results[mediaindexkey]
for idx in index:
delit=True
for i in wpi:
entry=wpi[i]
if entry['origin']==type:
if index[idx]['name']==entry['name']:
delit=False
break
if delit:
self.log("Inhibited upload of illegit image (not originally on phone): "+index[idx]['name'])
losem.append(idx)
for idx in losem:
del index[idx]
while len(index)<maxentries and len(wp):
idx=-1
while idx in index:
idx-=1
k=wp.keys()[0]
index[idx]=wp[k]
del wp[k]
index=self._normaliseindices(index) # hey look, I called a function!
if len(index)>maxentries:
keys=index.keys()
keys.sort()
for k in keys[maxentries:]:
idx=-1
while idx in wp:
idx-=1
wp[idx]=index[k]
del index[k]
for k in index.keys():
if type=="ringers":
index[k]['name']=index[k]['name']+".mid"
else:
index[k]['name']=index[k]['name']+".bit"
keys=index.keys()
keys.sort()
ifile=self.protocolclass.indexfile()
ifile.numactiveitems=len(keys)
for k in keys:
entry=self.protocolclass.indexentry()
entry.index=k
entry.name=index[k]['name']
ifile.items.append(entry)
while len(ifile.items)<maxentries:
ifile.items.append(self.protocolclass.indexentry())
buffer=prototypes.buffer()
ifile.writetobuffer(buffer, autolog=False)
if type!="images":
self.logdata("Updated index file "+indexfile, buffer.getvalue(), ifile)
self.writefile(indexfile, buffer.getvalue())
for k in keys:
entry=index[k]
entryname=entry['name']
data=entry.get("data", None)
if type=="images":
if entryname!="wallpaper.bit" and entryname!="poweron.bit" and entryname!="poweroff.bit":
self.log("The wallpaper files can only be wallpaper.bmp, poweron.bmp or poweroff.bmp. "+entry['name']+" does not conform - skipping upload.")
continue
if data is None:
if entryname not in dirlisting:
self.log("Index error. I have no data for "+entryname+" and it isn't already in the filesystem - skipping upload.")
continue
if type=="images" and data[0:2]=="BM":
data=conversions.convertbmptolgbit(data)
if data is None:
self.log("The wallpaper BMP images must be 8BPP or 24BPP, "+entry['name']+", does not comply - skipping upload.")
continue
if type=="images" and (common.LSBUint16(data[0:2])!=128 or common.LSBUint16(data[2:4])!=128):
self.log("The wallpaper must be 128x128, "+entry['name']+", does not comply - skipping upload.")
continue
if type!="images":
if entryname in dirlisting and len(data)==dirlisting[entryname]['size']:
self.log("Skipping writing %s/%s as there is already a file of the same length" % (location,entryname))
continue
self.mediacache.writefile(location+"/"+entryname, data)
self.log("Wrote media file: "+location+"/"+entryname)
if len(wp):
for k in wp:
self.log("Unable to put %s on the phone as there weren't any spare index entries" % (wp[k]['name'],))
del results[mediakey] # done with it
reindexfunction(results)
return results
my_model='AX3200'
parentprofile=com_lgvx4400.Profile
class Profile(parentprofile):
protocolclass=Phone.protocolclass
serialsname=Phone.serialsname
phone_manufacturer='LG Electronics Inc'
phone_model='VX3200'
phone_manufacturer='LG Electronics Inc.'
phone_model='VX3200 107'
usbids=com_lgvx4400.Profile.usbids_usbtoserial
def convertphonebooktophone(self, helper, data):
"""Converts the data to what will be used by the phone
@param data: contains the dict returned by getfundamentals
as well as where the results go"""
results={}
speeds={}
self.normalisegroups(helper, data)
for pbentry in data['phonebook']:
if len(results)==self.protocolclass.NUMPHONEBOOKENTRIES:
break
e={} # entry out
entry=data['phonebook'][pbentry] # entry in
try:
serial1=helper.getserial(entry.get('serials', []), self.serialsname, data['uniqueserial'], 'serial1', 0)
serial2=helper.getserial(entry.get('serials', []), self.serialsname, data['uniqueserial'], 'serial2', serial1)
e['serial1']=serial1
e['serial2']=serial2
for ss in entry["serials"]:
if ss["sourcetype"]=="bitpim":
e['bitpimserial']=ss
assert e['bitpimserial']
e['name']=helper.getfullname(entry.get('names', []),1,1,22)[0]
cat=helper.makeone(helper.getcategory(entry.get('categories', []),0,1,22), None)
if cat is None:
e['group']=0
else:
key,value=self._getgroup(cat, data['groups'])
if key is not None:
if key>5:
e['group']=0
print "Custom Groups in PB not supported - setting to No Group for "+e['name']
else:
e['group']=key
else:
e['group']=0
emails=helper.getemails(entry.get('emails', []) ,0,self.protocolclass.NUMEMAILS,48)
e['emails']=helper.filllist(emails, self.protocolclass.NUMEMAILS, "")
e['url']=helper.makeone(helper.geturls(entry.get('urls', []), 0,1,48), "")
e['memo']=helper.makeone(helper.getmemos(entry.get('memos', []), 0, 1, self.protocolclass.MEMOLENGTH-1), "")
minnumbers=1
if len(emails): minnumbers=0
numbers=helper.getnumbers(entry.get('numbers', []),minnumbers,self.protocolclass.NUMPHONENUMBERS)
e['numbertypes']=[]
e['numbers']=[]
for numindex in range(len(numbers)):
num=numbers[numindex]
b4=len(e['numbertypes'])
type=num['type']
for i,t in enumerate(self.protocolclass.numbertypetab):
if type==t:
if i in e['numbertypes'] and t[-1]!='2':
type+='2'
continue
e['numbertypes'].append(i)
break
if t=='none': # conveniently last entry
e['numbertypes'].append(i)
break
if len(e['numbertypes'])==b4:
continue
number=self.phonize(num['number'])
if len(number)==0:
continue
if len(number)>48: # get this number from somewhere sensible
number=number[:48] # truncate for moment
e['numbers'].append(number)
sd=num.get("speeddial", -1)
if self.protocolclass.NUMSPEEDDIALS:
if sd>=self.protocolclass.FIRSTSPEEDDIAL and sd<=self.protocolclass.LASTSPEEDDIAL:
speeds[sd]=(e['bitpimserial'], numindex)
e['numbertypes']=helper.filllist(e['numbertypes'], 5, 0)
e['numbers']=helper.filllist(e['numbers'], 5, "")
ecring=helper.getringtone(entry.get('ringtones', []), 'call', None)
if ecring is not None:
if ecring not in Phone.builtinringtones:
print "Ringers past Carol 2 in PB not supported - setting to Default Ringer for "+e['name']+" id was: "+ecring
ecring=None
e['ringtone']=ecring
emring=helper.getringtone(entry.get('ringtones', []), 'message', None)
if emring is not None:
if emring not in Phone.builtinringtones:
print "Ringers past Carol 2 in PB not supported - setting to Default MsgRinger for "+e['name']+" id was: "+emring
emring=None
e['msgringtone']=emring
ewall=helper.getwallpaper(entry.get('wallpapers', []), 'call', None)
if ewall is not None:
print "Custom Wallpapers in PB not supported - setting to Default Wallpaper for "+e['name']
e['wallpaper']=None
e['secret']=helper.getflag(entry.get('flags',[]), 'secret', False)
results[pbentry]=e
except helper.ConversionFailed:
continue
if self.protocolclass.NUMSPEEDDIALS:
data['speeddials']=speeds
data['phonebook']=results
return data
_supportedsyncs=(
('phonebook', 'read', None), # all phonebook reading
('calendar', 'read', None), # all calendar reading
('wallpaper', 'read', None), # all wallpaper reading
('ringtone', 'read', None), # all ringtone reading
('phonebook', 'write', 'OVERWRITE'), # only overwriting phonebook
('calendar', 'write', 'OVERWRITE'), # only overwriting calendar
('wallpaper', 'write', 'OVERWRITE'), # merge and overwrite wallpaper
('ringtone', 'write', 'MERGE'), # merge and overwrite ringtone
('ringtone', 'write', 'OVERWRITE'),
('call_history', 'read', None),
('memo', 'read', None), # all memo list reading DJP
('memo', 'write', 'OVERWRITE'), # all memo list writing DJP
('sms', 'read', None),
('sms', 'write', 'OVERWRITE'),
)
WALLPAPER_WIDTH=128
WALLPAPER_HEIGHT=128
MAX_WALLPAPER_BASENAME_LENGTH=19
WALLPAPER_FILENAME_CHARS="abcdefghijklmnopqrstuvwxyz0123456789 ."
WALLPAPER_CONVERT_FORMAT="bmp"
MAX_RINGTONE_BASENAME_LENGTH=19
RINGTONE_FILENAME_CHARS="abcdefghijklmnopqrstuvxwyz0123456789 ."
imageorigins={}
imageorigins.update(common.getkv(parentprofile.stockimageorigins, "images"))
def GetImageOrigins(self):
return self.imageorigins
imagetargets={}
imagetargets.update(common.getkv(parentprofile.stockimagetargets, "wallpaper",
{'width': 128, 'height': 128, 'format': "BMP"}))
def GetTargetsForImageOrigin(self, origin):
return self.imagetargets
def __init__(self):
parentprofile.__init__(self)
| [
"[email protected]"
] | |
57e2ee283d3febe993b10065b968ba9f581b5a55 | 6a52db9b913c3677dfbcd55776e1a14cddde359d | /parceiros/migrations/0006_auto_20181117_0309.py | 3ab094723e58de570f6ab1ca3fb06592a7e4d342 | [] | no_license | tiagocordeiro/casaconceito-sie | 47a2922f328fa7c9e13e84dae1b6a9135edd6236 | 892e42a655bb4ef08952c5be167e281720f40b49 | refs/heads/master | 2023-08-31T14:48:21.396973 | 2021-05-11T18:18:07 | 2021-05-11T18:18:07 | 140,175,770 | 0 | 0 | null | 2023-09-13T15:14:42 | 2018-07-08T14:38:35 | HTML | UTF-8 | Python | false | false | 497 | py | # Generated by Django 2.1.3 on 2018-11-17 05:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('parceiros', '0005_auto_20181117_0251'),
]
operations = [
migrations.AlterField(
model_name='indicacaopagamentos',
name='indicacao',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='parceiros.Indicacao'),
),
]
| [
"[email protected]"
] | |
20da8a1571be3297fdc2a8720ab6d9c6f804eede | a0801d0e7325b31f0383fc68517e208680bb36d6 | /Kattis/anagramcounting.py | 362adbaa6dfaa8948b5b2fb3c59253bb2a0f31b6 | [] | no_license | conormccauley1999/CompetitiveProgramming | bd649bf04438817c7fa4755df2c2c7727273b073 | a7e188767364be40f625612af3d16182f2d8d4de | refs/heads/master | 2023-05-14T13:19:32.678134 | 2023-05-11T16:07:33 | 2023-05-11T16:07:33 | 179,089,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | from collections import Counter
_f = { 0: 1 }
def f(n):
    # memoized factorial: n! with results cached in _f
    if n not in _f:
        _f[n] = n * f(n - 1)
    return _f[n]
def g(s):
    # number of distinct anagrams of s:
    # len(s)! / product(count(c)! for every distinct character c)
    cs = Counter(s)
    vs = cs.values()
    l = len(s)
    r = f(l)
    for v in vs:
        r //= f(v)
    return r
while True:
try:
i = input()
print(g(i))
except:
break
| [
"[email protected]"
] | |
aab5320af9b48f92a2e321db7cb26674e6d0a401 | 24f2696aab87f1632705a7c8b2d3b866e26aa3ee | /LCA_236.py | 281941167ef5ab53585044747e11fcdfbd20eb5e | [] | no_license | adiggo/leetcode_py | 44a77a0b029f4d92bd0d8e24cad21ceea52e7794 | 4aa3a3a0da8b911e140446352debb9b567b6d78b | refs/heads/master | 2020-04-06T07:05:21.770518 | 2016-07-01T16:00:40 | 2016-07-01T16:00:40 | 30,397,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def lowestCommonAncestor(self, root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
if not root or root == p or root == q:
return root
left = self.lowestCommonAncestor(root.left, p, q)
right = self.lowestCommonAncestor(root.right, p, q)
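        # if p and q sit in different subtrees, root is their LCA; otherwise
        # whichever side is non-empty already holds the answer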
return right if not left else left if not right else root
| [
"[email protected]"
] | |
a22da995080be9b7137906676c4dc3cf0bb0d461 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_203/ch11_2019_08_15_11_47_33_547429.py | e6e08c29c09dcae0bbe100b081717e5f1418ada4 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | def celsius_para_fahrenheit (x) :
y = 1.8*x+32
return y
x = celsius_para_fahrenheit(7)
print (x) | [
"[email protected]"
] | |
4af9fff107581efba17158a157bc33c7f8d43be6 | f5390652068c736aea061a0979f27ba32b51784f | /Web/Web/views.py | dfd74dd32a7e1fe8b04516a9a481ccbc516c7484 | [] | no_license | kho903/Project_Reflux | 172c9bd7062f4cc9f84c576412557435f63906b5 | 0f1cdab08bc71d4c219b34839f63cc96f7c90d47 | refs/heads/master | 2022-12-14T03:29:21.008229 | 2020-09-02T09:24:16 | 2020-09-02T09:24:16 | 286,716,990 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | from django.contrib.auth.forms import UserCreationForm
from django.urls import reverse_lazy
from django.views.generic.base import TemplateView
from django.views.generic import CreateView
class HomeView(TemplateView):
template_name = 'home.html'
class UserCreateView(CreateView):
template_name = 'registration/register.html'
form_class = UserCreationForm
success_url = reverse_lazy('register_done')
class UserCreateDoneTV(TemplateView):
template_name = 'registration/register_done.html'
| [
"[email protected]"
] | |
8ad3db0ec4061062900fc2e03cbbae10b8f45f56 | 498d889585187ca56018b15f38880b8a671442b8 | /utils.py | 5c6fc73da2244ffe9d611253c389cb6fc386f278 | [] | no_license | mandasdasdasd/excel-fe | b89b06681bd7c91000f491a5f85f0c8577ac0fc3 | a81eb0085192c0932992745284c24efda9859241 | refs/heads/master | 2022-12-24T01:53:03.351947 | 2019-12-04T10:09:14 | 2019-12-04T10:09:14 | 205,658,439 | 0 | 0 | null | 2022-12-11T05:10:56 | 2019-09-01T10:01:07 | Vue | UTF-8 | Python | false | false | 218 | py | import hmac, random
class Encryption(object):
def __init__(self):
self.key = "bigdata"
def hmac_md5(self, s):
return hmac.new(self.key.encode('utf-8'), s.encode('utf-8'), 'MD5').hexdigest()
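if __name__ == "__main__":
    # Minimal usage sketch (assumed driver, not in the original module):
    # sign a value with the fixed HMAC-MD5 key defined above.
    enc = Encryption()
    print(enc.hmac_md5("hello"))  # 32-character hex digest, stable for a given key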
| [
"[email protected]"
] | |
e966a809733c647ed153d31bbebf7df6fc19afa7 | b66304878239ecea3e38593112bcb861fe9815db | /project_template/project_template/urls.py | ff276fbbe3737f276113407e1c2fa281c94dbdfe | [] | no_license | cowhite/django_pymongo_admin | e8ecd9fd193cf43489b9ac19d6a0444c719c7e42 | 8d814b248d82d7572e167be5ed2a2418d5eddd42 | refs/heads/master | 2020-07-03T02:12:26.559308 | 2016-11-19T15:18:04 | 2016-11-19T15:18:04 | 74,205,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 903 | py | """project_template URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^admin/pymongo/',
include("django_pymongo_admin.urls",
namespace="django-pymongo-admin")),
]
| [
"[email protected]"
] | |
c9ec417f68e16aaa3a781bc04a7e47b8cffff73c | c8c0d3e83dbec83ccb89a751dc3e656bb482a2ce | /ZombieGame/modules/coordinates.py | 2e949bd8c3dcf4cac12328f9fe1025eaec8889dd | [] | no_license | Yamase31/python-zombie-game | 80658bcfcb05b819265dfc75c5563391f19b1861 | dfd931ecf5caac9348b652862fc0b018979491d9 | refs/heads/main | 2023-07-07T08:58:43.314898 | 2021-08-10T00:33:36 | 2021-08-10T00:33:36 | 394,479,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,382 | py |
# Quick coordinate class to contain both x and y
# Overrides == for easy comparison
class Coordinates(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __eq__(self, other):
return self.x == other.x and self.y == other.y
def __sub__(self, other):
if type(other) == int:
return Coordinates(self.x - other, self.y - other)
if type(other) == Coordinates:
return Coordinates(self.x - other.x, self.y - other.y)
if type(other) == tuple:
return Coordinates(self.x - other[0], self.y - other[1])
def __add__(self, other):
if type(other) == int:
return Coordinates(self.x + other, self.y + other)
if type(other) == Coordinates:
return Coordinates(self.x + other.x, self.y + other.y)
if type(other) == tuple:
return Coordinates(self.x + other[0], self.y + other[1])
def __len__(self):
return 2
def __iter__(self):
self.current = 0
return self
def __next__(self):
if self.current >= len(self):
raise StopIteration
else:
self.current += 1
if self.current == 1:
return self.x
else:
return self.y
if __name__ == '__main__':
c = Coordinates(5,6)
print(*c)
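    # a couple more illustrative operations (values chosen arbitrarily):
    # arithmetic returns a new Coordinates, which unpacks via __iter__
    print(*(c + (1, 2)))           # 6 8
    print(c == Coordinates(5, 6))  # True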
| [
"[email protected]"
] | |
a01b71e2dae640d49f54d02cf08acedbab149c70 | 961931333838aebe8bd17c30c19f3994e32d76ce | /src/leetcode/bfs/279. Perfect Squares.py | 128380fcb8630cd5d95ab5e6415f0e7e36e9fcdd | [] | no_license | MTGTsunami/LeetPython | 5161f9e31dc2ab1855123c2a3a151eb6f4d889bc | f7f3839f631f08a9e5bf8a02398b940f82e43e67 | refs/heads/master | 2023-04-17T16:59:45.621291 | 2021-04-26T07:24:50 | 2021-04-26T07:24:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,624 | py | """
Given a positive integer n, find the least number of perfect square numbers (for example, 1, 4, 9, 16, ...) which sum to n.
Example 1:
Input: n = 12
Output: 3
Explanation: 12 = 4 + 4 + 4.
Example 2:
Input: n = 13
Output: 2
Explanation: 13 = 4 + 9.
"""
class MySolution(object): # A little bit larger than O(n) time
def numSquares(self, n):
"""
:type n: int
:rtype: int
"""
square = [float("inf")] * n
for i in range(1, n + 1):
sqrt = i ** 0.5
floor = int(sqrt)
if sqrt - floor == 0:
square[i - 1] = 1
nearest = floor
else:
while floor >= 1:
square[i - 1] = min(square[i - floor ** 2 - 1] + 1, square[i - 1])
floor -= 1
return square[-1]
class SolutionDP(object):
def numSquares(self, n):
"""
:type n: int
:rtype: int
"""
square = [float("inf")] * (n + 1)
square[0] = 0
for i in range(1, n + 1):
j = 1
while j * j <= i:
square[i] = min(square[i - j * j] + 1, square[i])
j += 1
return square[-1]
class SolutionMath(object):
def numSquares(self, n):
"""
:type n: int
:rtype: int
"""
def isSquare(n):
return (n ** 0.5 - int(n ** 0.5)) == 0
# Based on Lagrange's Four Square theorem, there
# are only 4 possible results: 1, 2, 3, 4.
# If n is a perfect square, return 1.
if isSquare(n):
return 1
# The result is 4 if and only if n can be written in the form of 4^k*(8*m + 7).
# Please refer to Legendre's four-square theorem.
while n % 4 == 0:
n /= 4
if n % 8 == 7:
return 4
for i in range(1, int(n ** 0.5) + 1):
if isSquare(n - i * i):
return 2
return 3
class SolutionBFS(object): # Important
def numSquares(self, n):
"""
:type n: int
:rtype: int
"""
depth = 0
nodes = set([n])
edges = [i * i for i in range(1, int(n ** 0.5) + 1)]
while True:
depth += 1
nextLevel = set()
for node in nodes:
for edge in edges:
if edge == node:
return depth
elif edge < node:
nextLevel.add(node - edge)
else:
break
nodes = nextLevel
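if __name__ == "__main__":
    # Quick check of the docstring examples (assumed driver code, not part of
    # the original file); any of the solution classes above would do here.
    print(SolutionDP().numSquares(12))   # 3 (4 + 4 + 4)
    print(SolutionBFS().numSquares(13))  # 2 (4 + 9)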
| [
"[email protected]"
] | |
2c12a85637d4448821f4e08fab01976870d8fdca | b3330bd3365767b89afb9c432f4deb722b39ac1c | /python/sort/selection_sort/selection_sort_10.py | d0142c054e2de0f2b0945ab15e296cef179f94f5 | [] | no_license | hguochen/algorithms | 944df332d5b39220bd59cbd62dc74b12e335fb9e | 703e71a5cd9e002d800340df879ed475a404d092 | refs/heads/master | 2022-02-27T12:11:10.607042 | 2022-02-18T21:04:00 | 2022-02-18T21:04:00 | 13,767,503 | 5 | 7 | null | null | null | null | UTF-8 | Python | false | false | 733 | py | # selection sort
def selection_sort(array):
"""
    Divides the array into a sorted and an unsorted sublist: the left sublist
    holds the elements already sorted, the right sublist the ones still unsorted.
    Each pass finds the least element in the unsorted list and moves it into the sorted list.
"""
# traverse the array
for i in xrange(len(array)):
# initialize min index
min_index = i
# find the least element in unsorted list and update min index
for j in xrange(i+1, len(array)):
if array[j] < array[min_index]:
min_index = j
# swap current element with min index value
array[i], array[min_index] = array[min_index], array[i]
# return array
return array
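if __name__ == "__main__":
    # small usage sketch (assumed driver, not in the original file):
    # sorts in place and also returns the same list object
    print selection_sort([29, 10, 14, 37, 13])  # [10, 13, 14, 29, 37]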
| [
"[email protected]"
] | |
83b9398ebef1b2841d29cff940e0595b3f5478ce | aa03bf381871d69fd93143c1697cdcd421cbe7e8 | /src/imageqa_visprior.py | 8c75929f9f66ec24a11fe8b9521fbd7954f5eb17 | [
"MIT"
] | permissive | standardgalactic/imageqa-public | 369073d2e3a9a454986533bb872445c8cafab95f | 4e3ceb092495fb8c1056e55b870631907bb31d46 | refs/heads/master | 2023-06-22T11:59:09.031307 | 2016-03-23T21:56:07 | 2016-03-23T21:56:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,898 | py | import sys
import os
import numpy as np
import nn
import imageqa_test as it
from nltk.corpus import wordnet
lexnameDict = {}
def lookupLexname(word):
if lexnameDict.has_key(word):
return lexnameDict[word]
else:
synsets = wordnet.synsets(word)
# Just pick the first definition
if len(synsets) > 0:
lexname = synsets[0].lexname()
lexnameDict[word] = lexname
return lexname
else:
return None
def locateObjLocation(data, questionDict, questionIdict):
"""
Locate the object of where questions.
Very naive heuristic: take the noun immediately after "where".
"""
where = questionDict['where']
for t in range(data.shape[0] - 1):
if data[t, 0] == where:
for u in range(t + 1, data.shape[0]):
word = questionIdict[data[u, 0] - 1]
lexname = lookupLexname(word)
if (lexname is not None and \
lexname.startswith('noun')) or \
(lexname is None):
return data[u, 0]
print 'not found'
return data[-1, 0]
def locateObjNumberNoun(data, questionDict, questionIdict):
"""
Locate the object of how many questions.
Very naive heuristic: take the noun immediately after "how many".
"""
how = questionDict['how']
many = questionDict['many']
for t in range(data.shape[0] - 2):
if data[t, 0] == how and \
data[t + 1, 0] == many:
for u in range(t + 2, data.shape[0]):
word = questionIdict[data[u, 0] - 1]
lexname = lookupLexname(word)
if (lexname is not None and \
lexname.startswith('noun')) or \
(lexname is None):
return data[u, 0]
print 'not found'
return data[-1, 0]
def locateObjNumber(data, questionDict):
"""
Locate the object of how many questions.
Very naive heuristic: take the word immediately after "how many".
"""
how = questionDict['how']
many = questionDict['many']
for t in range(data.shape[0] - 2):
if data[t, 0] == how and \
data[t + 1, 0] == many:
return data[t + 2, 0]
print 'not found'
def locateObjColor(data):
tmp = 0
for i in range(data.shape[0]):
if data[i, 0] != 0:
tmp = data[i, 0]
else:
return tmp
def extractObjId(
data,
questionType,
questionDict,
questionIdict):
objIds = []
for n in range(data.shape[0]):
if questionType == 'color':
objId = locateObjColor(data[n])
elif questionType == 'number':
objId = locateObjNumberNoun(data[n], questionDict, questionIdict)
elif questionType == 'location':
objId = locateObjLocation(data[n], questionDict, questionIdict)
objIds.append(objId)
return np.array(objIds, dtype='int')
def reindexObjId(
inputData,
objDict,
questionDict,
questionIdict,
questionType):
questionIdictArray = np.array(questionIdict, dtype='object')
objIds = extractObjId(
inputData,
questionType,
questionDict,
questionIdict)
objIds = objIds - 1
obj = questionIdictArray[objIds]
objIds2 = np.zeros(objIds.shape, dtype='int')
for i in range(obj.shape[0]):
if objDict.has_key(obj[i]):
objIds2[i] = objDict[obj[i]]
else:
objIds2[i] = objDict['UNK']
return objIds2
def buildObjDict(
trainData,
questionType,
questionDict,
questionIdict):
objDict = {}
objIdict = []
objIds = extractObjId(
trainData[0],
questionType,
questionDict,
questionIdict)
objIds = objIds - 1
questionIdictArray = np.array(questionIdict, dtype='object')
objList = questionIdictArray[objIds]
for obj in objList:
if not objDict.has_key(obj):
objDict[obj] = len(objIdict)
objIdict.append(obj)
objDict['UNK'] = len(objIdict)
objIdict.append('UNK')
return objDict, objIdict
def trainCount(
trainData,
questionType,
questionDict,
questionIdict,
objDict,
objIdict,
numAns):
"""
Calculates count(w, a), count(a)
"""
count_wa = np.zeros((len(objIdict), numAns))
count_a = np.zeros((numAns))
objIds = extractObjId(
trainData[0],
questionType,
questionDict,
questionIdict)
for i in range(objIds.shape[0]):
objId = objIds[i]
obj = questionIdict[objId - 1]
ansId = trainData[1][i, 0]
objId2 = objDict[obj]
count_wa[objId2, ansId] += 1
count_a[ansId] += 1
# Add UNK count
count_a[-1] += 1
return count_wa, count_a
def runVisPriorOnce(
objId,
count_wa,
count_a,
modelOutput,
delta):
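    # Bayes rule with additive smoothing: P(w|a) is estimated from training
    # counts (smoothed by delta), multiplied by the vision model's P(a|i), and
    # renormalized over answers to give P(a|w,i).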
P_w_a = count_wa[objId, :]
P_w_a /= count_a[:]
P_w_a += delta
P_w_a /= (modelOutput.shape[1] * delta + 1)
# (n, c)
P_a_i = modelOutput
# (n, c)
P_wai = P_w_a * P_a_i
P_a_wi = P_wai / np.sum(P_wai, axis=1).reshape(P_wai.shape[0], 1)
return P_a_wi
def calcRate(output, target):
outputMax = np.argmax(output, axis=-1)
outputMax = outputMax.reshape(outputMax.size)
targetReshape = target.reshape(target.size)
equals = (outputMax == targetReshape).astype('int')
rate = np.sum(equals) / \
float(target.size)
return rate, outputMax, equals
def validDelta(
trainData,
validData,
preVisModelOutput,
questionDict,
questionIdict,
numAns,
deltas,
questionType):
objDict, objIdict = buildObjDict(
trainData,
questionType,
questionDict,
questionIdict)
count_wa, count_a = trainCount(
trainData,
questionType,
questionDict,
questionIdict,
objDict,
objIdict,
numAns)
print count_wa
# Reindex valid set
validInput = validData[0]
validTarget = validData[1]
validTargetReshape = validTarget.reshape(validTarget.size)
validObjId = reindexObjId(
validInput,
objDict,
questionDict,
questionIdict,
questionType)
# Run vis model on valid set
validOutput = nn.test(preVisModel, validInput)
print 'Before Prior Valid Accuracy:',
rate, _, __ = calcRate(validOutput, validTarget)
print rate
# Determine best delta
bestRate = 0.0
bestDelta = 0.0
for delta in deltas:
visPriorOutput = runVisPriorOnce(
validObjId,
count_wa,
count_a,
validOutput,
delta)
print 'delta=%f Valid Accuracy:' % delta,
rate, _, __ = calcRate(visPriorOutput, validTarget)
print rate
if rate > bestRate:
bestRate = rate
bestDelta = delta
print 'Best Delta:', bestDelta
return bestDelta
def runVisPrior(
trainData,
testData,
questionType,
visModel,
questionDict,
questionIdict,
numAns,
delta):
objDict, objIdict = buildObjDict(
trainData,
questionType,
questionDict,
questionIdict)
count_wa, count_a = trainCount(
trainData,
questionType,
questionDict,
questionIdict,
objDict,
objIdict,
numAns)
print count_wa
# Reindex test set
testInput = testData[0]
testTarget = testData[1]
testTargetReshape = testTarget.reshape(testTarget.size)
testObjId = reindexObjId(
testInput,
objDict,
questionDict,
questionIdict,
questionType)
# Run vis model on test set
testOutput = nn.test(visModel, testInput)
print 'Before Prior Test Accuracy:',
rate, _, __ = calcRate(testOutput, testTarget)
print rate
# Run on test set
visPriorOutput = runVisPriorOnce(
testObjId,
count_wa,
count_a,
testOutput,
delta)
print 'delta=%f Test Accuracy:' % delta,
rate, _, __ = calcRate(visPriorOutput, testTarget)
print rate
return visPriorOutput
def combineTrainValid(trainData, validData):
trainDataAll = (np.concatenate((trainData[0], validData[0]), axis=0),
np.concatenate((trainData[1], validData[1]), axis=0))
return trainDataAll
def calcAdaBoostAlpha(testOutput, testTarget):
print 'Calculating alpha for boosting...'
rate, _, correct = calcRate(testOutput, testTarget)
alpha = np.log(rate / (1 - rate)) + np.log(float(testOutput.shape[1] - 1))
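    # Multi-class (SAMME) AdaBoost weight: log(acc / (1 - acc)) + log(K - 1),
    # where K is the number of output classes.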
print 'alpha:', alpha
return alpha
def calcAdaBoostWeights(trainOutput, trainTarget, alpha):
print 'Calculating weights for boosting...'
rate, _, correct = calcRate(trainOutput, trainTarget)
print correct
print 'Train set rate:', rate
correct2 = -(correct.astype('float32') - 0.5) * 2
weights = np.exp(correct2 * alpha)
weights /= np.sum(weights)
weights *= weights.shape[0]
print 'weights:', weights
return weights
if __name__ == '__main__':
"""
Usage:
python imageqa_visprior.py
-pvid {preVisModelId}
-vid {visModelId}
-mid {mainModelId}
-bid {boostModelId}
-vd[ata] {visDataFolder}
-md[ata] {mainDataFolder}
-r[esults] {resultsFolder}
-qtype {color/number/location}
-o[utweights] {outputFolder}
"""
questionType = 'color'
visModelId = None
mainModelId = None
boostModelId = None
outputWeightsFolder = None
for i, flag in enumerate(sys.argv):
if flag == '-pvid':
preVisModelId = sys.argv[i + 1]
elif flag == '-vid':
visModelId = sys.argv[i + 1]
elif flag == '-mid':
mainModelId = sys.argv[i + 1]
elif flag == '-bid':
boostModelId = sys.argv[i + 1]
elif flag == '-vd' or flag == '-vdata':
visDataFolder = sys.argv[i + 1]
elif flag == '-md' or flag == '-mdata':
mainDataFolder = sys.argv[i + 1]
elif flag == '-r' or flag == '-results':
resultsFolder = sys.argv[i + 1]
elif flag == '-qtype':
questionType = sys.argv[i + 1]
elif flag == '-o' or flag == '-outweights':
outputWeightsFolder = sys.argv[i + 1]
data = it.loadDataset(visDataFolder)
testInput = data['testData'][0]
testTarget = data['testData'][1]
deltas = \
[0.000001,
0.000005,
0.00001,
0.00005,
0.0001,
0.0005,
0.001,
0.005,
0.01,
0.05,
0.1,
0.5,
1.0]
preVisModel = it.loadModel(preVisModelId, resultsFolder)
print 'Num answer', len(data['ansIdict'])
bestDelta = validDelta(
data['trainData'],
data['validData'],
preVisModel,
data['questionDict'],
data['questionIdict'],
len(data['ansIdict']),
deltas,
questionType)
trainDataAll = combineTrainValid(data['trainData'], data['validData'])
visModel = it.loadModel(visModelId, resultsFolder)
visTestOutput = runVisPrior(trainDataAll,
data['testData'],
questionType,
visModel,
data['questionDict'],
data['questionIdict'],
len(data['ansIdict']),
bestDelta)
visModelFolder = os.path.join(resultsFolder, visModelId)
answerFilename = os.path.join(visModelFolder,
visModelId + '_prior.test.o.txt')
truthFilename = os.path.join(visModelFolder,
visModelId + '_prior.test.t.txt')
it.outputTxt(
visTestOutput,
testTarget,
data['ansIdict'],
answerFilename,
truthFilename,
topK=1,
outputProb=False)
it.runWups(answerFilename, truthFilename)
if mainModelId is not None:
data_m = it.loadDataset(mainDataFolder)
ansDict_m = data_m['ansDict']
ansIdict = data['ansIdict']
questionDict_m = data_m['questionDict']
questionIdict = data['questionIdict']
newTestInput = np.zeros(testInput.shape, dtype='int')
for n in range(testInput.shape[0]):
newTestInput[n, 0, 0] = testInput[n, 0, 0]
for t in range(1, testInput.shape[1]):
if testInput[n, t, 0] != 0:
word = questionIdict[testInput[n, t, 0] - 1]
newTestInput[n, t, 0] = questionDict_m[word]
else:
break
mainModel = it.loadModel(mainModelId, resultsFolder)
mainTestOutput = nn.test(mainModel, newTestInput)
# Need to extract the class output from mainTestOutput
classNewId = []
for ans in ansIdict:
classNewId.append(ansDict_m[ans])
classNewId = np.array(classNewId, dtype='int')
mainTestOutput = mainTestOutput[:, classNewId]
for i in range(len(ansIdict)):
mixRatio = i / 10.0
ensTestOutput = mixRatio * visTestOutput + \
(1 - mixRatio) * mainTestOutput
print '%.2f VIS+PRIOR & %.2f VIS+BLSTM Accuracy:' % \
(mixRatio, 1 - mixRatio),
rate, _, __ = calcRate(ensTestOutput, testTarget)
print rate
if boostModelId is not None:
boostModel = it.loadModel(boostModelId, resultsFolder)
boostTestOutput = nn.test(boostModel, testInput)
alpha = calcAdaBoostAlpha(visTestOutput, testTarget)
alphaBoost = calcAdaBoostAlpha(boostTestOutput, testTarget)
finalTestOutput = (alpha * visTestOutput + \
alphaBoost * boostTestOutput) / \
(alpha + alphaBoost)
rate, _, __ = calcRate(finalTestOutput, testTarget)
answerFilename = os.path.join(visModelFolder,
visModelId + '_boost.test.o.txt')
truthFilename = os.path.join(visModelFolder,
visModelId + '_boost.test.t.txt')
it.outputTxt(
finalTestOutput,
testTarget,
data['ansIdict'],
answerFilename,
truthFilename,
topK=1,
outputProb=False)
it.runWups(answerFilename, truthFilename)
if outputWeightsFolder is not None:
if not os.path.exists(outputWeightsFolder):
os.makedirs(outputWeightsFolder)
alpha = calcAdaBoostAlpha(visTestOutput, testTarget)
visTrainOutput = runVisPrior(trainDataAll,
trainDataAll,
questionType,
visModel,
data['questionDict'],
data['questionIdict'],
len(data['ansIdict']),
bestDelta)
weights = calcAdaBoostWeights(visTrainOutput, trainDataAll[1], alpha)
trainWeights = weights[:data['trainData'][1].shape[0]]
validWeights = weights[trainWeights.shape[0]:]
np.save(os.path.join(outputWeightsFolder, 'adb-weights-train.npy'), trainWeights)
np.save(os.path.join(outputWeightsFolder, 'adb-weights-valid.npy'), validWeights) | [
"[email protected]"
] | |
2db11fc713334d1c4d17ecf444cf9726e26cc5dd | 055cf8aeec011f67580bf92a83d94ee6919648cd | /migrations/versions/ad28a44f93c4_initial_migration.py | 18999b6182f1570c2b30ca638cbdbed3b8a6a43e | [
"MIT",
"LicenseRef-scancode-other-permissive"
] | permissive | RisperAkinyi/BlogPost | df82c8fec558425ca1bbce65aa90464176aefb87 | f8ee4c887fceae8e70410b66a12bc5680cf26044 | refs/heads/master | 2022-09-30T19:09:27.969983 | 2019-08-13T07:36:26 | 2019-08-13T07:36:26 | 201,879,164 | 0 | 0 | MIT | 2022-09-16T18:07:44 | 2019-08-12T07:22:39 | Python | UTF-8 | Python | false | false | 2,128 | py | """Initial Migration
Revision ID: ad28a44f93c4
Revises:
Create Date: 2019-08-09 11:05:50.912878
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ad28a44f93c4'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=255), nullable=True),
sa.Column('email', sa.String(length=255), nullable=True),
sa.Column('bio', sa.String(length=255), nullable=True),
sa.Column('profile_pic_path', sa.String(), nullable=True),
sa.Column('password_hash', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=False)
op.create_table('comments',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('comments', sa.String(), nullable=True),
sa.Column('post_id', sa.Integer(), nullable=True),
sa.Column('posted', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('posts',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(), nullable=True),
sa.Column('description', sa.String(), nullable=True),
sa.Column('posted', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('posts')
op.drop_table('comments')
op.drop_index(op.f('ix_users_username'), table_name='users')
op.drop_index(op.f('ix_users_email'), table_name='users')
op.drop_table('users')
# ### end Alembic commands ###
| [
"[email protected]"
] | |
08a41f586570d5ba0baa10410a977b1169ac947f | 4be9a5bdb8e051001b78c8f127ccc1a7f85c14e7 | /mapping/migrations/0033_auto_20170129_0939.py | 90fce4536a94b43eded5f95299f301669aa5c874 | [] | no_license | quentin-david/heimdall | f72a85606e7ab53683df2023ef5eaba762198211 | 84a429ee52e1891bc2ee4eb07a084dff209c789c | refs/heads/master | 2021-01-21T10:26:28.895663 | 2017-07-21T19:19:46 | 2017-07-21T19:19:46 | 83,432,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-29 09:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mapping', '0032_servicewebserver_reverse_proxy'),
]
operations = [
migrations.AlterField(
model_name='servicereverseproxy',
name='servername',
field=models.CharField(blank=True, max_length=30, null=True),
),
]
| [
"[email protected]"
] | |
22f6c9a8e5f0d726c42869ef47714dc1722c3f56 | 1d230df0e7b96bdb9d0b56f6f14ac5379915ed4f | /tensorflow/python/keras/metrics.py | b18f12612a849d8d8b7e2465ff8075d35764000e | [
"Apache-2.0"
] | permissive | plddxr/tensorflow | afc5404ca9c089ca84700f9f055ef2bdc6c824f5 | 611edc8c515678c1d1b95ece09e6a374c9790716 | refs/heads/master | 2020-03-25T21:49:46.011109 | 2018-08-09T18:38:42 | 2018-08-09T18:43:28 | 144,191,426 | 1 | 0 | Apache-2.0 | 2018-08-09T18:48:44 | 2018-08-09T18:48:43 | null | UTF-8 | Python | false | false | 21,647 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
"""Built-in metrics.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
import types
import six
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.losses import binary_crossentropy
from tensorflow.python.keras.losses import categorical_crossentropy
from tensorflow.python.keras.losses import cosine_proximity
from tensorflow.python.keras.losses import hinge
from tensorflow.python.keras.losses import kullback_leibler_divergence
from tensorflow.python.keras.losses import logcosh
from tensorflow.python.keras.losses import mean_absolute_error
from tensorflow.python.keras.losses import mean_absolute_percentage_error
from tensorflow.python.keras.losses import mean_squared_error
from tensorflow.python.keras.losses import mean_squared_logarithmic_error
from tensorflow.python.keras.losses import poisson
from tensorflow.python.keras.losses import sparse_categorical_crossentropy
from tensorflow.python.keras.losses import squared_hinge
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util.tf_export import tf_export
def check_is_tensor_or_operation(x, name):
"""Raises type error if the given input is not a tensor or operation."""
if not (isinstance(x, ops.Tensor) or isinstance(x, ops.Operation)):
raise TypeError('{0} must be a Tensor or Operation, given: {1}'.format(
name, x))
def update_state_wrapper(update_state_fn):
"""Decorator to wrap metric `update_state()` with `defun()`, `add_update()`.
Args:
update_state_fn: function that accumulates metric statistics.
Returns:
If eager execution is enabled, returns None.
If graph execution is enabled, returns an update op. This op should be
executed to update the metric state with the given inputs.
"""
def decorated(metric_obj, *args, **kwargs):
"""Decorated function with `defun()` and `add_update()`."""
# Converting update_state_fn() into a graph function, so that
# we can return a single op that performs all of the variable updates.
# Assigning to a different method name to avoid reference cycle.
defuned_update_state_fn = function.defun(update_state_fn)
update_op = defuned_update_state_fn(*args, **kwargs)
if update_op is not None: # update_op will be None in eager execution.
metric_obj.add_update(update_op, inputs=True)
check_is_tensor_or_operation(
update_op, 'Metric {0}\'s update'.format(metric_obj.name))
return update_op
return tf_decorator.make_decorator(update_state_fn, decorated)
def result_wrapper(result_fn):
"""Decorator to wrap metric `result()` function in `merge_call()`.
Result computation is an idempotent operation that simply calculates the
metric value using the state variables.
If metric state variables are distributed across towers/devices and
`result()` is requested from the context of one device - This function wraps
`result()` in a distribution strategy `merge_call()`. With this,
the metric state variables will be aggregated across devices.
Args:
result_fn: function that computes the metric result.
Returns:
The metric result tensor.
"""
def decorated(metric_obj, *args):
"""Decorated function with merge_call."""
tower_context = distribute_lib.get_tower_context()
if tower_context is None: # if in cross tower context already
result_t = result_fn(*args)
else:
# TODO(psv): Test distribution of metrics using different distribution
# strategies.
# Creating a wrapper for merge_fn. merge_call invokes the given merge_fn
# with distribution object as the first parameter. We create a wrapper
# here so that the result function need not have that parameter.
def merge_fn_wrapper(distribution, merge_fn, *args):
# We will get `PerDevice` merge function. Taking the first one as all
# are identical copies of the function that we had passed below.
return distribution.unwrap(merge_fn)[0](*args)
# Wrapping result in merge_call. merge_call is used when we want to leave
# tower mode and compute a value in cross tower mode.
result_t = tower_context.merge_call(merge_fn_wrapper, result_fn, *args)
check_is_tensor_or_operation(result_t,
'Metric {0}\'s result'.format(metric_obj.name))
return result_t
return tf_decorator.make_decorator(result_fn, decorated)
def safe_div(numerator, denominator):
"""Divides two tensors element-wise, returning 0 if the denominator is <= 0.
Args:
numerator: A `Tensor`.
denominator: A `Tensor`, with dtype matching `numerator`.
Returns:
0 if `denominator` <= 0, else `numerator` / `denominator`
"""
t = math_ops.truediv(numerator, denominator)
zero = array_ops.zeros_like(t, dtype=denominator.dtype)
condition = math_ops.greater(denominator, zero)
zero = math_ops.cast(zero, t.dtype)
return array_ops.where(condition, t, zero)
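
# Illustrative behaviour (for exposition, with scalar tensors): safe_div(4., 2.)
# evaluates to 2.0, while a zero or negative denominator yields 0.0 rather than
# inf/nan.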
def squeeze_or_expand_dimensions(y_pred, y_true, sample_weight):
"""Squeeze or expand last dimension if needed.
1. Squeezes last dim of `y_pred` or `y_true` if their rank differs by 1
(using `confusion_matrix.remove_squeezable_dimensions`).
2. Squeezes or expands last dim of `sample_weight` if its rank differs by 1
from the new rank of `y_pred`.
If `sample_weight` is scalar, it is kept scalar.
This will use static shape if available. Otherwise, it will add graph
operations, which could result in a performance hit.
Args:
y_pred: Predicted values, a `Tensor` of arbitrary dimensions.
y_true: Optional label `Tensor` whose dimensions match `y_pred`.
sample_weight: Optional weight scalar or `Tensor` whose dimensions match
`y_pred`.
Returns:
Tuple of `y_pred`, `y_true` and `sample_weight`. Each of them possibly has
the last dimension squeezed,
`sample_weight` could be extended by one dimension.
"""
if y_true is not None:
# squeeze last dim of `y_pred` or `y_true` if their rank differs by 1
y_true, y_pred = confusion_matrix.remove_squeezable_dimensions(
y_true, y_pred)
y_pred.get_shape().assert_is_compatible_with(y_true.get_shape())
if sample_weight is None:
return y_pred, y_true, None
sample_weight = ops.convert_to_tensor(sample_weight)
weights_shape = sample_weight.get_shape()
weights_rank = weights_shape.ndims
if weights_rank == 0: # If weights is scalar, do nothing.
return y_pred, y_true, sample_weight
y_pred_shape = y_pred.get_shape()
y_pred_rank = y_pred_shape.ndims
if (y_pred_rank is not None) and (weights_rank is not None):
# Use static rank.
if weights_rank - y_pred_rank == 1:
sample_weight = array_ops.squeeze(sample_weight, [-1])
elif y_pred_rank - weights_rank == 1:
sample_weight = array_ops.expand_dims(sample_weight, [-1])
return y_pred, y_true, sample_weight
# Use dynamic rank.
weights_rank_tensor = array_ops.rank(sample_weight)
rank_diff = weights_rank_tensor - array_ops.rank(y_pred)
maybe_squeeze_weights = lambda: array_ops.squeeze(sample_weight, [-1])
def _maybe_expand_weights():
return control_flow_ops.cond(
math_ops.equal(rank_diff,
-1), lambda: array_ops.expand_dims(sample_weight, [-1]),
lambda: sample_weight)
def _maybe_adjust_weights():
return control_flow_ops.cond(
math_ops.equal(rank_diff, 1), maybe_squeeze_weights,
_maybe_expand_weights)
# squeeze or expand last dim of `sample_weight` if its rank differs by 1
# from the new rank of `y_pred`.
sample_weight = control_flow_ops.cond(
math_ops.equal(weights_rank_tensor, 0), lambda: sample_weight,
_maybe_adjust_weights)
return y_pred, y_true, sample_weight
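
# Shape example (assumed): y_pred of shape (2, 1) with y_true of shape (2,) is
# squeezed to (2,); a scalar sample_weight passes through unchanged.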
class Metric(Layer):
"""Encapsulates metric logic and state.
Usage with eager execution:
```python
m = SomeMetric(...)
for input in ...:
m.update_state(input)
print('Final result: ', m.result().numpy())
```
Usage with graph execution:
```python
m = SomeMetric(...)
init_op = tf.global_variables_initializer() # Initialize variables
with tf.Session() as sess:
sess.run(init_op)
for input in ...:
update_op = m.update_state(input)
sess.run(update_op)
print('Final result: ', sess.run(m.result()))
```
To be implemented by subclasses:
* `__init__()`: All state variables should be created in this method by
calling `self.add_weight()` like: `self.var = self.add_weight(...)`
* `update_state()`: Has all updates to the state variables like:
self.var.assign_add(...).
* `result()`: Computes and returns a value for the metric
from the state variables.
Example subclass implementation:
```
class BinaryTruePositives(Metric):
def __init__(self, name='binary-true-positives', dtype=None):
super(BinaryTruePositives, self).__init__(name=name, dtype=dtype)
self.true_positives = self.add_weight(
'true_positives', initializer=init_ops.zeros_initializer)
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = math_ops.cast(y_true, dtypes.bool)
y_pred = math_ops.cast(y_pred, dtypes.bool)
y_pred, y_true, sample_weight = squeeze_or_expand_dimensions(
y_pred, y_true, sample_weight)
values = math_ops.logical_and(
math_ops.equal(y_true, True), math_ops.equal(y_pred, True))
values = math_ops.cast(values, self._dtype)
if sample_weight is not None:
sample_weight = math_ops.cast(sample_weight, self._dtype)
values = math_ops.multiply(values, sample_weight)
state_ops.assign_add(self.true_positives, math_ops.reduce_sum(values))
def result(self):
return array_ops.identity(self.true_positives)
```
"""
__metaclass__ = ABCMeta
def __init__(self, name=None, dtype=None):
super(Metric, self).__init__(name=name, dtype=dtype)
self.stateful = True # All metric layers are stateful.
self.built = True
self._dtype = K.floatx() if dtype is None else dtypes.as_dtype(dtype).name
def __new__(cls, *args, **kwargs):
obj = super(Metric, cls).__new__(cls, *args, **kwargs)
obj.update_state = types.MethodType(
update_state_wrapper(obj.update_state), obj)
obj.result = types.MethodType(result_wrapper(obj.result), obj)
return obj
def __call__(self, *args, **kwargs):
"""Accumulates statistics and then computes metric result value.
Args:
*args:
**kwargs: A mini-batch of inputs to the Metric,
passed on to `update_state()`.
Returns:
The metric value tensor.
"""
update_op = self.update_state(*args, **kwargs) # pylint: disable=not-callable
with ops.control_dependencies([update_op]):
return self.result() # pylint: disable=not-callable
def reset_states(self):
"""Resets all of the metric state variables.
This function is called between epochs/steps,
when a metric is evaluated during training.
"""
for v in self.variables:
K.set_value(v, 0)
@abstractmethod
def update_state(self, *args, **kwargs):
"""Accumulates statistics for the metric.
Note: This function is executed as a graph function in graph mode.
This means:
a) Operations on the same resource are executed in textual order.
This should make it easier to do things like add the updated
value of a variable to another, for example.
b) You don't need to worry about collecting the update ops to execute.
All update ops added to the graph by this function will be executed.
As a result, code should generally work the same way with graph or
eager execution.
and adds the update op to the metric layer.
Args:
*args:
**kwargs: A mini-batch of inputs to the Metric.
"""
    raise NotImplementedError('Must be implemented in subclasses.')
@abstractmethod
def result(self):
"""Computes and returns the metric value tensor.
Result computation is an idempotent operation that simply calculates the
metric value using the state variables.
"""
    raise NotImplementedError('Must be implemented in subclasses.')
### For use by subclasses ###
def add_weight(self,
name,
shape=(),
aggregation=vs.VariableAggregation.SUM,
synchronization=vs.VariableSynchronization.ON_READ,
initializer=None):
"""Adds state variable. Only for use by subclasses."""
return super(Metric, self).add_weight(
name=name,
shape=shape,
dtype=self._dtype,
trainable=False,
initializer=initializer,
synchronization=synchronization,
aggregation=aggregation)
### End: For use by subclasses ###
class Mean(Metric):
"""Computes the (weighted) mean of the given values.
This metric creates two variables, `total` and `count` that are used to
compute the average of `values`. This average is ultimately returned as `mean`
which is an idempotent operation that simply divides `total` by `count`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
"""
def __init__(self, name='mean', dtype=None):
"""Creates a `Mean` instance.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(Mean, self).__init__(name=name, dtype=dtype)
# Create new state variables
self.total = self.add_weight(
'total', initializer=init_ops.zeros_initializer)
self.count = self.add_weight(
'count', initializer=init_ops.zeros_initializer)
def update_state(self, values, sample_weight=None):
"""Accumulates statistics for computing the mean.
For example, if `values` is [1, 3, 5, 7] then the mean is 4. If
the `sample_weight` is specified as [1, 1, 0, 0] then the mean would be 2.
Args:
values: Per-example value.
sample_weight: Optional weighting of each example. Defaults to 1.
"""
values = math_ops.cast(values, self._dtype)
if sample_weight is None:
num_values = math_ops.cast(array_ops.size(values), self._dtype)
else:
sample_weight = math_ops.cast(sample_weight, self._dtype)
# Update dimensions of weights to match with values if possible.
values, _, sample_weight = squeeze_or_expand_dimensions(
values, None, sample_weight)
try:
# Broadcast weights if possible.
sample_weight = weights_broadcast_ops.broadcast_weights(
sample_weight, values)
except ValueError:
# Reduce values to same ndim as weight array
ndim = K.ndim(values)
weight_ndim = K.ndim(sample_weight)
values = math_ops.reduce_mean(
values, axis=list(range(weight_ndim, ndim)))
num_values = math_ops.reduce_sum(sample_weight)
values = math_ops.multiply(values, sample_weight)
values = math_ops.reduce_sum(values)
# Update state variables
state_ops.assign_add(self.total, values)
state_ops.assign_add(self.count, num_values)
def result(self):
return safe_div(self.total, self.count)
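
# Minimal usage sketch (mirrors the class docstring; for exposition only):
#   m = Mean()
#   m.update_state([1, 3, 5, 7])   # total=16, count=4
#   m.result()                     # -> 4.0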
class MeanMetricWrapper(Mean):
"""Wraps a stateless metric function with the Mean metric."""
def __init__(self, fn, name=None, dtype=None, **kwargs):
"""Creates a `MeanMetricWrapper` instance.
Args:
fn: The metric function to wrap, with signature
`fn(y_true, y_pred, **kwargs)`.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
**kwargs: The keyword arguments that are passed on to `fn`.
"""
super(MeanMetricWrapper, self).__init__(name=name, dtype=dtype)
self._fn = fn
self._fn_kwargs = kwargs
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates metric statistics.
`y_true` and `y_pred` should have the same shape.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to 1. Can be
a `Tensor` whose rank is either 0, or the same rank as `y_true`,
and must be broadcastable to `y_true`.
"""
y_true = math_ops.cast(y_true, self._dtype)
y_pred = math_ops.cast(y_pred, self._dtype)
y_pred, y_true, sample_weight = squeeze_or_expand_dimensions(
y_pred, y_true, sample_weight)
matches = self._fn(y_true, y_pred, **self._fn_kwargs)
super(MeanMetricWrapper, self).update_state(
matches, sample_weight=sample_weight)
def get_config(self):
config = self._fn_kwargs
base_config = super(MeanMetricWrapper, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class BinaryAccuracy(MeanMetricWrapper):
"""Calculates how often predictions matches labels.
This metric creates two local variables, `total` and `count` that are used to
compute the frequency with which `y_pred` matches `y_true`. This frequency is
ultimately returned as `binary accuracy`: an idempotent operation that simply
divides `total` by `count`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
"""
def __init__(self, name='binary-accuracy', dtype=None, threshold=0.5):
"""Creates a `BinaryAccuracy` instance.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
threshold: (Optional) Float representing the threshold for deciding
whether prediction values are 1 or 0.
"""
super(BinaryAccuracy, self).__init__(
binary_accuracy, name, dtype=dtype, threshold=threshold)
@tf_export('keras.metrics.binary_accuracy')
def binary_accuracy(y_true, y_pred, threshold=0.5):
threshold = math_ops.cast(threshold, y_pred.dtype)
y_pred = math_ops.cast(y_pred > threshold, y_pred.dtype)
return K.mean(math_ops.equal(y_true, y_pred), axis=-1)
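
# Example (assumed inputs): binary_accuracy([1., 0.], [0.6, 0.4]) -> 1.0, since
# 0.6 thresholds to 1 and 0.4 thresholds to 0 under the default threshold=0.5.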
@tf_export('keras.metrics.categorical_accuracy')
def categorical_accuracy(y_true, y_pred):
return math_ops.cast(
math_ops.equal(
math_ops.argmax(y_true, axis=-1), math_ops.argmax(y_pred, axis=-1)),
K.floatx())
def sparse_categorical_accuracy(y_true, y_pred):
return math_ops.cast(
math_ops.equal(
math_ops.reduce_max(y_true, axis=-1),
math_ops.cast(math_ops.argmax(y_pred, axis=-1), K.floatx())),
K.floatx())
@tf_export('keras.metrics.top_k_categorical_accuracy')
def top_k_categorical_accuracy(y_true, y_pred, k=5):
return K.mean(
nn.in_top_k(y_pred, math_ops.argmax(y_true, axis=-1), k), axis=-1)
@tf_export('keras.metrics.sparse_top_k_categorical_accuracy')
def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5):
return K.mean(
nn.in_top_k(y_pred,
math_ops.cast(math_ops.reduce_max(y_true, axis=-1), 'int32'),
k),
axis=-1)
# Aliases
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
mape = MAPE = mean_absolute_percentage_error
msle = MSLE = mean_squared_logarithmic_error
cosine = cosine_proximity
@tf_export('keras.metrics.serialize')
def serialize(metric):
return serialize_keras_object(metric)
@tf_export('keras.metrics.deserialize')
def deserialize(config, custom_objects=None):
return deserialize_keras_object(
config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name='metric function')
@tf_export('keras.metrics.get')
def get(identifier):
if isinstance(identifier, dict):
config = {'class_name': str(identifier), 'config': {}}
return deserialize(config)
elif isinstance(identifier, six.string_types):
return deserialize(str(identifier))
elif callable(identifier):
return identifier
else:
raise ValueError('Could not interpret '
'metric function identifier: %s' % identifier)
| [
"[email protected]"
] | |
6be743b4b02d6eb6d7f62aab46ff57260ffa042b | f92dfdebb4bf6bc108f51783333520c35afa66da | /api-web/src/www/application/management/commands/publish_rabbitmq_genome_gene.py | 23f7465ee4e41b1adf971b243ae030a6a568b6ea | [] | no_license | duytran92-cse/nas-genodata | 4d8659a135913d226842ff6a013324714ead0458 | 80c88f42145f729c5862a5293012e71548182e1d | refs/heads/master | 2022-11-13T17:24:03.769605 | 2020-06-14T18:59:36 | 2020-06-14T18:59:36 | 272,264,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,305 | py | import json, pika, os
from application.models import *
from urad_api import registry
from urad_api_standard.commands import Command as BaseCommand
from django.conf import settings
import json
from application.modules.gene import components as gene_components
from django.db import connection
class Command(BaseCommand):
## PUBLISH
def publish_to_queue(self, iterator, genome_queue, rabbitmq_host, rabbitmq_port):
credentials = pika.PlainCredentials('guest', 'guest')
connection = pika.BlockingConnection(pika.ConnectionParameters(rabbitmq_host, rabbitmq_port, '/', credentials))
channel = connection.channel()
channel.queue_declare(queue=genome_queue)
for x in iterator:
channel.basic_publish(exchange='', routing_key=genome_queue, body=json.dumps(x))
connection.close()
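
    # A matching consumer would look roughly like this (sketch only, not part
    # of this command; the exact pika callback signature depends on the pika
    # version in use):
    #   channel.basic_consume(queue=genome_queue, on_message_callback=handle, auto_ack=True)
    #   channel.start_consuming()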
def process(self, params = {}):
# DECLARE VARIABLE
GENOME_QUEUE = settings.GENOME_QUEUE
RABBITMQ_HOST = settings.RABBITMQ_HOST
RABBITMQ_PORT = int(settings.RABBITMQ_PORT)
# Starting
print "[x] Publish data to rabbitmq"
##########################
## Gene
print "[***] Publish GENE data to rabbitmq"
isDone = False
start = 0
gene_manager = gene_components.DataManager()
while not isDone:
end = start + 5000
print 'start: %s, end: %s' % (start, end)
gene = Gene.objects.all()[start:end]
start = end + 1
if gene.count() <= 0:
isDone = True
x = []
for var in gene:
y = ['gene', var.code]
try:
data = gene_manager.get(var.code)
values = {}
arr_disease = []
asso_disease = []
asso_pub = []
for field, value in data.items():
if field in ['synonyms', 'effects','start', 'end','num_exon','chromosome','protein_product','description'] and value['value'] != None:
values[field] = value['value']
# disease field
if field == 'disgenet-diseases' and value['value'] != None:
arr_disease.extend(value['value'])
rs = [ item['disease'] for item in value['value'] ]
asso_disease.extend(rs)
if field == 'gwas-diseases' and value['value'] != None:
try:
for k in value['value']:
arr_disease.append({
'disease': k.get('disease',''),
'pubmedid': k.get('pmid',''),
'sentence': k.get('sentence', '')
})
except Exception as e:
pass
rs = [ item['disease'] for item in value['value'] ]
asso_disease.extend(rs)
if field == 'ctdbase-diseases' and value['value'] != None:
try:
for k in value['value']:
arr_disease.append({
'disease': k.get('disease',''),
'pubmedid': k.get('pmid',''),
'sentence': k.get('evidence', '')
})
except Exception as e:
pass
rs = [ item['disease'] for item in value['value'] ]
asso_disease.extend(rs)
if len(arr_disease) > 0:
values['disgenet-diseases'] = arr_disease
if len(asso_disease) > 0:
values['associated_diseases'] = asso_disease
# publications
if field == 'publications' and value['value'] != None:
values[field] = value['value']
try:
for k in value['value']:
asso_pub.append({
'pmid': k['pmid'],
'title': k['title']
})
except Exception as e:
pass
if field == 'gwas-publications' and value['value'] != None:
asso_pub.extend(value['value'])
if len(asso_pub) > 0:
values['associated_publications'] = asso_pub
if values:
y.append(values)
x.append(y)
except Exception as e:
pass
# Publish rabbitMQ
self.publish_to_queue(x, GENOME_QUEUE, RABBITMQ_HOST, RABBITMQ_PORT)
print "[***] DONE gene"
print "[x] Sent data to RabbitMQ"
| [
"[email protected]"
] | |
a43e6873d5770d466c0143a8d8e3abdff3975ac4 | 4bc19f4dd098ebedcb6ee78af0ae12cb633671fe | /static/views.py | 608e8568b487fbee9eb1251fbf226fbe6d45ec5b | [] | no_license | StanislavKraev/rekvizitka | 958ab0e002335613a724fb14a8e4123f49954446 | ac1f30e7bb2e987b3b0bda4c2a8feda4d3f5497f | refs/heads/master | 2021-01-01T05:44:56.372748 | 2016-04-27T19:20:26 | 2016-04-27T19:20:26 | 57,240,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | from django.http import Http404
from django.shortcuts import render_to_response
from rek.static.models import StaticPage
from django.template.context import RequestContext
def render(request, page_alias=''):
page = StaticPage.objects.get(alias=page_alias, enabled=True)
if not page:
raise Http404()
return render_to_response('static_page_with_sidebar.html',
{'page' : page},
context_instance=RequestContext(request))
| [
"[email protected]"
] | |
e2a4d4248d4f5b48e5c69c52e0dad41e541340ba | 33cfcb4561e7320ae0e893fbe774c7eb0a2effe8 | /eg15.01.py | c94d345080db1688fdbb1a237e7fd737f5e8db93 | [] | no_license | Jueee/aByteOfPython | 9c8bc01f0707daef29e52467db0c3f5a94747119 | ae1a4a4b181612463ccdcd0d89c961f22f7ece20 | refs/heads/master | 2021-05-31T14:26:00.790823 | 2016-02-17T05:41:20 | 2016-02-17T05:41:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 877 | py | #!/usr/bin/python
# Filename: list_comprehension.py
# 通过列表综合,可以从一个已有的列表导出一个新的列表。
listone = [2, 3, 4]
listtwo = [2*i for i in listone if i > 2]
print(listtwo)
# Receiving tuples and dictionaries in functions
# To let a function accept parameters as a tuple or a dictionary, there is a
# special syntax using the * and ** prefixes respectively.
# This is especially useful when a function needs to take a variable number of arguments.
# Because of the * prefix on the args variable, all extra positional arguments
# are stored in args as a tuple.
# If the ** prefix is used instead, the extra arguments are treated as the
# key/value pairs of a dictionary.
def powersum(power, *args):
'''Return the sum of each argument raised to specified power.'''
total = 0
for i in args:
total += pow(i, power)
return total
print(powersum(2,3,4,5))
print(powersum(2,10,100,1000))
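
# A minimal sketch of the ** form described above (added illustration, not part
# of the original example): extra keyword arguments are collected into a dict.
def describe(name, **details):
    for key in sorted(details):
        print(key, '=', details[key])

describe('python', version=3, typed=False)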
| [
"hellojue @foxmail.com"
] | hellojue @foxmail.com |
5da2bd8dc2830c9ae5ea68845892e133cd447295 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startCirq2210.py | 06f183066edd0d13b690b7e34154e944725a31e0 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,734 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=28
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=9
c.append(cirq.rx(-1.9069467407290044).on(input_qubit[2])) # number=20
c.append(cirq.H.on(input_qubit[3])) # number=21
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.Y.on(input_qubit[2])) # number=13
c.append(cirq.rx(0.13823007675795101).on(input_qubit[2])) # number=24
c.append(cirq.H.on(input_qubit[0])) # number=5
c.append(cirq.H.on(input_qubit[1])) # number=6
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.X.on(input_qubit[3])) # number=1
c.append(cirq.rx(-1.9352210746113125).on(input_qubit[3])) # number=14
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[2])) # number=22
c.append(cirq.Y.on(input_qubit[2])) # number=10
c.append(cirq.H.on(input_qubit[1])) # number=17
c.append(cirq.CZ.on(input_qubit[3],input_qubit[1])) # number=18
c.append(cirq.H.on(input_qubit[1])) # number=19
c.append(cirq.Y.on(input_qubit[2])) # number=11
c.append(cirq.H.on(input_qubit[0])) # number=25
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=26
c.append(cirq.H.on(input_qubit[0])) # number=27
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=16
c.append(cirq.Z.on(input_qubit[3])) # number=23
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count = 2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq2210.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | [
"[email protected]"
] | |
01b5228bafb4cd7e36afa383714ca0ce95b4d5dd | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/A/anlangner/cordis_v3.py | c960031a72d020159d2fc051da824933e00894a7 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,592 | py | import scraperwiki
import scrapemark
import feedparser
import csv
import re
import urllib2,sys
import requests
import lxml.html
from BeautifulSoup import BeautifulSoup, NavigableString
# extract project page links from the result page "url"
def extract_links(url):
atom_feed = feedparser.parse(url)
link_list = []
for entry in atom_feed.entries:
print entry.title #+ " - " + entry.link
print entry.link
# experiment with data structure
data = {
'TITLE' : entry.title,
'LINK' : entry.link
}
print data
#scraperwiki.sqlite.save(unique_keys=['TITLE'], data=data)
link_list.append(entry.link)
#csvwriter.writerow([entry.title] + [entry.link])
return link_list
# open details page for "object" and parse the results
def parse_object(object):
html = urllib2.urlopen(object).read()
soup = BeautifulSoup(html)
project_id = soup.find('input', attrs={'name':"REF"}).get('value')
print "Project-ID: " + str(project_id)
detail_url = "http://cordis.europa.eu/newsearch/getDoc?doctype=PROJ&xslt-template=projects/xsl/projectdet_en.xslt&rcn=" + str(project_id)
print "***" + detail_url
details = requests.get(detail_url)
detail_page = details.content
content = BeautifulSoup(detail_page, convertEntities="html", smartQuotesTo="html", fromEncoding="utf-8")
# extract content
data_info = content.find(attrs={'class':'projdates'})
data_coordinator = content.find(attrs={'class': 'projcoord'})
data_details = content.find(attrs={'class': 'projdet'})
data_participants = content.find(attrs={'class': 'participants'})
data_footer = content.find(attrs={'id': 'recinfo'})
# data_tech = content.find(attrs={'class': 'tech'})
# trying to find project description: display all content
print ">>> " str(content)
data_info = lxml.html.fromstring(str(data_info))
data_info = data_info.text_content()
data_coordinator = lxml.html.fromstring(str(data_coordinator))
data_coordinator = data_coordinator.text_content()
data_details = lxml.html.fromstring(str(data_details))
data_details = data_details.text_content()
data_participants = lxml.html.fromstring(str(data_participants))
data_participants = data_participants.text_content()
data_footer = lxml.html.fromstring(str(data_footer))
data_footer = data_footer.text_content()
# REGEXP for fields
# Start date in YYYY-MM-DD: (?<=From\s).{1,}(?=\sto)
# End date in YYYY-MM-DD: (?<=to\s).{1,}(?=\s\|)
# Coordinator: (?<=Coordinator\s).{1,}(?=\s\(\+\))
# Coordinator contact: (?<=Administrative contact:\s).{1,}(?!\n)
# Project title in caps: (?<=\|\s).{1,}(?=\swebsite)
# Cost in EUR: (?<=EUR\s)\d{1,2}(\s\d{3}){1,2}
# EU Contribution: (?<=EU contribution: EUR\s)\d{1,2}(\s\d{3}){1,2}(?!Programme)
# Programme acronym: (?<=Programme acronym:\s)(\w{1,}.){2}
# Contract type: (?<=Contract type:\s).{1,}
# Subprogramme type: (?<=Subprogramme area:\s).{1,}(?=Contract)
# Participants: (?<=\n).{1,}?\n.{1,}?(?=\s\n)
# Participant contact: (?<=Administrative contact:\s).{1,}\n.{1,}(?=Email)
# Record number: (?<=Record number:\s)\d{1,}(?=\s\/)
field_regexp = {
'Title' : '(?<=\|\s).{1,}(?=\swebsite)',
'Start date' : '(?<=From\s).{1,}(?=\sto)',
'End date' : '(?<=to\s).{1,}(?=\s\|)',
'Coordinator' : '(?<=Coordinator\n\n).{1,}(?=\n)',
'Coordinator contact' : '(?<=Administrative contact:\s).{1,}\n.{1,}(?!Email)',
'Project cost' : '(?<=EUR\s)\d{1,2}(\s\d{3}){1,2}',
'EU contribution' : '(?<=EU contribution: EUR\s)\d{1,2}(\s\d{3}){1,2}(?!Programme)',
'Programme' : '(?<=Programme acronym:\s\n)(\w{1,}.){2}',
'Subprogramme' : '(?<=Subprogramme area:\s\n).{1,}(?=\n)',
'Contract' : '(?<=Contract type:\s\n).{1,}',
'Participants' : '(?<=\n).{1,}?\n.{1,}?(?=\s\n)',
'Participant contact' : '(?<=Administrative contact:\s).{1,}\n.{1,}(?=Email)',
'Record number' : '(?<=Record number:\s)\d{1,}(?=\s\/)'
}
    # WAAAAH, this is unspeakably ugly!
project_title = re.search(field_regexp['Title'], data_info)
project_title = project_title.group()
project_start = re.search(field_regexp['Start date'], data_info)
project_start = project_start.group()
project_end = re.search(field_regexp['End date'], data_info)
project_end = project_end.group()
project_coordinator = re.search(field_regexp['Coordinator'], data_coordinator)
project_coordinator = project_coordinator.group()
project_coord_con = re.search(field_regexp['Coordinator contact'], data_coordinator)
project_coord_con = project_coord_con.group()
project_cost = re.search(field_regexp['Project cost'], data_details)
project_cost = project_cost.group()
project_cost = project_cost.replace(" ", "")
project_contribution = re.search(field_regexp['EU contribution'], data_details)
project_contribution = project_contribution.group()
project_contribution = project_contribution.replace(" ", "")
project_programme = re.search(field_regexp['Programme'], data_details)
project_programme = project_programme.group()
project_subprogramme = re.search(field_regexp['Subprogramme'], data_details)
project_subprogramme = project_subprogramme.group()
project_contract = re.search(field_regexp['Contract'], data_details)
project_contract = project_contract.group()
project_participants = re.findall(field_regexp['Participants'], data_participants)
#project_participants = project_participants.group()
project_part_con = re.findall(field_regexp['Participant contact'], data_participants)
#project_part_con = project_part_con.group()
project_reference = re.search(field_regexp['Record number'], data_footer)
project_reference = project_reference.group()
project_desc = {
'Title' : project_title,
'Start date' : project_start,
'End date' : project_end,
'Coordinator' : project_coordinator,
'Coordinator contact' : project_coord_con,
'Project cost' : project_cost,
'EU contribution' : project_contribution,
'Programme' : project_programme,
'Subprogramme' : project_subprogramme,
'Contract' : project_contract,
#'Participants' : project_participants[0],
#'Participant contact' : project_part_con[0],
'Reference' : project_reference
}
scraperwiki.sqlite.save(unique_keys=['Title'], data=project_desc)
print ">>> CORDIS scraper <<<"
applicants = ["rexroth"]
URL_1 = "http://cordis.europa.eu/newsearch/download.cfm?action=query&collection=EN_PROJ&text=%28"
URL_2="%29&sort=all&querySummary=quick&fieldText=%28MATCH%7BCORDIS%2CWEBPAGESEUROPA%7D%3ASOURCE%29&ENGINE_ID=CORDIS_ENGINE_ID&SEARCH_TYPE_ID=CORDIS_SEARCH_ID&descr="
URL_3 = ";%20Projects"
print "Number of searches: " + str(len(applicants))
# Open CSV file
with open('output.csv', 'w') as csvfile:
    csvwriter = csv.writer(csvfile)
for applicant in applicants:
list_url = URL_1 + applicant + URL_2 + applicant + URL_3
result_links = extract_links(list_url)
for link in result_links:
parse_object(link)import scraperwiki
import scrapemark
import feedparser
import csv
import re
import urllib2,sys
import requests
import lxml.html
from BeautifulSoup import BeautifulSoup, NavigableString
# extract project page links from the result page "url"
def extract_links(url):
atom_feed = feedparser.parse(url)
link_list = []
for entry in atom_feed.entries:
print entry.title #+ " - " + entry.link
print entry.link
# experiment with data structure
data = {
'TITLE' : entry.title,
'LINK' : entry.link
}
print data
#scraperwiki.sqlite.save(unique_keys=['TITLE'], data=data)
link_list.append(entry.link)
#csvwriter.writerow([entry.title] + [entry.link])
return link_list
# open details page for "object" and parse the results
def parse_object(object):
html = urllib2.urlopen(object).read()
soup = BeautifulSoup(html)
project_id = soup.find('input', attrs={'name':"REF"}).get('value')
print "Project-ID: " + str(project_id)
detail_url = "http://cordis.europa.eu/newsearch/getDoc?doctype=PROJ&xslt-template=projects/xsl/projectdet_en.xslt&rcn=" + str(project_id)
print "***" + detail_url
details = requests.get(detail_url)
detail_page = details.content
content = BeautifulSoup(detail_page, convertEntities="html", smartQuotesTo="html", fromEncoding="utf-8")
# extract content
data_info = content.find(attrs={'class':'projdates'})
data_coordinator = content.find(attrs={'class': 'projcoord'})
data_details = content.find(attrs={'class': 'projdet'})
data_participants = content.find(attrs={'class': 'participants'})
data_footer = content.find(attrs={'id': 'recinfo'})
# data_tech = content.find(attrs={'class': 'tech'})
# trying to find project description: display all content
print ">>> " str(content)
data_info = lxml.html.fromstring(str(data_info))
data_info = data_info.text_content()
data_coordinator = lxml.html.fromstring(str(data_coordinator))
data_coordinator = data_coordinator.text_content()
data_details = lxml.html.fromstring(str(data_details))
data_details = data_details.text_content()
data_participants = lxml.html.fromstring(str(data_participants))
data_participants = data_participants.text_content()
data_footer = lxml.html.fromstring(str(data_footer))
data_footer = data_footer.text_content()
# REGEXP for fields
# Start date in YYYY-MM-DD: (?<=From\s).{1,}(?=\sto)
# End date in YYYY-MM-DD: (?<=to\s).{1,}(?=\s\|)
# Coordinator: (?<=Coordinator\s).{1,}(?=\s\(\+\))
# Coordinator contact: (?<=Administrative contact:\s).{1,}(?!\n)
# Project title in caps: (?<=\|\s).{1,}(?=\swebsite)
# Cost in EUR: (?<=EUR\s)\d{1,2}(\s\d{3}){1,2}
# EU Contribution: (?<=EU contribution: EUR\s)\d{1,2}(\s\d{3}){1,2}(?!Programme)
# Programme acronym: (?<=Programme acronym:\s)(\w{1,}.){2}
# Contract type: (?<=Contract type:\s).{1,}
# Subprogramme type: (?<=Subprogramme area:\s).{1,}(?=Contract)
# Participants: (?<=\n).{1,}?\n.{1,}?(?=\s\n)
# Participant contact: (?<=Administrative contact:\s).{1,}\n.{1,}(?=Email)
# Record number: (?<=Record number:\s)\d{1,}(?=\s\/)
field_regexp = {
'Title' : '(?<=\|\s).{1,}(?=\swebsite)',
'Start date' : '(?<=From\s).{1,}(?=\sto)',
'End date' : '(?<=to\s).{1,}(?=\s\|)',
'Coordinator' : '(?<=Coordinator\n\n).{1,}(?=\n)',
'Coordinator contact' : '(?<=Administrative contact:\s).{1,}\n.{1,}(?!Email)',
'Project cost' : '(?<=EUR\s)\d{1,2}(\s\d{3}){1,2}',
'EU contribution' : '(?<=EU contribution: EUR\s)\d{1,2}(\s\d{3}){1,2}(?!Programme)',
'Programme' : '(?<=Programme acronym:\s\n)(\w{1,}.){2}',
'Subprogramme' : '(?<=Subprogramme area:\s\n).{1,}(?=\n)',
'Contract' : '(?<=Contract type:\s\n).{1,}',
'Participants' : '(?<=\n).{1,}?\n.{1,}?(?=\s\n)',
'Participant contact' : '(?<=Administrative contact:\s).{1,}\n.{1,}(?=Email)',
'Record number' : '(?<=Record number:\s)\d{1,}(?=\s\/)'
}
# WAAAAH, das hier ist unsagbar hässlich!
project_title = re.search(field_regexp['Title'], data_info)
project_title = project_title.group()
project_start = re.search(field_regexp['Start date'], data_info)
project_start = project_start.group()
project_end = re.search(field_regexp['End date'], data_info)
project_end = project_end.group()
project_coordinator = re.search(field_regexp['Coordinator'], data_coordinator)
project_coordinator = project_coordinator.group()
project_coord_con = re.search(field_regexp['Coordinator contact'], data_coordinator)
project_coord_con = project_coord_con.group()
project_cost = re.search(field_regexp['Project cost'], data_details)
project_cost = project_cost.group()
project_cost = project_cost.replace(" ", "")
project_contribution = re.search(field_regexp['EU contribution'], data_details)
project_contribution = project_contribution.group()
project_contribution = project_contribution.replace(" ", "")
project_programme = re.search(field_regexp['Programme'], data_details)
project_programme = project_programme.group()
project_subprogramme = re.search(field_regexp['Subprogramme'], data_details)
project_subprogramme = project_subprogramme.group()
project_contract = re.search(field_regexp['Contract'], data_details)
project_contract = project_contract.group()
project_participants = re.findall(field_regexp['Participants'], data_participants)
#project_participants = project_participants.group()
project_part_con = re.findall(field_regexp['Participant contact'], data_participants)
#project_part_con = project_part_con.group()
project_reference = re.search(field_regexp['Record number'], data_footer)
project_reference = project_reference.group()
project_desc = {
'Title' : project_title,
'Start date' : project_start,
'End date' : project_end,
'Coordinator' : project_coordinator,
'Coordinator contact' : project_coord_con,
'Project cost' : project_cost,
'EU contribution' : project_contribution,
'Programme' : project_programme,
'Subprogramme' : project_subprogramme,
'Contract' : project_contract,
#'Participants' : project_participants[0],
#'Participant contact' : project_part_con[0],
'Reference' : project_reference
}
scraperwiki.sqlite.save(unique_keys=['Title'], data=project_desc)
print ">>> CORDIS scraper <<<"
applicants = ["rexroth"]
URL_1 = "http://cordis.europa.eu/newsearch/download.cfm?action=query&collection=EN_PROJ&text=%28"
URL_2="%29&sort=all&querySummary=quick&fieldText=%28MATCH%7BCORDIS%2CWEBPAGESEUROPA%7D%3ASOURCE%29&ENGINE_ID=CORDIS_ENGINE_ID&SEARCH_TYPE_ID=CORDIS_SEARCH_ID&descr="
URL_3 = ";%20Projects"
print "Number of searches: " + str(len(applicants))
# Open CSV file
with open('output.csv', 'w') as csvfile:
    csvwriter = csv.writer(csvfile)  # write through the already-open handle instead of reopening the file in append mode
for applicant in applicants:
list_url = URL_1 + applicant + URL_2 + applicant + URL_3
result_links = extract_links(list_url)
for link in result_links:
parse_object(link) | [
"[email protected]"
] | |
39f29b37f7444cf60b0b9e2cbd3307132c1c48c6 | d094ba0c8a9b1217fbf014aa79a283a49aabe88c | /env/lib/python3.6/site-packages/pandas/tests/io/parser/test_skiprows.py | 1df2ca4fad4d87539cdcdee874cb25a6cd3ce18e | [
"Apache-2.0"
] | permissive | Raniac/NEURO-LEARN | d9274e0baadd97bb02da54bdfcf6ca091fc1c703 | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | refs/heads/master | 2022-12-25T23:46:54.922237 | 2020-09-06T03:15:14 | 2020-09-06T03:15:14 | 182,013,100 | 9 | 2 | Apache-2.0 | 2022-12-09T21:01:00 | 2019-04-18T03:57:00 | CSS | UTF-8 | Python | false | false | 6,948 | py | # -*- coding: utf-8 -*-
"""
Tests that skipped rows are properly handled during
parsing for all of the parsers defined in parsers.py
"""
from datetime import datetime
import numpy as np
import pytest
from pandas.compat import StringIO, lrange, range
from pandas.errors import EmptyDataError
from pandas import DataFrame, Index
import pandas.util.testing as tm
@pytest.mark.parametrize("skiprows", [lrange(6), 6])
def test_skip_rows_bug(all_parsers, skiprows):
# see gh-505
parser = all_parsers
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
result = parser.read_csv(StringIO(text), skiprows=skiprows, header=None,
index_col=0, parse_dates=True)
index = Index([datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)], name=0)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3], index=index)
tm.assert_frame_equal(result, expected)
def test_deep_skip_rows(all_parsers):
# see gh-4382
parser = all_parsers
data = "a,b,c\n" + "\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_data = "a,b,c\n" + "\n".join([
",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
result = parser.read_csv(StringIO(data), skiprows=[6, 8])
condensed_result = parser.read_csv(StringIO(condensed_data))
tm.assert_frame_equal(result, condensed_result)
def test_skip_rows_blank(all_parsers):
# see gh-9832
parser = all_parsers
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = parser.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
index = Index([datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)], name=0)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=index)
tm.assert_frame_equal(data, expected)
@pytest.mark.parametrize("data,kwargs,expected", [
("""id,text,num_lines
1,"line 11
line 12",2
2,"line 21
line 22",2
3,"line 31",1""",
dict(skiprows=[1]),
DataFrame([[2, "line 21\nline 22", 2],
[3, "line 31", 1]], columns=["id", "text", "num_lines"])),
("a,b,c\n~a\n b~,~e\n d~,~f\n f~\n1,2,~12\n 13\n 14~",
dict(quotechar="~", skiprows=[2]),
DataFrame([["a\n b", "e\n d", "f\n f"]], columns=["a", "b", "c"])),
(("Text,url\n~example\n "
"sentence\n one~,url1\n~"
"example\n sentence\n two~,url2\n~"
"example\n sentence\n three~,url3"),
dict(quotechar="~", skiprows=[1, 3]),
DataFrame([['example\n sentence\n two', 'url2']],
columns=["Text", "url"]))
])
def test_skip_row_with_newline(all_parsers, data, kwargs, expected):
# see gh-12775 and gh-10911
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
def test_skip_row_with_quote(all_parsers):
# see gh-12775 and gh-10911
parser = all_parsers
data = """id,text,num_lines
1,"line '11' line 12",2
2,"line '21' line 22",2
3,"line '31' line 32",1"""
exp_data = [[2, "line '21' line 22", 2],
[3, "line '31' line 32", 1]]
expected = DataFrame(exp_data, columns=[
"id", "text", "num_lines"])
result = parser.read_csv(StringIO(data), skiprows=[1])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("data,exp_data", [
("""id,text,num_lines
1,"line \n'11' line 12",2
2,"line \n'21' line 22",2
3,"line \n'31' line 32",1""",
[[2, "line \n'21' line 22", 2],
[3, "line \n'31' line 32", 1]]),
("""id,text,num_lines
1,"line '11\n' line 12",2
2,"line '21\n' line 22",2
3,"line '31\n' line 32",1""",
[[2, "line '21\n' line 22", 2],
[3, "line '31\n' line 32", 1]]),
("""id,text,num_lines
1,"line '11\n' \r\tline 12",2
2,"line '21\n' \r\tline 22",2
3,"line '31\n' \r\tline 32",1""",
[[2, "line '21\n' \r\tline 22", 2],
[3, "line '31\n' \r\tline 32", 1]]),
])
def test_skip_row_with_newline_and_quote(all_parsers, data, exp_data):
# see gh-12775 and gh-10911
parser = all_parsers
result = parser.read_csv(StringIO(data), skiprows=[1])
expected = DataFrame(exp_data, columns=["id", "text", "num_lines"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("line_terminator", [
"\n", # "LF"
"\r\n", # "CRLF"
"\r" # "CR"
])
def test_skiprows_lineterminator(all_parsers, line_terminator):
# see gh-9079
parser = all_parsers
data = "\n".join(["SMOSMANIA ThetaProbe-ML2X ",
"2007/01/01 01:00 0.2140 U M ",
"2007/01/01 02:00 0.2141 M O ",
"2007/01/01 04:00 0.2142 D M "])
expected = DataFrame([["2007/01/01", "01:00", 0.2140, "U", "M"],
["2007/01/01", "02:00", 0.2141, "M", "O"],
["2007/01/01", "04:00", 0.2142, "D", "M"]],
columns=["date", "time", "var", "flag",
"oflag"])
if parser.engine == "python" and line_terminator == "\r":
pytest.skip("'CR' not respect with the Python parser yet")
data = data.replace("\n", line_terminator)
result = parser.read_csv(StringIO(data), skiprows=1, delim_whitespace=True,
names=["date", "time", "var", "flag", "oflag"])
tm.assert_frame_equal(result, expected)
def test_skiprows_infield_quote(all_parsers):
# see gh-14459
parser = all_parsers
data = "a\"\nb\"\na\n1"
expected = DataFrame({"a": [1]})
result = parser.read_csv(StringIO(data), skiprows=2)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs,expected", [
(dict(), DataFrame({"1": [3, 5]})),
(dict(header=0, names=["foo"]), DataFrame({"foo": [3, 5]}))
])
def test_skip_rows_callable(all_parsers, kwargs, expected):
parser = all_parsers
data = "a\n1\n2\n3\n4\n5"
result = parser.read_csv(StringIO(data),
skiprows=lambda x: x % 2 == 0,
**kwargs)
tm.assert_frame_equal(result, expected)
def test_skip_rows_skip_all(all_parsers):
parser = all_parsers
data = "a\n1\n2\n3\n4\n5"
msg = "No columns to parse from file"
with pytest.raises(EmptyDataError, match=msg):
parser.read_csv(StringIO(data), skiprows=lambda x: True)
def test_skip_rows_bad_callable(all_parsers):
msg = "by zero"
parser = all_parsers
data = "a\n1\n2\n3\n4\n5"
with pytest.raises(ZeroDivisionError, match=msg):
parser.read_csv(StringIO(data), skiprows=lambda x: 1 / 0)
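

# Quick reference (hedged sketch, not part of the pandas test suite):
# ``skiprows`` accepts an int (skip that many leading lines), a list of line
# indices, or a callable evaluated on each line index.
def _demo_skiprows_semantics():
    import pandas as pd
    data = "a\n1\n2\n3\n4\n5"
    # skiprows=2 drops "a" and "1", so "2" becomes the header row.
    assert pd.read_csv(StringIO(data), skiprows=2)["2"].tolist() == [3, 4, 5]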
| [
"[email protected]"
] | |
c6f9bfe889eb0278f68b7a17049662d5605c5285 | 5af277b5819d74e61374d1d78c303ac93c831cf5 | /axial/logging_utils.py | ef723570c0f02a331ebfc7220811665417690c53 | [
"Apache-2.0"
] | permissive | Ayoob7/google-research | a2d215afb31513bd59bc989e09f54667fe45704e | 727ec399ad17b4dd1f71ce69a26fc3b0371d9fa7 | refs/heads/master | 2022-11-11T03:10:53.216693 | 2020-06-26T17:13:45 | 2020-06-26T17:13:45 | 275,205,856 | 2 | 0 | Apache-2.0 | 2020-06-26T16:58:19 | 2020-06-26T16:58:18 | null | UTF-8 | Python | false | false | 4,459 | py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import time
from absl import logging
import numpy as np
import PIL.Image
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import gfile
from tensorflow.compat.v1.core.framework.summary_pb2 import Summary
from tensorflow.compat.v1.core.util.event_pb2 import Event
def pack_images(images, rows, cols):
"""Helper utility to make a tiled field of images from numpy arrays.
Taken from Jaxboard.
Args:
images: Image tensor in shape [N, W, H, C].
rows: Number of images per row in tiled image.
cols: Number of images per column in tiled image.
Returns:
A tiled image of shape [W * rows, H * cols, C].
Truncates incomplete rows.
"""
shape = np.shape(images)
width, height, depth = shape[-3:]
images = np.reshape(images, (-1, width, height, depth))
batch = np.shape(images)[0]
rows = np.minimum(rows, batch)
cols = np.minimum(batch // rows, cols)
images = images[:rows * cols]
images = np.reshape(images, (rows, cols, width, height, depth))
images = np.transpose(images, [0, 2, 1, 3, 4])
images = np.reshape(images, [rows * width, cols * height, depth])
return images
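

# Minimal self-check (hedged sketch, not part of the original module): tiling a
# batch of 6 RGB images as a 2x3 grid; incomplete final rows are truncated.
def _pack_images_demo():
  batch = np.zeros((6, 32, 32, 3), dtype=np.uint8)
  assert pack_images(batch, rows=2, cols=3).shape == (64, 96, 3)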
class SummaryWriter(object):
"""Tensorflow summary writer inspired by Jaxboard.
This version doesn't try to avoid Tensorflow dependencies, because this
project uses Tensorflow.
"""
  def __init__(self, log_dir, write_graph=True):
    if not gfile.IsDirectory(log_dir):
      gfile.MakeDirs(log_dir)
    self.writer = tf.summary.FileWriter(
        log_dir, graph=tf.get_default_graph() if write_graph else None)
def flush(self):
self.writer.flush()
def close(self):
self.writer.close()
def _write_event(self, summary_value, step):
self.writer.add_event(
Event(
wall_time=round(time.time()),
step=step,
summary=Summary(value=[summary_value])))
def scalar(self, tag, value, step):
self._write_event(Summary.Value(tag=tag, simple_value=float(value)), step)
def image(self, tag, image, step):
image = np.asarray(image)
if image.ndim == 2:
image = image[:, :, None]
if image.shape[-1] == 1:
image = np.repeat(image, 3, axis=-1)
bytesio = io.BytesIO()
PIL.Image.fromarray(image).save(bytesio, 'PNG')
image_summary = Summary.Image(
encoded_image_string=bytesio.getvalue(),
colorspace=3,
height=image.shape[0],
width=image.shape[1])
self._write_event(Summary.Value(tag=tag, image=image_summary), step)
def images(self, tag, images, step, square=True):
"""Saves (rows, cols) tiled images from onp.ndarray.
This truncates the image batch rather than padding
if it doesn't fill the final row.
"""
images = np.asarray(images)
n_images = len(images)
if square:
rows = cols = int(np.sqrt(n_images))
else:
rows = 1
cols = n_images
tiled_images = pack_images(images, rows, cols)
self.image(tag, tiled_images, step=step)
class Log(object):
"""Logging to Tensorboard and the Python logger at the same time."""
def __init__(self, logdir, write_graph=True):
self.logdir = logdir
# Tensorboard
self.summary_writer = SummaryWriter(logdir, write_graph=write_graph)
def write(self, key_prefix, info_dicts, step):
log_items = []
for key in info_dicts[-1]:
# average the log values over time
key_with_prefix = '{}/{}'.format(key_prefix, key)
avg_val = np.mean([info[key] for info in info_dicts])
# absl log
log_items.append('{}={:.6f}'.format(key_with_prefix, avg_val))
# tensorboard
self.summary_writer.scalar(key_with_prefix, avg_val, step=step)
self.summary_writer.flush()
logging.info('step={:08d} {}'.format(step, ' '.join(log_items)))
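
    # Usage sketch (assumption, not part of the original module):
    #   log = Log('/tmp/logdir')
    #   log.write('train', [{'loss': 0.5}, {'loss': 0.3}], step=100)
    # writes the time-averaged scalar train/loss = 0.4 to TensorBoard and absl.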
| [
"[email protected]"
] | |
54157e46485cfe84e785669c8a896e72e4eba04c | 22fc34523f4de64a1e1eea707e01da79e425a043 | /srtmprofile/core/urls.py | 3eb617af2639a39dc20d463863e4fff390506028 | [
"MIT"
] | permissive | marcellobenigno/srtmprofile | 04cdcf4a1f127462dd37d94ec5f368b0f304b932 | 52a2550976ce4ecad2921e53a72ac2ec8a8459b5 | refs/heads/master | 2021-04-03T05:25:54.097968 | 2018-03-15T11:05:02 | 2018-03-15T11:05:02 | 124,605,246 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | from django.conf.urls import url
from . import views
app_name = 'core'
urlpatterns = [
url(r'^$', views.home, name='home'),
url(r'^roads.geojson$', views.roads_geojson, name='roads_geojson'),
url(r'^(?P<pk>\d+)/$', views.detail, name='detail'),
]
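
# Resolved routes (sketch): '' -> core:home, 'roads.geojson' ->
# core:roads_geojson, and '<pk>/' -> core:detail, where pk is a numeric
# primary key consumed by views.detail.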
| [
"[email protected]"
] | |
6a6d137d3c8dc70d14aa023a752ffba6f170d4fd | 91af1af67ed219e583b209b40ae5dd34d6f7f355 | /train_net.py | 90d770c1765c7f52a585ded8af49a5bf767545db | [] | no_license | jack20951948/Deep-Clustering | d6f5bfdd97be1f07f114371eafd9f8643ebb6e30 | 4dd8b4d3fef72e597cd142406d343450cf2dd517 | refs/heads/main | 2023-06-28T02:18:58.915727 | 2021-07-18T07:18:10 | 2021-07-18T07:18:10 | 387,109,398 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,555 | py | '''
Script to train the model
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import time
import numpy as np
import tensorflow as tf
import ipdb
from datagenerator2 import DataGenerator
from model import Model
from GlobalConstont import *
# the .pkl file lists of data set
pkl_list = ['deep-clustering-master/pkl_folder/train.pkl'] # ['../dcdata/' + str(i) + '.pkl' for i in range(1, 12)]
val_list = ['deep-clustering-master/pkl_folder/val.pkl']
sum_dir = 'deep-clustering-master/sum'
train_dir = 'deep-clustering-master/model'
lr = 1e-3
n_hidden = 300
max_steps = 20000000
batch_size = 128
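
# Shape conventions implied by the feed dicts below (sizes come from
# GlobalConstont): each sample is a [FRAMES_PER_SAMPLE, NEFF] log-spectrogram,
# 'VAD' is a binary mask of the same shape, and 'Target' is a
# [FRAMES_PER_SAMPLE, NEFF, 2] per-bin speaker indicator for two sources.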
def train():
with tf.Graph().as_default():
# dropout keep probability
p_keep_ff = tf.placeholder(tf.float32, shape=None)
p_keep_rc = tf.placeholder(tf.float32, shape=None)
# generator for training set and validation set
data_generator = DataGenerator(pkl_list, batch_size)
val_generator = DataGenerator(val_list, batch_size)
# placeholder for input log spectrum, VAD info.,
# and speaker indicator function
in_data = tf.placeholder(
tf.float32, shape=[batch_size, FRAMES_PER_SAMPLE, NEFF])
VAD_data = tf.placeholder(
tf.float32, shape=[batch_size, FRAMES_PER_SAMPLE, NEFF])
Y_data = tf.placeholder(
tf.float32, shape=[batch_size, FRAMES_PER_SAMPLE, NEFF, 2])
# init the model
BiModel = Model(n_hidden, batch_size, p_keep_ff, p_keep_rc)
# build the net structure
embedding = BiModel.inference(in_data)
Y_data_reshaped = tf.reshape(Y_data, [-1, NEFF, 2])
VAD_data_reshaped = tf.reshape(VAD_data, [-1, NEFF])
# compute the loss
loss = BiModel.loss(embedding, Y_data_reshaped, VAD_data_reshaped)
# get the train operation
train_op = BiModel.train(loss, lr)
saver = tf.train.Saver(tf.all_variables())
summary_op = tf.summary.merge_all()
sess = tf.Session()
# either train from scratch or a trained model
# saver.restore(sess, 'train/model.ckpt-492000')
# val_loss = np.fromfile('val_loss').tolist()
# init_step = 56001
init = tf.initialize_all_variables()
sess.run(init)
init_step = 0
summary_writer = tf.summary.FileWriter(
sum_dir, sess.graph)
        val_loss = []  # accumulates validation losses; consumed after each training epoch below
last_epoch = data_generator.epoch
for step in range(init_step, init_step + max_steps):
start_time = time.time()
data_batch = data_generator.gen_batch()
# concatenate the samples into batch data
in_data_np = np.concatenate(
[np.reshape(item['Sample'], [1, FRAMES_PER_SAMPLE, NEFF])
for item in data_batch])
VAD_data_np = np.concatenate(
[np.reshape(item['VAD'], [1, FRAMES_PER_SAMPLE, NEFF])
for item in data_batch])
VAD_data_np = VAD_data_np.astype('int')
Y_data_np = np.concatenate(
[np.reshape(item['Target'], [1, FRAMES_PER_SAMPLE, NEFF, 2])
for item in data_batch])
Y_data_np = Y_data_np.astype('int')
# train the model
loss_value, _, summary_str = sess.run(
[loss, train_op, summary_op],
feed_dict={in_data: in_data_np,
VAD_data: VAD_data_np,
Y_data: Y_data_np,
p_keep_ff: 1 - P_DROPOUT_FF,
p_keep_rc: 1 - P_DROPOUT_RC})
summary_writer.add_summary(summary_str, step)
duration = time.time() - start_time
# if np.isnan(loss_value):
# import ipdb; ipdb.set_trace()
assert not np.isnan(loss_value)
if step % 100 == 0:
# show training progress every 100 steps
num_examples_per_step = batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = (
'%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch, epoch %d)')
print (format_str % (datetime.now(), step, loss_value,
examples_per_sec, sec_per_batch,
data_generator.epoch))
if step % 4000 == 0:
# save model every 4000 steps
checkpoint_path = os.path.join(train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
if last_epoch != data_generator.epoch:
# doing validation every training epoch
print('Doing validation')
val_epoch = val_generator.epoch
count = 0
loss_sum = 0
# average the validation loss
                while val_epoch == val_generator.epoch:
count += 1
data_batch = val_generator.gen_batch()
in_data_np = np.concatenate(
[np.reshape(item['Sample'],
[1, FRAMES_PER_SAMPLE, NEFF])
for item in data_batch])
VAD_data_np = np.concatenate(
[np.reshape(item['VAD'], [1, FRAMES_PER_SAMPLE, NEFF])
for item in data_batch])
VAD_data_np = VAD_data_np.astype('int')
Y_data_np = np.concatenate(
[np.reshape(item['Target'],
[1, FRAMES_PER_SAMPLE, NEFF, 2])
for item in data_batch])
Y_data_np = Y_data_np.astype('int')
loss_value, = sess.run(
[loss],
feed_dict={in_data: in_data_np,
VAD_data: VAD_data_np,
Y_data: Y_data_np,
p_keep_ff: 1,
p_keep_rc: 1})
loss_sum += loss_value
val_loss.append(loss_sum / count)
print ('validation loss: %.3f' % (loss_sum / count))
np.array(val_loss).tofile('val_loss')
last_epoch = data_generator.epoch
print('%s start' % datetime.now())
train()
| [
"[email protected]"
] | |
073baf122d23c22628502336b5d6cf068590df1b | 259cc507d97bfeff84d21de3a0ab56640676a9eb | /venv1/Lib/site-packages/tensorflow/python/training/session_manager.py | 06084b1d7b3fc728396e775179f7ce788d696e65 | [
"MIT",
"Apache-2.0"
] | permissive | Soum-Soum/Tensorflow_Face_Finder | c3ef71b6f718f6720b80f8760d28b6ca6e11e6d2 | fec6c15d2df7012608511ad87f4b55731bf99478 | refs/heads/master | 2020-03-22T20:31:39.606644 | 2018-07-12T13:47:56 | 2018-07-12T13:47:56 | 140,607,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,336 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training helper that checkpoints models and creates session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as saver_mod
from tensorflow.python.util.tf_export import tf_export
def _maybe_name(obj):
"""Returns object name if it has one, or a message otherwise.
  This is useful for names that appear in error messages.
Args:
obj: Object to get the name of.
Returns:
name, "None", or a "no name" message.
"""
if obj is None:
return "None"
elif hasattr(obj, "name"):
return obj.name
else:
return "<no name for %s>" % type(obj)
@tf_export("train.SessionManager")
class SessionManager(object):
"""Training helper that restores from checkpoint and creates session.
This class is a small wrapper that takes care of session creation and
checkpoint recovery. It also provides functions that to facilitate
coordination among multiple training threads or processes.
* Checkpointing trained variables as the training progresses.
* Initializing variables on startup, restoring them from the most recent
checkpoint after a crash, or wait for checkpoints to become available.
### Usage:
```python
with tf.Graph().as_default():
...add operations to the graph...
# Create a SessionManager that will checkpoint the model in '/tmp/mydir'.
sm = SessionManager()
sess = sm.prepare_session(master, init_op, saver, checkpoint_dir)
# Use the session to train the graph.
while True:
sess.run(<my_train_op>)
```
`prepare_session()` initializes or restores a model. It requires `init_op`
and `saver` as an argument.
A second process could wait for the model to be ready by doing the following:
```python
with tf.Graph().as_default():
...add operations to the graph...
# Create a SessionManager that will wait for the model to become ready.
sm = SessionManager()
sess = sm.wait_for_session(master)
# Use the session to train the graph.
while True:
sess.run(<my_train_op>)
```
`wait_for_session()` waits for a model to be initialized by other processes.
"""
def __init__(self,
local_init_op=None,
ready_op=None,
ready_for_local_init_op=None,
graph=None,
recovery_wait_secs=30):
"""Creates a SessionManager.
    The `local_init_op` is an `Operation` that is always run after a new
    session is created. If `None`, this step is skipped.
The `ready_op` is an `Operation` used to check if the model is ready. The
model is considered ready if that operation returns an empty 1D string
tensor. If the operation returns a non empty 1D string tensor, the elements
are concatenated and used to indicate to the user why the model is not
ready.
The `ready_for_local_init_op` is an `Operation` used to check if the model
is ready to run local_init_op. The model is considered ready if that
operation returns an empty 1D string tensor. If the operation returns a non
empty 1D string tensor, the elements are concatenated and used to indicate
to the user why the model is not ready.
If `ready_op` is `None`, the model is not checked for readiness.
`recovery_wait_secs` is the number of seconds between checks that
the model is ready. It is used by processes to wait for a model to
be initialized or restored. Defaults to 30 seconds.
Args:
local_init_op: An `Operation` run immediately after session creation.
Usually used to initialize tables and local variables.
ready_op: An `Operation` to check if the model is initialized.
ready_for_local_init_op: An `Operation` to check if the model is ready
to run local_init_op.
graph: The `Graph` that the model will use.
recovery_wait_secs: Seconds between checks for the model to be ready.
Raises:
ValueError: If ready_for_local_init_op is not None but local_init_op is
None
"""
# Sets default values of arguments.
if graph is None:
graph = ops.get_default_graph()
self._local_init_op = local_init_op
self._ready_op = ready_op
self._ready_for_local_init_op = ready_for_local_init_op
self._graph = graph
self._recovery_wait_secs = recovery_wait_secs
self._target = None
if ready_for_local_init_op is not None and local_init_op is None:
raise ValueError("If you pass a ready_for_local_init_op "
"you must also pass a local_init_op "
", ready_for_local_init_op [%s]" %
ready_for_local_init_op)
def _restore_checkpoint(self,
master,
saver=None,
checkpoint_dir=None,
checkpoint_filename_with_path=None,
wait_for_checkpoint=False,
max_wait_secs=7200,
config=None):
"""Creates a `Session`, and tries to restore a checkpoint.
Args:
master: `String` representation of the TensorFlow master to use.
saver: A `Saver` object used to restore a model.
checkpoint_dir: Path to the checkpoint files. The latest checkpoint in the
dir will be used to restore.
checkpoint_filename_with_path: Full file name path to the checkpoint file.
wait_for_checkpoint: Whether to wait for checkpoint to become available.
max_wait_secs: Maximum time to wait for checkpoints to become available.
config: Optional `ConfigProto` proto used to configure the session.
Returns:
A pair (sess, is_restored) where 'is_restored' is `True` if
the session could be restored, `False` otherwise.
Raises:
ValueError: If both checkpoint_dir and checkpoint_filename_with_path are
set.
"""
self._target = master
sess = session.Session(self._target, graph=self._graph, config=config)
if checkpoint_dir and checkpoint_filename_with_path:
raise ValueError("Can not provide both checkpoint_dir and "
"checkpoint_filename_with_path.")
# If either saver or checkpoint_* is not specified, cannot restore. Just
# return.
if not saver or not (checkpoint_dir or checkpoint_filename_with_path):
return sess, False
if checkpoint_filename_with_path:
saver.restore(sess, checkpoint_filename_with_path)
return sess, True
# Waits up until max_wait_secs for checkpoint to become available.
wait_time = 0
ckpt = saver_mod.get_checkpoint_state(checkpoint_dir)
while not ckpt or not ckpt.model_checkpoint_path:
if wait_for_checkpoint and wait_time < max_wait_secs:
logging.info("Waiting for checkpoint to be available.")
time.sleep(self._recovery_wait_secs)
wait_time += self._recovery_wait_secs
ckpt = saver_mod.get_checkpoint_state(checkpoint_dir)
else:
return sess, False
# Loads the checkpoint.
saver.restore(sess, ckpt.model_checkpoint_path)
saver.recover_last_checkpoints(ckpt.all_model_checkpoint_paths)
return sess, True
def prepare_session(self,
master,
init_op=None,
saver=None,
checkpoint_dir=None,
checkpoint_filename_with_path=None,
wait_for_checkpoint=False,
max_wait_secs=7200,
config=None,
init_feed_dict=None,
init_fn=None):
"""Creates a `Session`. Makes sure the model is ready to be used.
Creates a `Session` on 'master'. If a `saver` object is passed in, and
`checkpoint_dir` points to a directory containing valid checkpoint
files, then it will try to recover the model from checkpoint. If
no checkpoint files are available, and `wait_for_checkpoint` is
`True`, then the process would check every `recovery_wait_secs`,
up to `max_wait_secs`, for recovery to succeed.
If the model cannot be recovered successfully then it is initialized by
either running the provided `init_op`, or calling the provided `init_fn`.
The local_init_op is also run after init_op and init_fn, regardless of
whether the model was recovered successfully, but only if
ready_for_local_init_op passes.
It is an error if the model cannot be recovered and no `init_op`
or `init_fn` or `local_init_op` are passed.
Args:
master: `String` representation of the TensorFlow master to use.
init_op: Optional `Operation` used to initialize the model.
saver: A `Saver` object used to restore a model.
checkpoint_dir: Path to the checkpoint files. The latest checkpoint in the
dir will be used to restore.
checkpoint_filename_with_path: Full file name path to the checkpoint file.
wait_for_checkpoint: Whether to wait for checkpoint to become available.
max_wait_secs: Maximum time to wait for checkpoints to become available.
config: Optional `ConfigProto` proto used to configure the session.
init_feed_dict: Optional dictionary that maps `Tensor` objects to feed
values. This feed dictionary is passed to the session `run()` call when
running the init op.
init_fn: Optional callable used to initialize the model. Called after the
optional `init_op` is called. The callable must accept one argument,
the session being initialized.
Returns:
A `Session` object that can be used to drive the model.
    Raises:
      RuntimeError: If the model cannot be initialized or recovered.
      ValueError: If both checkpoint_dir and checkpoint_filename_with_path are
        set.
"""
sess, is_loaded_from_checkpoint = self._restore_checkpoint(
master,
saver,
checkpoint_dir=checkpoint_dir,
checkpoint_filename_with_path=checkpoint_filename_with_path,
wait_for_checkpoint=wait_for_checkpoint,
max_wait_secs=max_wait_secs,
config=config)
if not is_loaded_from_checkpoint:
if init_op is None and not init_fn and self._local_init_op is None:
raise RuntimeError("Model is not initialized and no init_op or "
"init_fn or local_init_op was given")
if init_op is not None:
sess.run(init_op, feed_dict=init_feed_dict)
if init_fn:
init_fn(sess)
local_init_success, msg = self._try_run_local_init_op(sess)
if not local_init_success:
raise RuntimeError(
"Init operations did not make model ready for local_init. "
"Init op: %s, init fn: %s, error: %s" % (_maybe_name(init_op),
init_fn,
msg))
is_ready, msg = self._model_ready(sess)
if not is_ready:
raise RuntimeError(
"Init operations did not make model ready. "
"Init op: %s, init fn: %s, local_init_op: %s, error: %s" %
(_maybe_name(init_op), init_fn, self._local_init_op, msg))
return sess
def recover_session(self,
master,
saver=None,
checkpoint_dir=None,
checkpoint_filename_with_path=None,
wait_for_checkpoint=False,
max_wait_secs=7200,
config=None):
"""Creates a `Session`, recovering if possible.
Creates a new session on 'master'. If the session is not initialized
and can be recovered from a checkpoint, recover it.
Args:
master: `String` representation of the TensorFlow master to use.
saver: A `Saver` object used to restore a model.
checkpoint_dir: Path to the checkpoint files. The latest checkpoint in the
dir will be used to restore.
checkpoint_filename_with_path: Full file name path to the checkpoint file.
wait_for_checkpoint: Whether to wait for checkpoint to become available.
max_wait_secs: Maximum time to wait for checkpoints to become available.
config: Optional `ConfigProto` proto used to configure the session.
Returns:
A pair (sess, initialized) where 'initialized' is `True` if
the session could be recovered and initialized, `False` otherwise.
Raises:
ValueError: If both checkpoint_dir and checkpoint_filename_with_path are
set.
"""
sess, is_loaded_from_checkpoint = self._restore_checkpoint(
master,
saver,
checkpoint_dir=checkpoint_dir,
checkpoint_filename_with_path=checkpoint_filename_with_path,
wait_for_checkpoint=wait_for_checkpoint,
max_wait_secs=max_wait_secs,
config=config)
# Always try to run local_init_op
local_init_success, msg = self._try_run_local_init_op(sess)
if not is_loaded_from_checkpoint:
# Do not need to run checks for readiness
return sess, False
restoring_file = checkpoint_dir or checkpoint_filename_with_path
if not local_init_success:
logging.info(
"Restoring model from %s did not make model ready for local init:"
" %s", restoring_file, msg)
return sess, False
is_ready, msg = self._model_ready(sess)
if not is_ready:
logging.info("Restoring model from %s did not make model ready: %s",
restoring_file, msg)
return sess, False
logging.info("Restored model from %s", restoring_file)
return sess, is_loaded_from_checkpoint
def wait_for_session(self, master, config=None, max_wait_secs=float("Inf")):
"""Creates a new `Session` and waits for model to be ready.
Creates a new `Session` on 'master'. Waits for the model to be
initialized or recovered from a checkpoint. It's expected that
another thread or process will make the model ready, and that this
is intended to be used by threads/processes that participate in a
distributed training configuration where a different thread/process
is responsible for initializing or recovering the model being trained.
NB: The amount of time this method waits for the session is bounded
by max_wait_secs. By default, this function will wait indefinitely.
Args:
master: `String` representation of the TensorFlow master to use.
config: Optional ConfigProto proto used to configure the session.
max_wait_secs: Maximum time to wait for the session to become available.
Returns:
A `Session`. May be None if the operation exceeds the timeout
specified by config.operation_timeout_in_ms.
Raises:
tf.DeadlineExceededError: if the session is not available after
max_wait_secs.
"""
self._target = master
if max_wait_secs is None:
max_wait_secs = float("Inf")
timer = _CountDownTimer(max_wait_secs)
while True:
sess = session.Session(self._target, graph=self._graph, config=config)
not_ready_msg = None
not_ready_local_msg = None
local_init_success, not_ready_local_msg = self._try_run_local_init_op(
sess)
if local_init_success:
# Successful if local_init_op is None, or ready_for_local_init_op passes
is_ready, not_ready_msg = self._model_ready(sess)
if is_ready:
return sess
self._safe_close(sess)
# Do we have enough time left to try again?
      remaining_secs_after_wait = (
          timer.secs_remaining() - self._recovery_wait_secs)
      if remaining_secs_after_wait < 0:
raise errors.DeadlineExceededError(
None, None,
"Session was not ready after waiting %d secs." % (max_wait_secs,))
logging.info("Waiting for model to be ready. "
"Ready_for_local_init_op: %s, ready: %s",
not_ready_local_msg, not_ready_msg)
time.sleep(self._recovery_wait_secs)
def _safe_close(self, sess):
"""Closes a session without raising an exception.
Just like sess.close() but ignores exceptions.
Args:
sess: A `Session`.
"""
# pylint: disable=broad-except
try:
sess.close()
except Exception:
# Intentionally not logging to avoid user complaints that
# they get cryptic errors. We really do not care that Close
# fails.
pass
# pylint: enable=broad-except
def _model_ready(self, sess):
"""Checks if the model is ready or not.
Args:
sess: A `Session`.
Returns:
A tuple (is_ready, msg), where is_ready is True if ready and False
otherwise, and msg is `None` if the model is ready, a `String` with the
reason why it is not ready otherwise.
"""
return _ready(self._ready_op, sess, "Model not ready")
def _model_ready_for_local_init(self, sess):
"""Checks if the model is ready to run local_init_op.
Args:
sess: A `Session`.
Returns:
A tuple (is_ready, msg), where is_ready is True if ready to run
local_init_op and False otherwise, and msg is `None` if the model is
ready to run local_init_op, a `String` with the reason why it is not ready
otherwise.
"""
return _ready(self._ready_for_local_init_op, sess,
"Model not ready for local init")
def _try_run_local_init_op(self, sess):
"""Tries to run _local_init_op, if not None, and is ready for local init.
Args:
sess: A `Session`.
Returns:
A tuple (is_successful, msg), where is_successful is True if
_local_init_op is None, or we ran _local_init_op, and False otherwise;
and msg is a `String` with the reason why the model was not ready to run
local init.
"""
if self._local_init_op is not None:
is_ready_for_local_init, msg = self._model_ready_for_local_init(sess)
if is_ready_for_local_init:
logging.info("Running local_init_op.")
sess.run(self._local_init_op)
logging.info("Done running local_init_op.")
return True, None
else:
return False, msg
return True, None
def _ready(op, sess, msg):
"""Checks if the model is ready or not, as determined by op.
Args:
op: An op, either _ready_op or _ready_for_local_init_op, which defines the
readiness of the model.
sess: A `Session`.
msg: A message to log to warning if not ready
Returns:
A tuple (is_ready, msg), where is_ready is True if ready and False
otherwise, and msg is `None` if the model is ready, a `String` with the
reason why it is not ready otherwise.
"""
if op is None:
return True, None
else:
try:
ready_value = sess.run(op)
# The model is considered ready if ready_op returns an empty 1-D tensor.
# Also compare to `None` and dtype being int32 for backward
# compatibility.
if (ready_value is None or ready_value.dtype == np.int32 or
ready_value.size == 0):
return True, None
else:
# TODO(sherrym): If a custom ready_op returns other types of tensor,
# or strings other than variable names, this message could be
# confusing.
non_initialized_varnames = ", ".join(
[i.decode("utf-8") for i in ready_value])
return False, "Variables not initialized: " + non_initialized_varnames
except errors.FailedPreconditionError as e:
if "uninitialized" not in str(e):
logging.warning("%s : error [%s]", msg, str(e))
raise e
return False, str(e)
class _CountDownTimer(object):
def __init__(self, duration_secs):
self._start_time_secs = time.time()
self._duration_secs = duration_secs
def secs_remaining(self):
diff = self._duration_secs - (time.time() - self._start_time_secs)
return max(0, diff)
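

# Hedged usage sketch (not part of the original module): a zero-duration timer
# is already expired, and secs_remaining() is clamped at zero.
def _count_down_timer_demo():
  timer = _CountDownTimer(duration_secs=0.0)
  assert timer.secs_remaining() == 0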
| [
"[email protected]"
] | |
92c3a0d5822904b02ee02cf30204b593268f8d36 | ded10c2f2f5f91c44ec950237a59225e8486abd8 | /.history/2/matrix_squaring_20200413235341.py | 1cded98ea6504881b7ef71c0979704ed33286f9f | [] | no_license | jearistiz/Statistical-Physics-Projects | 276a86407b32ded4e06b32efb2fadbd8eff8daed | d9c5b16a50856e148dc8604d92b6de3ea21fc552 | refs/heads/master | 2022-11-05T03:41:23.623050 | 2020-06-28T06:36:05 | 2020-06-28T06:36:05 | 254,909,897 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,000 | py | # -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from time import time
import pandas as pd
def rho_free(x,xp,beta):
"""
    Usage: returns the density matrix element for a free particle on an infinite torus.
"""
return (2.*np.pi*beta)**(-0.5) * np.exp(-(x-xp)**2 / (2 * beta) )
def harmonic_potential(x):
"""Devuelve valor del potencial harmónico para una posición x dada"""
return 0.5*x**2
def anharmonic_potential(x):
"""Devuelve valor de potencial anharmónico para una posición x dada"""
# return np.abs(x)*(1+np.cos(x)) #el resultado de este potencial es interesante
return 0.5*x**2 - x**3 + x**4
def QHO_canonical_ensemble(x,beta):
"""
    Usage: computes the theoretical quantum probability of finding the harmonic
           oscillator (immersed in a thermal bath) at position x.
    Receives:
        x: float     -> position
        beta: float  -> inverse temperature in reduced units, beta = 1/T.
    Returns:
        theoretical quantum probability at the given position for the given temperature T.
"""
return (np.tanh(beta/2.)/np.pi)**0.5 * np.exp(- x**2 * np.tanh(beta/2.))
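
# Sanity check (hedged, not part of the original script): this closed-form QHO
# density is normalized, e.g. np.trapz(QHO_canonical_ensemble(x, 4.), x) ~ 1
# for x = np.linspace(-5, 5, 1001).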
def rho_trotter(x_max = 5., nx = 101, beta=1, potential=harmonic_potential):
"""
    Usage: returns the density matrix in the high-temperature Trotter approximation
           under the potential "potential".
    Receives:
        x_max: float     -> the values of x lie in the interval (-x_max, x_max).
        nx: int          -> number of x values considered.
        beta: float      -> inverse temperature in reduced units.
        potential: func  -> interaction potential; must be a function of x.
    Returns:
        rho: numpy array, shape=(nx,nx)   -> density matrix in the Trotter approximation
                                             for high temperatures and the given potential.
        grid_x: numpy array, shape=(nx,)  -> values of x at which rho is evaluated.
        dx: float                         -> spacing between contiguous values of grid_x.
"""
dx = 2. * x_max / (nx - 1)
grid_x = np.array([i*dx for i in range(-int((nx-1)/2), int(nx/2 + 1))])
rho = np.array([ [ rho_free(x , xp, beta) * np.exp(-0.5*beta*(potential(x)+potential(xp))) for x in grid_x] for xp in grid_x])
return rho, grid_x, dx
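
# Minimal check (hedged sketch, not part of the original script): for x_max=5
# and nx=101 the grid is symmetric about 0 with spacing dx = 2*x_max/(nx-1) = 0.1.
def _rho_trotter_grid_demo():
    rho, grid_x, dx = rho_trotter(x_max=5., nx=101, beta=0.1)
    assert rho.shape == (101, 101) and np.isclose(dx, 0.1) and np.isclose(grid_x[0], -5.)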
def density_matrix_squaring(rho, grid_x, N_iter = 1, beta_ini = 1, print_steps=True):
"""
    Usage: returns the density matrix after applying the matrix squaring algorithm N_iter
           times. The system associated with the resulting density matrix (once the
           algorithm finishes) is at inverse temperature beta_fin = beta_ini * 2**N_iter.
    Receives:
        rho: numpy array, shape=(nx,nx)   -> density matrix in the Trotter approximation
                                             for high temperatures and a given potential.
        grid_x: numpy array, shape=(nx,)  -> values of x at which "rho" is evaluated.
        N_iter: int                       -> number of iterations of the algorithm.
        beta_ini: float                   -> inverse temperature associated with the
                                             density matrix "rho".
        print_steps: bool                 -> prints the beta values at each iteration.
    Returns:
        rho: numpy array, shape=(nx,nx)   -> density matrix of the state "rho" at inverse
                                             temperature "beta_fin".
        trace_rho: float                  -> trace of the density matrix at inverse
                                             temperature "beta_fin". With the definition of
                                             "rho" used here, this equals the partition
                                             function at that temperature.
        beta_fin: float                   -> inverse temperature of the system associated
                                             with "rho".
"""
dx = grid_x[1] - grid_x[0]
beta_fin = beta_ini * 2 ** N_iter
print('\nbeta_ini = %.3f'%beta_ini,
'\n----------------------------------------------------------------')
for i in range(N_iter):
rho = dx * np.dot(rho,rho)
if print_steps==True:
print(u'Iteration %d) 2^%d * beta_ini --> 2^%d * beta_ini'%(i, i, i+1))
trace_rho = np.trace(rho)*dx
return rho, trace_rho, beta_fin
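
# Note (hedged): each squaring doubles the inverse temperature, so after N_iter
# iterations beta_fin = beta_ini * 2**N_iter; run_pi_x_squaring below starts
# from beta_ini = 4 * 2**-7 = 0.03125 and recovers beta_fin = 4 in 7 squarings.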
def save_pi_x_csv(grid_x, x_weights, file_name, relevant_info, print_data=True):
"""
    Usage: saves the data of the distribution pi(x;beta) to a csv file.
    Receives:
        grid_x: numpy array, shape=(nx,)     -> values of x at which pi(x;beta) is evaluated.
        x_weights: numpy array, shape=(nx,)  -> probability densities pi(x;beta) on grid_x.
        file_name: str                       -> name of the output csv file.
        relevant_info: str                   -> comment line written at the top of the csv file.
        print_data: bool                     -> if True, prints the resulting DataFrame.
    Returns:
        pi_x_data: pandas DataFrame with positions and probability densities.
"""
pi_x_data = {'Position x': grid_x,
'Prob. density': x_weights}
pi_x_data = pd.DataFrame(data=pi_x_data)
with open(file_name,mode='w') as rho_csv:
rho_csv.write(relevant_info+'\n')
rho_csv.close()
with open(file_name,mode='a') as rho_csv:
pi_x_data.to_csv(rho_csv)
rho_csv.close()
if print_data==True:
print(pi_x_data)
return pi_x_data
def run_pi_x_squaring(x_max=5., nx=201, N_iter=7, beta_fin=4, potential=harmonic_potential,
potential_string = 'harmonic_potential', print_steps=True,
save_data=True, plot=True, save_plot=True, show_plot=True):
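    """
    Usage: full pipeline. Builds the Trotter density matrix at
    beta_ini = beta_fin * 2**(-N_iter), squares it N_iter times to reach
    beta_fin, then optionally saves pi(x;beta_fin) to csv and plots it against
    the exact QHO curve (the theoretical curve applies to the harmonic
    potential only).
    """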
beta_ini = beta_fin * 2**(-N_iter)
    # Compute rho in the high-temperature Trotter approximation
rho, grid_x, dx = rho_trotter(x_max, nx, beta_ini, potential)
    # Approximate rho at lower temperature by iterating matrix squaring N_iter times.
rho, trace_rho, beta_fin_2 = density_matrix_squaring(rho, grid_x, N_iter,
beta_ini, print_steps)
print('----------------------------------------------------------------\n',
u'beta_fin = %.3f Z(beta_fin) = Tr(rho(beta_fin)) ≈ %.3E \n'%(beta_fin_2,trace_rho))
    # Normalize rho and compute probability densities at the grid_x values
rho_normalized = rho/trace_rho
x_weights = np.diag(rho_normalized)
if save_data==True:
        # Name of the csv file where the values of pi(x;beta_fin) are stored
file_name = u'pi_x-%s-x_max_%.3f-nx_%d-N_iter_%d-beta_fin_%.3f.csv'\
%(potential_string,x_max,nx,N_iter,beta_fin)
        # Relevant info to add as a comment line in the csv file
relevant_info = u'# %s x_max = %.3f nx = %d '%(potential_string,x_max,nx) + \
u'N_iter = %d beta_ini = %.3f '%(N_iter,beta_ini,) + \
u'beta_fin = %.3f'%beta_fin
        # Save the values of pi(x;beta_fin) to the csv file
save_pi_x_csv(grid_x, x_weights, file_name, relevant_info, print_data=0)
    # Plot and comparison against theory
if plot == True:
plt.figure(figsize=(8,5))
        plt.plot(grid_x, x_weights, label = 'Matrix squaring +\nTrotter formula.\n$N=%d$ iterations\n$dx=%.3E$'%(N_iter,dx))
        plt.plot(grid_x, QHO_canonical_ensemble(grid_x,beta_fin), label=u'QHO theoretical value')
plt.xlabel(u'x')
plt.ylabel(u'$\pi^{(Q)}(x;\\beta)$')
plt.legend(loc='best',title=u'$\\beta=%.2f$'%beta_fin)
plt.tight_layout()
if save_plot==True:
plot_name = u'pi_x-plot-%s-x_max_%.3f-nx_%d-N_iter_%d-beta_fin_%.3f.eps'\
%(potential_string,x_max,nx,N_iter,beta_fin)
plt.savefig(plot_name)
if show_plot==True:
plt.show()
plt.close()
return 0
plt.rcParams.update({'font.size':15})
run_pi_x_squaring(potential = harmonic_potential, potential_string = 'harmonic_potential',
save_data=True, save_plot=False, show_plot=True)
| [
"[email protected]"
] |