hexsha | size | ext | lang | max_stars_repo_path | max_stars_repo_name | max_stars_repo_head_hexsha | max_stars_repo_licenses | max_stars_count | max_stars_repo_stars_event_min_datetime | max_stars_repo_stars_event_max_datetime | max_issues_repo_path | max_issues_repo_name | max_issues_repo_head_hexsha | max_issues_repo_licenses | max_issues_count | max_issues_repo_issues_event_min_datetime | max_issues_repo_issues_event_max_datetime | max_forks_repo_path | max_forks_repo_name | max_forks_repo_head_hexsha | max_forks_repo_licenses | max_forks_count | max_forks_repo_forks_event_min_datetime | max_forks_repo_forks_event_max_datetime | content | avg_line_length | max_line_length | alphanum_fraction | count_classes | score_classes | count_generators | score_generators | count_decorators | score_decorators | count_async_functions | score_async_functions | count_documentation | score_documentation
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
e359e595d02499d12ce9088ccf34ac138ffada36 | 384 | py | Python | 2018/2/hash.py | octonion/adventofcode | 132e8bf0c9bc0ad64a0e12e22170177df4947e37 | ["MIT"] | 1 | 2019-01-10T09:43:34.000Z | 2019-01-10T09:43:34.000Z | 2018/2/hash.py | octonion/adventofcode | 132e8bf0c9bc0ad64a0e12e22170177df4947e37 | ["MIT"] | null | null | null | 2018/2/hash.py | octonion/adventofcode | 132e8bf0c9bc0ad64a0e12e22170177df4947e37 | ["MIT"] | null | null | null |
data = [i.strip() for i in open("input.txt").readlines()]
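# Advent of Code 2018, day 2 part 1: count letter occurrences per box ID; the checksum
# is (# of IDs with some letter exactly twice) * (# of IDs with some letter exactly three times).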
two = 0
three = 0
for code in data:
counts = {}
for i in range(0,len(code)):
if code[i] in counts.keys():
counts[code[i]] += 1
else:
counts[code[i]] = 1
if (2 in counts.values()):
two += 1
if (3 in counts.values()):
three += 1
print(two*three)
| 21.333333 | 57 | 0.492188 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0.028646 |
e359f824dde6ff522819969499136201763f90fa | 322 | py | Python | conf_site/speakers/tests/factories.py | jasongrout/conf_site | 6b3beb21de8d847cba65dcb6da84464b40739d48 | ["MIT"] | 13 | 2015-05-22T17:10:22.000Z | 2021-07-15T16:45:19.000Z | conf_site/speakers/tests/factories.py | jasongrout/conf_site | 6b3beb21de8d847cba65dcb6da84464b40739d48 | ["MIT"] | 758 | 2015-03-18T13:39:25.000Z | 2022-03-31T13:14:09.000Z | conf_site/speakers/tests/factories.py | jasongrout/conf_site | 6b3beb21de8d847cba65dcb6da84464b40739d48 | ["MIT"] | 16 | 2015-03-24T18:53:17.000Z | 2020-10-22T21:30:02.000Z |
# -*- coding: utf-8 -*-
import factory
from symposion.speakers.models import Speaker
from conf_site.accounts.tests.factories import UserFactory
class SpeakerFactory(factory.django.DjangoModelFactory):
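# Builds Speaker rows for tests: a faker-generated name plus an auto-created related User.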
user = factory.SubFactory(UserFactory)
name = factory.Faker("name")
class Meta:
model = Speaker
| 21.466667 | 58 | 0.73913 | 173 | 0.537267 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.090062 |
e35a2abebd28f1f938e6001756592d76df4ec548 | 179 | py | Python | pokediadb/dbuilder/__init__.py | Kynarth/pokediadb | 97d981909803335f878b9e07ed31d86fc93e7941 | ["MIT"] | null | null | null | pokediadb/dbuilder/__init__.py | Kynarth/pokediadb | 97d981909803335f878b9e07ed31d86fc93e7941 | ["MIT"] | null | null | null | pokediadb/dbuilder/__init__.py | Kynarth/pokediadb | 97d981909803335f878b9e07ed31d86fc93e7941 | ["MIT"] | null | null | null |
# flake8: noqa
import pokediadb.dbuilder.version
import pokediadb.dbuilder.type
import pokediadb.dbuilder.ability
import pokediadb.dbuilder.move
import pokediadb.dbuilder.pokemon
| 25.571429 | 33 | 0.860335 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.078212 |
e35b469d7625c1fa8f422ae121b4eaab1ed606da | 10,171 | py | Python | origin/app_bb_modifier.py | nukeguys/myutil | 65d0aff36ec45bffbd2e52fea0fabfbabd5609b1 | ["Apache-2.0"] | null | null | null | origin/app_bb_modifier.py | nukeguys/myutil | 65d0aff36ec45bffbd2e52fea0fabfbabd5609b1 | ["Apache-2.0"] | null | null | null | origin/app_bb_modifier.py | nukeguys/myutil | 65d0aff36ec45bffbd2e52fea0fabfbabd5609b1 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python3
import sys
import os
import io
from orderedset import OrderedSet
from shell import Shell
import logpath as LogPath
VERSION = '1.1'
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
current_path = os.getcwd()
current_meta = ''
if current_path.endswith('meta-signage') == True:
current_meta = 'signage'
elif current_path.endswith('meta-commercial') == True:
current_meta = 'commercial'
elif current_path.endswith('meta-id') == True:
current_meta = 'id'
else:
print('You should execute this file in [%smeta-id, meta-commercial, meta-signage%s] path' % (WARNING, ENDC))
exit()
def enum(*sequential, **named):
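# Minimal enum substitute (pre-enum-module style): maps each name to an int and
# keeps a reverse_mapping dict from value back to name.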
enums = dict(zip(sequential, range(len(sequential))), **named)
reverse = dict((value, key) for key, value in enums.items())
enums['reverse_mapping'] = reverse
return type('Enum', (), enums)
class AbstractVerifyCommit:
def __init__(self, _appName, _tagName):
self.appName = _appName
self.tagName = _tagName
if(len(self.appName) == 0):
self.appName = self.inputString('app name')
if(len(self.tagName) == 0):
self.tagName = self.inputString('tag name')
self.tagName = self.tagName.strip()
print('AppName : %s, TagName : %s' % (self.appName, self.tagName))
def inputString(self, title):
sys.stdout.write(title + ' : ')
ret = input()
if len(ret) == 0:
sys.exit()
return ret
def process(self):
if not self.getBBFileName() or not self.getBBFileDir():
return
self.changeDir()
self.writeWebOsVersion()
self.fileReadAndWrite()
self.fileRemoveAndRename()
self.changeDirBack()
def changeDir(self):
os.chdir(self.dirPath)
def getShowRefTag(self):
bashCommands = [
"git ls-remote --tags ssh://we.lge.com:29425/id/app/%s.git | grep '%s$' | awk {'print $1'}",
"git ls-remote --tags ssh://wall.lge.com:29448/app/%s.git | grep '%s$' | awk {'print $1'}",
"git ls-remote --tags ssh://we.lge.com:29425/id/webos-pro/%s.git | grep '%s$' | awk {'print $1'}",
"git ls-remote --tags ssh://we.lge.com:29425/id/gpro/starfish/%s.git | grep '%s$' | awk {'print $1'}",
"git ls-remote --tags ssh://we.lge.com:29425/id/module/%s.git | grep '%s$' | awk {'print $1'}",
"git ls-remote --tags ssh://we.lge.com:29425/id/gpro/webos-pro/%s.git | grep '%s$' | awk {'print $1'}",
"git ls-remote --tags ssh://wall.lge.com:29448/webos-pro/%s.git | grep '%s$' | awk {'print $1'}",
"git ls-remote --tags ssh://we.lge.com:29425/id/service/%s.git | grep '%s$' | awk {'print $1'}"
]
err_msg=""
changeAppName=self.appName
if self.appName == 'configd-data':
changeAppName = 'configd-data-starfish'
for bashCommand in bashCommands:
bashCommand = bashCommand % (changeAppName, self.tagName)
str_output, str_err = Shell.execute(bashCommand)
if str_err == "":
return str_output.replace('\n', '')
else:
err_msg = str_err
print('%s%s \n%s!!!!!!!%s' % (WARNING, bashCommand, str_err, ENDC))
sys.exit()
def getBBFileName(self):
bashCommand = "find . -type f -name '%s.b*' -exec basename {} \; " % (self.appName)
str_output, str_err = Shell.execute(bashCommand)
if str_err != "":
print('%s%s Error : %s!!!!!!!%s' % (WARNING, bashCommand, str_err, ENDC))
return False
else:
self.bbfilename=str_output.replace('\n', '')
return True
def getBBFileDir(self):
bashCommand = "find . -type f -name '%s.b*' -exec dirname {} \; " % (self.appName)
str_output, str_err = Shell.execute(bashCommand)
if str_err != "":
print('%s%s Error : %s!!!!!!!%s' % (WARNING, bashCommand, str_err, ENDC))
return False
else:
self.dirPath=str_output.replace('\n','')
return True
def writeWebOsVersion(self):
self.fileContents['WEBOS_VERSION'] = self.tagName.split('/')[::-1][0] + '_' + self.getShowRefTag()
def fileReadAndWrite(self):
with open(self.bbfilename, 'r') as readF, open('my_' + self.bbfilename, 'w') as writeF:
isWritten = False
for x in readF.readlines():
for key in self.fileContents:
if x.startswith(key):
key_content=x.replace(key, '').replace('=', '').replace('"','').split()[0]
version=key_content.split('-')[0]
writeF.write(key + ' = "' + version + '-' + self.fileContents[key] + '"\n')
isWritten = True
break;
if isWritten == False:
writeF.write(x)
isWritten = False
def fileRemoveAndRename(self):
Shell.execute('rm %s' % self.bbfilename)
Shell.execute('mv my_%s %s' % (self.bbfilename, self.bbfilename))
def changeDirBack(self):
os.chdir(current_path)
class App(AbstractVerifyCommit):
fileContents = { 'WEBOS_VERSION':'' }
def __init__(self, appName, tagName):
AbstractVerifyCommit.__init__(self, appName, tagName)
class InputHelper:
def process(self, listLogs):
print('%sVersion%s : %s' % (OKBLUE, ENDC, VERSION))
print('If you have any questions, please inquire at %s%[email protected]%s' % (UNDERLINE, BOLD, ENDC))
for log in listLogs:
App(log.name, 'submissions/' + log.tag).process()
#sys.stdout.write('Multiple Apps?[Y/N] : ')
#multipleApps=input()
#if(multipleApps == 'Y' or multipleApps == 'y'):
# print('')
#else:
# App().process()
class Log:
list_name = []
list_tag = []
IDX=enum('RELEASE_NOTES', 'DETAILED_NOTES', 'TESTING_PERFORMED', 'QA_NOTES', 'ISSUES_ADDRESSED')
list_title = [
':Release Notes:',
':Detailed Notes:',
':Testing Performed:',
':QA Notes:',
':Issues Addressed:'
]
def __init__(self):
self.name = ''
self.tag = ''
self.items = [ [], [], [], [], [] ]
def process(self, filename):
with open(filename, 'r') as readF:
idx=-1
for strX in readF.readlines():
if len(self.name) == 0:
self.name = strX.split('=')[0]
self.tag = strX.split('=')[1]
self.list_name.append(self.name)
self.list_tag.append(self.tag)
elif len(self.list_title) > (idx+1) and strX.startswith(self.list_title[idx+1]):
idx=idx+1
else:
if idx == Log.IDX.QA_NOTES or idx == Log.IDX.ISSUES_ADDRESSED:
if strX.lower() == 'na' or strX.lower() == 'none':
continue
if idx != -1:
self.items[idx].append(strX)
def printAll(self):
print ('name : %s, tag : %s' % (self.name, self.tag))
for item in self.items:
for content in item:
print (content)
class GeneratorLog():
def __init__(self):
GeneratorLog.topdir = LogPath.getAppLogPath()
if GeneratorLog.topdir == '':
print('You must make log first!')
sys.exit()
self.listLogs = []
def printLog(self, logfile):
with open(logfile, 'r') as readF:
for i in readF:
sys.stdout.write(i)
def makeLog(self):
logfile = self.topdir + 'commit_log'
with open(logfile, 'w') as writeF:
writeF.write(self.makeTitle() + '\n')
for i in range(0, len(Log.list_title)):
self.makeNotes(Log.list_title[i], i, writeF)
#self.printLog(logfile)
def makeNotes(self, title, idx, writeF):
writeF.write(title + '\n')
items = []
for log in self.listLogs:
if idx == Log.IDX.DETAILED_NOTES:
items.append('[' + log.name[log.name.rfind('.')+1:] + ']\n')
for item in log.items[idx]:
items.append(item)
#if idx > Log.IDX.DETAILED_NOTES:
# items = list(OrderedSet(items))
for item in items:
writeF.write(item)
def getDuplicateTitle(self):
l = list(self.listLogs[0].list_name[0])
for name in self.listLogs[0].list_name[1:]:
i = 0
for char in list(name):
if len(l) <= i or l[i] != char:
return ''.join(l[:i])
i += 1
return ''
def makeTitle(self):
duplicateTitles = self.getDuplicateTitle()
containsDuplicateTitle = len(duplicateTitles) > 0
title_str = io.StringIO()
if containsDuplicateTitle == True:
title_str.write(duplicateTitles + '{')
for name, tag in zip(self.listLogs[0].list_name, self.listLogs[0].list_tag):
title_str.write(name[len(duplicateTitles):] + '=' + tag + ',')
title = title_str.getvalue()
title = title[:len(title)-1]
return title + '}' if containsDuplicateTitle == True else title
def parseLog(self):
for root, dirs, files in os.walk(self.topdir, topdown=False):
for name in files:
if name.lower().endswith('.log'):
filename = os.path.join(root, name)
log = Log()
log.process(filename)
#log.printAll()
self.listLogs.append(log)
if __name__ == '__main__':
generater = GeneratorLog()
generater.parseLog()
generater.makeLog()
InputHelper().process(generater.listLogs)
| 39.730469 | 125 | 0.534756 | 9,046 | 0.889391 | 0 | 0 | 0 | 0 | 0 | 0 | 2,166 | 0.212958 |
e35b94ca7170c796ccad0fbd61ea4ee542cd52e0 | 3,514 | py | Python | gamebike/controlmapbits.py | johnlpage/gamebike | 429736d0238dca2961763f2a33d8e4e72ed97364 | ["Apache-2.0"] | null | null | null | gamebike/controlmapbits.py | johnlpage/gamebike | 429736d0238dca2961763f2a33d8e4e72ed97364 | ["Apache-2.0"] | null | null | null | gamebike/controlmapbits.py | johnlpage/gamebike | 429736d0238dca2961763f2a33d8e4e72ed97364 | ["Apache-2.0"] | null | null | null |
# These were used when I was trying to map between controllers
# To map to a wheel - but was defeated in that by using a driver
# 2021 comment (What did I mean there?)
GAMEPAD_TRIANGLE = (0, 0x08)
GAMEPAD_CIRCLE = (0, 0x04)
GAMEPAD_CROSS = (0, 0x02)
GAMEPAD_SQUARE = (0, 0x01)
GAMEPAD_DPAD_MASK = 0x0F
GAMEPAD_DPAD_NONE = (2, 0x0F)
GAMEPAD_DPAD_U = (2, 0x00)
GAMEPAD_DPAD_R = (2, 0x02)
GAMEPAD_DPAD_D = (2, 0x04)
GAMEPAD_DPAD_L = (2, 0x06)
GAMEPAD_PSMENU = (1, 0x10)
GAMEPAD_SELECT = (1, 0x01)
GAMEPAD_START = (1, 0x02)
GAMEPAD_LJOY_BUTTON = (1, 0x04)
GAMEPAD_RJOY_BUTTON = (1, 0x08)
GAMEPAD_L1 = (0, 0x10)
GAMEPAD_R1 = (0, 0x20)
GAMEPAD_L2 = (0, 0x40)
GAMEPAD_R2 = (0, 0x80)
GAMEPAD_RTRIGGER = 18
GAMEPAD_LTRIGGER = 17
# These are Bytes not Bits
GAMEPAD_LJOY_X = 3
GAMEPAD_LJOY_Y = 4
GAMEPAD_RJOY_X = 5
GAMEPAD_RJOY_Y = 6
CLICKER_BUTTONS = 2
CLICKER_LEFT = [0x4B]
CLICKER_RIGHT = [0x4E]
CLICKER_UP = [0x05]
CLICKER_DOWN = [0x3E, 0x29] # Toggles
STEER_MIN = 0x0000
STEER_MAX = 0x3FFF
STEER_MID = 0x1FFF
WHEEL_NEUTRAL = [0x08, 0x00, 0x00, 0x5E, 0x00, 0x20, 0x7F, 0xFF]
WHEEL_TRIANGLE = (0, 0x80)
WHEEL_CIRCLE = (0, 0x40)
WHEEL_CROSS = (0, 0x10)
WHEEL_SQUARE = (0, 0x20)
WHEEL_DPAD_MASK = 0x0F
WHEEL_DPAD_NONE = (0, 0x08)
WHEEL_DPAD_U = (0, 0x00)
WHEEL_DPAD_R = (0, 0x02)
WHEEL_DPAD_D = (0, 0x04)
WHEEL_DPAD_L = (0, 0x06)
WHEEL_RPADDLE = (1, 0x01)
WHEEL_LPADDLE = (1, 0x02)
WHEEL_L1 = (1, 0x80)
WHEEL_L2 = (1, 0x08)
WHEEL_R1 = (1, 0x40)
WHEEL_R2 = (1, 0x04)
WHEEL_SELECT = (1, 0x10)
WHEEL_START = (1, 0x20)
WHEEL_PSMENU = (2, 0x08)
WHEEL_GEARUP = (2, 0x01)
WHEEL_GEARDOWN = (2, 0x02)
WHEEL_BACK = (2, 0x04)
WHEEL_ADJUST_CLOCKWISE = (2, 0x10)
WHEEL_ADJUST_ANTICLOCKWISE = (2, 0x20)
WHEEL_PLUS = (2, 0x80)
WHEEL_MINUS = (2, 0x40)
# Bytes
WHEEL_WHEEL_HIGHBYTE = 5
WHEEL_WHEEL_LOWBYTE = 4 # 0000-EFF3 But 0000 is extreme
WHEEL_ACCELERATEBYTE = 6 # 0-FF 0 IS DOWN
WHEEL_BRAKEBYTE = 7 # 0-FF 0 IS DOWN
# (FromByte,From Bit) -> (ToByte,ToBit)
# Wheel Has dedicated Gear buttons and Shifter that arent on the controller
# Stick Click is not used in TDU2 at all so will use that
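# Example reading of the tuples below: (GAMEPAD_TRIANGLE, WHEEL_TRIANGLE) means
# "when byte 0 bit 0x08 is set in the gamepad report, set byte 0 bit 0x80 in the wheel report".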
BUTTON_MAPPINGS = [
(GAMEPAD_TRIANGLE, WHEEL_TRIANGLE),
(GAMEPAD_CIRCLE, WHEEL_CIRCLE),
(GAMEPAD_SQUARE, WHEEL_SQUARE),
(GAMEPAD_CROSS, WHEEL_CROSS),
(GAMEPAD_R1, WHEEL_R2),
(GAMEPAD_L1, WHEEL_L2),
(GAMEPAD_PSMENU, WHEEL_PSMENU),
(GAMEPAD_START, WHEEL_START),
(GAMEPAD_SELECT, WHEEL_SELECT),
(GAMEPAD_LJOY_BUTTON, WHEEL_GEARDOWN),
(GAMEPAD_RJOY_BUTTON, WHEEL_GEARUP),
]
#These made it work in PS3 menu screen
XMB_BUTTON_MAPPINGS = [
(GAMEPAD_TRIANGLE, WHEEL_TRIANGLE),
(GAMEPAD_CIRCLE, WHEEL_CIRCLE),
(GAMEPAD_CROSS, WHEEL_SQUARE),
(GAMEPAD_SQUARE, WHEEL_CROSS),
(GAMEPAD_R1, WHEEL_R2),
(GAMEPAD_L1, WHEEL_L2),
(GAMEPAD_PSMENU, WHEEL_PSMENU),
(GAMEPAD_START, WHEEL_START),
(GAMEPAD_SELECT, WHEEL_SELECT),
(GAMEPAD_LJOY_BUTTON, WHEEL_GEARDOWN),
(GAMEPAD_RJOY_BUTTON, WHEEL_GEARUP),
]
DPAD_MAPPINGS = [
(GAMEPAD_DPAD_NONE, WHEEL_DPAD_NONE),
(GAMEPAD_DPAD_U, WHEEL_DPAD_U),
(GAMEPAD_DPAD_D, WHEEL_DPAD_D),
(GAMEPAD_DPAD_L, WHEEL_DPAD_L),
(GAMEPAD_DPAD_R, WHEEL_DPAD_R),
]
STEAM_BUTTON_MAPPINGS = [
WHEEL_CROSS,WHEEL_CIRCLE,WHEEL_TRIANGLE,WHEEL_SQUARE,
WHEEL_START,WHEEL_PSMENU,WHEEL_SELECT,
WHEEL_GEARUP,WHEEL_GEARDOWN,WHEEL_L1,WHEEL_R1
]
STEAM_BUTTONS2_MAPPINGS = [WHEEL_LPADDLE,WHEEL_RPADDLE,WHEEL_PLUS,WHEEL_MINUS]
STEAM_DPAD_MAPPINGS = [WHEEL_DPAD_U, WHEEL_DPAD_L, WHEEL_DPAD_D, WHEEL_DPAD_R]
| 24.746479 | 79 | 0.726807 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 479 | 0.136312 |
e35c3059ee54c88a4dcfc9000cdd58922b31c667 | 4,185 | py | Python | conveyordashboard/volumes/tables.py | Hybrid-Cloud/birdie-dashboard | a3dc370c50ef9f33498a8ed4180ff5009532a79f | ["Apache-2.0"] | null | null | null | conveyordashboard/volumes/tables.py | Hybrid-Cloud/birdie-dashboard | a3dc370c50ef9f33498a8ed4180ff5009532a79f | ["Apache-2.0"] | null | null | null | conveyordashboard/volumes/tables.py | Hybrid-Cloud/birdie-dashboard | a3dc370c50ef9f33498a8ed4180ff5009532a79f | ["Apache-2.0"] | null | null | null |
# Copyright (c) 2017 Huawei, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.template import defaultfilters as filters
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from conveyordashboard.common import actions as common_actions
from conveyordashboard.common import constants as consts
from conveyordashboard.common import resource_state
class CreatePlan(common_actions.CreatePlan):
def allowed(self, request, volume=None):
return volume.status in resource_state.VOLUME_CLONE_STATE
class VolumeFilterAction(tables.FilterAction):
def filter(self, table, volumes, filter_string):
q = filter_string.lower()
def comp(volume):
return q in volume.name.lower()
return filter(comp, volumes)
def get_size(volume):
return _("%sGiB") % volume.size
def get_volume_type(volume):
return volume.volume_type if volume.volume_type != "None" else None
def get_encrypted_value(volume):
if not hasattr(volume, 'encrypted') or volume.encrypted is None:
return _("-")
elif volume.encrypted is False:
return _("No")
else:
return _("Yes")
class VolumesTableBase(tables.DataTable):
STATUS_CHOICES = (
("available", True),
)
STATUS_DISPLAY_CHOICES = (
("available", pgettext_lazy("Current status of a Volume",
u"Available")),
)
name = tables.Column("name",
verbose_name=_("Name"),
link="horizon:project:volumes:volumes:detail")
description = tables.Column("description",
verbose_name=_("Description"),
truncate=40)
size = tables.Column(get_size,
verbose_name=_("Size"),
attrs={'data-type': 'size'})
status = tables.Column("status",
verbose_name=_("Status"),
status_choices=STATUS_CHOICES,
display_choices=STATUS_DISPLAY_CHOICES)
def get_object_display(self, obj):
return obj.name
class VolumesFilterAction(tables.FilterAction):
def filter(self, table, volumes, filter_string):
"""Naive case-insensitive search."""
q = filter_string.lower()
return [volume for volume in volumes
if q in volume.name.lower()]
class VolumesTable(VolumesTableBase):
name = tables.Column("name",
verbose_name=_("Name"),
link="horizon:project:volumes:volumes:detail")
volume_type = tables.Column(get_volume_type,
verbose_name=_("Type"))
availability_zone = tables.Column("availability_zone",
verbose_name=_("Availability Zone"))
bootable = tables.Column('is_bootable',
verbose_name=_("Bootable"),
filters=(filters.yesno, filters.capfirst))
encryption = tables.Column(get_encrypted_value,
verbose_name=_("Encrypted"),
link="horizon:project:volumes:"
"volumes:encryption_detail")
class Meta(object):
name = 'volumes'
verbose_name = _("Volumes")
css_classes = "table-res %s" % consts.CINDER_VOLUME
status_columns = ["status"]
table_actions = (common_actions.CreatePlanWithMultiRes,
VolumeFilterAction)
row_actions = (CreatePlan,)
| 35.769231 | 78 | 0.61601 | 2,817 | 0.673118 | 0 | 0 | 0 | 0 | 0 | 0 | 1,069 | 0.255436 |
e35f9878c2b4e671e6f25e427a7cdba3e0466f0b | 9,046 | py | Python | tools/tver/tver.py | jackyzy823/restrictionbreaker | 1aabce0c98c50782a592fa6c91abd72e82e59a6a | ["Unlicense"] | 5 | 2019-05-29T21:34:34.000Z | 2021-07-25T10:58:57.000Z | tools/tver/tver.py | jackyzy823/restrictionbreaker | 1aabce0c98c50782a592fa6c91abd72e82e59a6a | ["Unlicense"] | null | null | null | tools/tver/tver.py | jackyzy823/restrictionbreaker | 1aabce0c98c50782a592fa6c91abd72e82e59a6a | ["Unlicense"] | 1 | 2020-06-25T14:14:41.000Z | 2020-06-25T14:14:41.000Z |
import requests
import re
import sqlite3
db =sqlite3.connect("db_tver.db",check_same_thread =False)
cur = db.cursor()
cur.execute(
'''CREATE TABLE if not exists `tver` (
`reference_id` TEXT NOT NULL,
`service` TEXT NOT NULL,
`player_id` TEXT NOT NULL,
`name` TEXT NOT NULL,
`title` TEXT,
`subtitle` TEXT,
`catchup_id` TEXT,
`url` TEXT,
`service_name` TEXT,
`id` TEXT NOT NULL,
`json` TEXT,
`updated_at` TIMESTAMP,
`done` BOOL,
UNIQUE (reference_id,player_id,id)
);''')
'''
/corner/
/episode/
/feature/
'''
pagepattern = re.compile(r'''addPlayer\(\s*?'(?P<player_id>.*?)',\s*?'(?P<player_key>.*?)',\s*?'(?P<catchup_id>.*?)',\s*?'(?P<publisher_id>.*?)',\s*?'(?P<reference_id>.*?)',\s*?'(?P<title>.*?)',\s*?'(?P<subtitle>.*?)',\s*?'(?P<service>.*?)',\s*?'(?P<servicename>.*?)',''')
policykeypattern = re.compile(r'''catalog\(\{accountId:\"?(?P<accountId>.*?)\"?,policyKey:\"(?P<policyKey>.*?)\"''')
BCOV_POLICY = {
#YTV
"5330942432001":"BCpkADawqM0kGrWxZoXJvJj5Uv6Lypjp4Nrwzz1ktDAuEbD1r_pj0oR1900CRG04FFkxo0ikc1_KmAlB4uvq_GnFwF4IsG_v9jhYOMajC9MkdVQ-QrpboS7vFV8RvK20V5v-St5WGPfXotPx",
#TX
"3971130137001":"BCpkADawqM1F2YPxbuFJzWtohXjxdgDgIJcsnWacQKaAuaf0gyu8yxCQUlca9Dh7V0Uu_8Rt5JUWZTpgcqzD_IT5hRVde8JIR7r1UYR73ne8S9iLSroqTOA2P-jtl2EUw_OrSMAtenvuaXRF",
#TBS
"4031511847001":"BCpkADawqM1n_azNkrwm-kl2UhijTLt4W7KZ6KS9HluAoLPvyRFu2X4Xu2dUuW-lLOmc6X7WjsiBwh83m8ecNmxl-pVy9w3M9iI6-en-_wIDvNJixpoMf4BhdOPtwO_7XIol9P3wVrq2BIzw",
"4394098881001":"BCpkADawqM3m-3484dphPL5raj3jQJVlFecOYAvpxhtJaK99BVRKtxd9SC6q0kOsknI1FD3kplVUaJzneAQb55EkCcDHrD9m_yoesmjsIfJpKQXJKfmQ5LfAFJnmf2Sv48heP_R1PGznwbAn",
#NTV
"4394098882001":"BCpkADawqM1s6XkqRoC2a0eEORY7FFF780eHkHQZ93Fw752A9swymrSMZEVF1d7G3mSby3Etzj8MGJp_ZwXpbSTH1ApfZxZ1FSPQ4LXDQhpaMRADtCbxKFTpAxGYwN61DYKKksmg4uwcdhLD",
#MBS
"5102072605001":"BCpkADawqM1VhDl0FtgrrM8jB-hVNkcrdrx4x9C_60OSeN4jIHynGkIKw0PY1cOsRqQYJOnJRscPAbdPTcpzZ_4g89Gcte_yQFW-yeWxzrPECulIh9ZlaZsJ_3rH7Gjs_RnuWHx_lTzilaxh",
#KTV
"5718741494001":"BCpkADawqM1llDtMelQ9nQyE91bAc-E5T1B0135MCCRZ_o4FlDkGWQY8t8Nrt1fJKAgui-kLefX-JGaRItrDXh_C1GlIgCSv-rhNPQYKJsY8nZp_IoJ38Mf3B5BSJLFulW0QhgQduipc9j4D",
#EX no publisherid
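# NOTE: "4031511847001" also appears above for TBS; in a dict literal the later (EX) value wins.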
"4031511847001":"BCpkADawqM2N0e6IdrmQn-kEZJ0jRi-Dlm0aUZ9mVF2lcadunJzMVYD6j_51UZzQ3mXuIeV8Zx_UUvbGeeJn73SSrpm0xD7qtiKULPP2NEsp_rgKoVxVWTNZAHN-JAHcuIpFJT7PvUj6gpZv",
#ABC
"5102072603001":"BCpkADawqM2NfzEA47jZiJNK0SYahFenNwAtoehfrIAaCqxmHjBidnt_YfvFnp5j-Zi58FPj-zXAHATYU1nnOOuEf9XXV8JRGYSuZ5dgyNc2RjGv2Ej5zGfhxWs3_p4F7huxtbAD9fzQlg7b",
#World cup
"5764318572001":"BCpkADawqM3KJLCLszoqY9KsoXN2Mz52LwKx4UXYRuEaUGr-o3JBSHmz_0WRicxowBj8vmbGRK_R7Us96DdBYuYEoVX9nHJ3DjkVW5-8L6bRmm6gck8IaeLLw21sM6mOHtNs9pIJPF6a4qSZlO6t_RlkpMY6sasaIaSYlarJ_8PFMPdxxfY6cGtJDnc"
}
def dbCommit(datatuple):
try:
cur.execute("insert into tver (`reference_id` , `service` , `player_id` , `name` , `title` , `subtitle` , `catchup_id` , `url` , `service_name` , `id` , `json` , `updated_at` , `done`) values (?,?,?,?,?,?,?,?,?,?,?,?,0);",
datatuple);
except sqlite3.IntegrityError:
pass
# print "duplicate: {0}: {1}".format(datatuple[1],datatuple[7])
db.commit()
def parsePage(url):
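# Scrape a tver.jp episode page for the embedded addPlayer(...) arguments, then resolve
# playback info: Fuji TV (cx) goes through the FOD HTML endpoints, every other
# broadcaster through the Brightcove playback API using the player's policy key.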
res = pagepattern.search(requests.get(url).content)
if not res:
raise ValueError(url)
resdict = res.groupdict()
service = resdict["service"]
servicename = resdict["servicename"]
publisher_id = resdict['publisher_id']
catchup_id = resdict['catchup_id']
title = resdict["title"]
title = title.decode("utf-8").strip(u'\u3000').strip().encode("utf-8")
subtitle = resdict["subtitle"]
subtitle = subtitle.decode("utf-8").strip(u'\u3000').strip().encode("utf-8")
#.strip(u'\u3000').strip()
if service == 'cx':
# id = publisher_id
player_id = ''
player_key = ''
reference_id = ''
#PUBLISHERID (like 4f50810003) FOR CX
if len(publisher_id) == 4:
infoapi = "https://i.fod.fujitv.co.jp/plus7/web/{0}.html".format(publisher_id)
resp = requests.get(infoapi)
else:
infoapi = "https://i.fod.fujitv.co.jp/plus7/web/{0}/{1}.html".format(publisher_id[0:4],publisher_id)
resp = requests.get(infoapi)
# print "url for cx :{0}".format(url)
#https://i.fod.fujitv.co.jp/plus7/web/ + publisher_id[0:4]+"/"+publisher_id+".html"
try:
name = re.findall(r'_wa\.parameters\[ \'title\' \] = \'(.*)\';',resp.content)[0].strip().decode('utf-8')
except:
print "url for cx :{0},{1}".format(url,infoapi)
name = ""
meta = re.findall(r'else\s*?{\s*?meta = \'(.*?)\';',resp.content,re.S)[0]
if len(meta) == 0:
meta = 'me113'
m3u8 = re.findall(r'url: "(.*?)",',resp.content)[0].replace('" + meta + "',meta)
if len(publisher_id)==4:
publisher_id = re.findall("([^/]*?)"+meta,m3u8)[0]
if len(subtitle) ==0:
resp = requests.get("http://fod-sp.fujitv.co.jp/s/tver/redir.aspx?ser={0}".format(publisher_id))
if resp.url.find("error")!=-1:
#for those pasts ->
subtitle = publisher_id[-4:]
else:
subtitle = re.findall(r'''episode-title\">\s*?<h3>(.*?)</h3>''',resp.content)[0].replace('\xe3\x80\x90\xe7\x84\xa1\xe6\x96\x99\xe3\x80\x91','').strip()
return (reference_id,
service,
player_id,
name.replace("/","_"),
title.replace("/","_").decode("utf-8").strip(u'\u3000').strip(),
subtitle.replace("/","_").decode("utf-8").strip(u'\u3000').strip(),
catchup_id,
url,
servicename.decode("utf-8"),
publisher_id,
m3u8,
None)
# raise ValueError("url for tx :{0}".format(url))
if service != 'tx':
reference_id = "ref:"+resdict['reference_id']
else:
reference_id = resdict['reference_id']
player_id = resdict['player_id'] #is also accountid
player_key = resdict['player_key']
infoapi = "https://players.brightcove.net/{0}/{1}_default/index.min.js".format(player_id,player_key)
res = policykeypattern.search(requests.get(infoapi).content)
if not res:
print infoapi
policyKey = BCOV_POLICY[player_id]
#use default?
else:
resdict = res.groupdict()
policyKey = resdict['policyKey']
playinfoapi = "https://edge.api.brightcove.com/playback/v1/accounts/{0}/videos/{1}".format(player_id,reference_id)
res = requests.get(playinfoapi,headers = {"X-Forwarded-For":"1.0.16.0","Accept":"application/json;pk={0}".format(policyKey)})
resj = res.json()
# import pdb;pdb.set_trace()
return (reference_id,
service,
player_id,
resj['name'].replace("/","_"),
title.replace("/","_").decode("utf-8").strip(u'\u3000').strip(),
subtitle.replace("/","_").decode("utf-8").strip(u'\u3000').strip(),
catchup_id,
url,
servicename.decode("utf-8"),
resj["id"],
res.content.decode("utf-8") ,
resj["updated_at"])
#source
#res["name"]
# "published_at"
# "duration published_at updated_at created_at
# id is the realid
linkPattern = re.compile(r'''(\/episode\/.*?)\/?\"|(\/corner\/.*?)\/?\"|(\/feature\/.*?)\/?\"''')
def filter_finish(urls): #set
return urls
# cur.execute("select url from tver where url in ("+",".join("?"*len(urls))+");",tuple(urls))
# m_url = map(lambda x : x[0] , cur.fetchall())
# return urls - set(m_url)
def findAll():
for url in ("/","/ranking", "/soon", "/drama", "/variety", "/documentary", "/anime", "/sport", "/other"):
page = requests.get("https://tver.jp{0}".format(url)).content
urls = linkPattern.findall(page)
links = filter_finish(set(map(lambda x : "https://tver.jp{0}".format(filter (lambda y:y ,x)[0]) ,urls))) #without right /
for i in links:
try:
dbCommit(parsePage(i))
except Exception as e:
print str(e)
print i
def updateJson():
cur.execute("select rowid,url from tver where done = -2 ; ")
res = cur.fetchall()
for i in res:
r = parsePage(i[1])
cur.execute("update tver set json=? ,done=0 where rowid= ?",(r[10],i[0]))
db.commit()
def findAllByBrand():
for svc in ("tbs","tx", "ex", "ntv", "cx", "ktv", "mbs", "abc", "ytv"):
page = requests.get("https://tver.jp/{0}".format(svc)).content
urls = linkPattern.findall(page)
links = filter_finish(set(map(lambda x : "https://tver.jp{0}".format(filter (lambda y:y ,x)[0]) ,urls))) #without right /
for i in links:
try:
dbCommit(parsePage(i))
except Exception as e:
print str(e)
print i
findAll()
findAllByBrand()
# updateJson()
| 41.118182 | 272 | 0.624254 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,722 | 0.521999 |
e360941b07ce2d49e4d682a79c218a27dc642b96 | 1,696 | py | Python | tests/test_init.py | nuvolos-cloud/resolos | 0918066cab7b11ef04ae005f3e052b14a65ded68 | ["MIT"] | 1 | 2021-11-30T06:47:24.000Z | 2021-11-30T06:47:24.000Z | tests/test_init.py | nuvolos-cloud/resolos | 0918066cab7b11ef04ae005f3e052b14a65ded68 | ["MIT"] | 1 | 2021-04-08T12:56:39.000Z | 2021-04-08T12:56:39.000Z | tests/test_init.py | nuvolos-cloud/resolos | 0918066cab7b11ef04ae005f3e052b14a65ded68 | ["MIT"] | null | null | null |
from pathlib import Path
from click.testing import CliRunner
from resolos.interface import res, res_run
from resolos.shell import run_shell_cmd
from tests.common import verify_result
import logging
logger = logging.getLogger(__name__)
def test_init_empty():
runner = CliRunner()
with runner.isolated_filesystem() as fs:
verify_result(runner.invoke(res, ["-v", "debug", "init", "-y"]))
def test_init_from_archive():
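# Exercises `res init -u <archive>`: the example tarball should unpack into the project
# folder, and the subsequent `res_run` invocation should write var_spx_monthly_mean.csv.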
runner = CliRunner()
with runner.isolated_filesystem() as fs:
project_folder = Path(fs)
run_shell_cmd("which python")
verify_result(
runner.invoke(
res,
[
"-v",
"debug",
"init",
"-u",
"https://resolos.s3.eu-central-1.amazonaws.com/examples/v0.3.0/data_with_pandas.tar.gz",
],
)
)
assert (project_folder / "README.md").exists()
assert (project_folder / "process_dataset.py").exists()
assert (project_folder / "var_spx_monthly.csv").exists()
assert not (project_folder / "var_spx_monthly_mean.csv").exists()
output = verify_result(
runner.invoke(
res_run,
["which python; python process_dataset.py"],
)
)
assert "Written the mean of the columns to var_spx_monthly_mean.csv" in output
assert (project_folder / "README.md").exists()
assert (project_folder / "process_dataset.py").exists()
assert (project_folder / "var_spx_monthly.csv").exists()
assert (project_folder / "var_spx_monthly_mean.csv").exists()
| 34.612245 | 108 | 0.595519 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 401 | 0.236439 |
e3619a35a7b1327801ed3520deafd02b23723cdc | 946 | py | Python | performence/get_flops.py | idealboy/mmsr | f8284e9fb977a74db1904e6034b768805845e138 | ["Apache-2.0"] | 2 | 2020-08-05T05:13:14.000Z | 2020-11-10T03:37:48.000Z | performence/get_flops.py | idealboy/mmsr | f8284e9fb977a74db1904e6034b768805845e138 | ["Apache-2.0"] | null | null | null | performence/get_flops.py | idealboy/mmsr | f8284e9fb977a74db1904e6034b768805845e138 | ["Apache-2.0"] | null | null | null |
import os
import sys
op_name = []
with open("name.txt") as lines:
for line in lines:
line = line.strip()
op_name.append(line)
with open("shape.txt") as lines:
index = 0
for line in lines:
name = op_name[index]
line = line.strip()
items = line.split("\t")
if "conv" in name:
input_shape = [int(s) for s in items[0].split("#")[0].split("[")[1].split("]")[0].split(",")]
weight_shape = [int(s) for s in items[0].split("#")[1].split("[")[1].split("]")[0].split(",")]
output_shape = [int(s) for s in items[1].split("[")[1].split("]")[0].split(",")]
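# Assuming weight layout [kH, kW, inC, outC]: each output element costs kH*kW*inC
# multiply-adds, counted here as 2 FLOPs apiece.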
flops = output_shape[0] * output_shape[1] * output_shape[2] * output_shape[3] * weight_shape[0] * weight_shape[1] * weight_shape[2] * 2
elif "add" in name:
output_shape = [int(s) for s in items[1].split("[")[1].split("]")[0].split(",")]
flops = output_shape[0] * output_shape[1] * output_shape[2] * output_shape[3]
else:
flops = 0
print flops
index+=1
| 32.62069 | 138 | 0.605708 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 78 | 0.082452 |
e3621a99c91f66bf8b690df838e64f5433dfeefe | 6,086 | py | Python | pan/mesh_bee_wrapper.py | KillingJacky/CloudPan | 128f438b8d84c734aad94ad3e03f6c3aa12b66af | ["MIT"] | 1 | 2016-08-29T14:28:45.000Z | 2016-08-29T14:28:45.000Z | pan/mesh_bee_wrapper.py | KillingJacky/CloudPan | 128f438b8d84c734aad94ad3e03f6c3aa12b66af | ["MIT"] | null | null | null | pan/mesh_bee_wrapper.py | KillingJacky/CloudPan | 128f438b8d84c734aad94ad3e03f6c3aa12b66af | ["MIT"] | null | null | null |
# Wrapper for Mesh Bee library
# helping to easier communicate with Mesh Bee module
#
# Copyright (C) 2014 at seeedstudio
# Author: Jack Shao ([email protected])
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import re
import glob
import binascii
import logging
from pan.mesh_bee import *
from factory import Factory
class MeshBeeWrapper(object):
"""
"""
default_port_name = 'serial'
serial = None
meshbee = None
logger = None
buffer = dict()
def log(self, level, message):
if self.logger:
self.logger.log(level, message)
def disconnect(self):
"""
Closes serial port
"""
self.meshbee.halt()
self.serial.close()
return True
def connect(self):
"""
Creates an meshbee instance
"""
try:
self.log(logging.INFO, "Connecting to meshbee")
self.meshbee = MeshBee(self.serial, self.logger, callback=self.process)
except Exception,e:
print e
return False
return True
def process(self, packet):
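# Callback invoked for each frame parsed from the radio: remote AT responses carry
# DIO/ADC readings, while data packets are line-buffered per source address below.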
self.log(logging.DEBUG, packet)
try:
address = packet['src_addr64']
except KeyError:
self.log(logging.ERROR, "the resp packet is not a remote resp.")
return
if packet['frame_type'] == 'API_REMOTE_AT_RESP':
if packet['cmd_id_str'] == 'ATIO':
self.on_message(address, packet['resp_body']['dio'], packet['resp_body']['state'], 'dio')
elif packet['cmd_id_str'] == 'ATAD':
self.on_message(address, packet['resp_body']['src'], packet['resp_body']['value'], 'adc')
# Data sent through the serial connection of the remote radio
if packet['frame_type'] == 'API_DATA_PACKET':
# Some streams arrive split in different packets
# we buffer the data until we get an EOL
self.buffer[address] = self.buffer.get(address,'') + packet['data']
count = self.buffer[address].count('\n')
if (count):
lines = self.buffer[address].splitlines()
try:
self.buffer[address] = lines[count:][0]
except:
self.buffer[address] = ''
for line in lines[:count]:
line = line.rstrip()
try:
port, value = line.split(':', 1)
except:
value = line
port = self.default_port_name
self.on_message(address, port, value, 'data')
#oliver add type: dio/adc/data
def on_message(self, address, port, value, type):
"""
Hook for outgoing messages.
"""
None
def send_message (self, type, address, msg, port = b'\x0c'):
"""
Sends a message to a remote radio
"""
self.log(logging.DEBUG, 'send_message type: %s'%type)
try:
addr_len = len(address)
address = binascii.unhexlify(address)
if len(port) > 1:
port = globals()[port]
if type == 'dio':
#number = struct.pack('< B',port)
rw = GPIO_RD
value = PIN_LOW
if msg is not None:
rw = GPIO_WR
value = PIN_LOW if int(msg) == 0 else PIN_HIGH
if addr_len > 4:
self.meshbee.API_REMOTE_AT_REQ(cmd_id = ATIO, dest_addr64 = address, rw = rw, dio=port, state=value)
else:
self.meshbee.API_REMOTE_AT_REQ(cmd_id = ATIO, dest_addr = address, rw = rw, dio=port, state=value)
self.log(logging.DEBUG, "send remote_at cmd: ATIO at %s "% DIO_name_map[port])
return True
elif type == 'adc':
src = port
if addr_len > 4:
self.meshbee.API_REMOTE_AT_REQ(cmd_id = ATAD, dest_addr64=address, src = src)
else:
self.meshbee.API_REMOTE_AT_REQ(cmd_id = ATAD, dest_addr=address, src = src)
self.log(logging.DEBUG, "send remote_at cmd: ATAD at %s"% AIO_name_map[src])
return True
elif type == 'data':
print msg
msg_len = len(msg)
if addr_len > 4:
self.meshbee.API_DATA_PACKET(option = b'\x00', data = msg, data_len = struct.pack('> B', msg_len), dest_addr64 = address)
else:
self.meshbee.API_DATA_PACKET(option = b'\x00', data = msg, data_len = struct.pack('> B', msg_len), dest_addr = address)
self.log(logging.DEBUG, "sent data: %s"% msg)
return True
elif type == 'rpc':
pass
except Exception,e:
print e
pass
return False
Factory.register(MeshBeeWrapper)
| 34.977011 | 141 | 0.565067 | 4,636 | 0.761748 | 0 | 0 | 0 | 0 | 0 | 0 | 2,124 | 0.348998 |
e36654005301b9cf41913be091e578a74c259424 | 1,669 | py | Python | pythonapm/collector/test_reqhandler.py | nextapm/pythonapm | ddd8ad374e4f268516fc81f0bf710206565b737e | ["FTL"] | null | null | null | pythonapm/collector/test_reqhandler.py | nextapm/pythonapm | ddd8ad374e4f268516fc81f0bf710206565b737e | ["FTL"] | null | null | null | pythonapm/collector/test_reqhandler.py | nextapm/pythonapm | ddd8ad374e4f268516fc81f0bf710206565b737e | ["FTL"] | null | null | null |
import unittest
import json
import requests
from unittest import mock
from .reqhandler import send_req
from pythonapm.agent import Agent
from pythonapm import constants
class Resp:
def __init__(self):
self.data = json.dumps({'txn':'success'})
def json(self):
return json.loads(self.data)
class ReqhandlerTest(unittest.TestCase):
def setUp(self):
self.agent = Agent()
self.agent.config.license_key = 'key'
self.agent.config.project_id = 'id'
self.agent.config.print_payload = True
@mock.patch('pythonapm.collector.reqhandler.get_agent')
@mock.patch('pythonapm.collector.reqhandler.agentlogger')
@mock.patch('pythonapm.collector.reqhandler.requests')
def test_send_req(self,mock_requests,mock_logger,mock_agent):
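# Patches requests, the module logger and the agent lookup so send_req's URL building,
# payload serialization and response logging can all be asserted offline.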
mock_agent.return_value = self.agent
payload = {'txn':'txn_data'}
payload_str = json.dumps(payload)
response_data = {'txn':'success'}
complete_url = f'{constants.collector_domain}/api/agent?licenseKey=key&projectId=id'
mock_requests.post.return_value = Resp()
send_req('/api/agent',payload)
mock_requests.post.assert_called_with(complete_url,data= payload_str,headers = {'content-type':'application/json'})
mock_logger.info.assert_called_with(f'response for /api/agent request :{json.dumps(response_data)}')
self.assertEqual(mock_logger.info.call_count,3)
self.assertListEqual(mock_logger.info.mock_calls,[mock.call(f'sending request to {constants.collector_domain}/api/agent'),mock.call(f'payload :{payload_str}'),mock.call('response for /api/agent request :{"txn": "success"}')])
| 37.931818 | 233 | 0.711803 | 1,490 | 0.89275 | 0 | 0 | 1,113 | 0.666866 | 0 | 0 | 493 | 0.295386 |
e366e2fcb39a47a49999de24a158d9a70e017103 | 277 | py | Python | app/admin.py | hbuiOnline/AMS | d9118aee7b5ddd90d54bf4cf7f5cdd11c8e4a511 | ["MIT"] | null | null | null | app/admin.py | hbuiOnline/AMS | d9118aee7b5ddd90d54bf4cf7f5cdd11c8e4a511 | ["MIT"] | null | null | null | app/admin.py | hbuiOnline/AMS | d9118aee7b5ddd90d54bf4cf7f5cdd11c8e4a511 | ["MIT"] | null | null | null |
from django.contrib import admin
# Register your models here.
from .models import * # To import all the model from .models, then specify those in register
admin.site.register(Customer)
admin.site.register(Staff)
admin.site.register(Service)
admin.site.register(Appointment)
| 27.7 | 93 | 0.794224 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 98 | 0.353791 |
e36726ae66238ed268d95c0acb72decccf95ea5d | 17,263 | py | Python | static/brythonlib/cs1robots/worlds_data.py | pythonpad/vue-pythonpad-runner | 52decba9607b3b7b050ee0bf6dd4ef07ae644587 | ["MIT"] | 3 | 2021-01-26T16:18:45.000Z | 2021-09-15T00:57:12.000Z | static/brythonlib/cs1robots/worlds_data.py | pythonpad/vue-pythonpad-runner | 52decba9607b3b7b050ee0bf6dd4ef07ae644587 | ["MIT"] | null | null | null | static/brythonlib/cs1robots/worlds_data.py | pythonpad/vue-pythonpad-runner | 52decba9607b3b7b050ee0bf6dd4ef07ae644587 | ["MIT"] | 2 | 2021-01-26T16:18:47.000Z | 2021-10-21T20:45:20.000Z |
def conv_world(kaist_world_dict):
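# Convert a KAIST-style world dict (1-based beeper coordinates, doubled wall
# coordinates) into the piece/wall schema consumed by the runner.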
pieces = {}
for (sx, sy), count in kaist_world_dict['beepers'].items():
for i in range(count):
beeper_id = len(pieces)
pieces[beeper_id] = {
'type': 'beeper',
'piece_type': 'marker',
'id': beeper_id,
'position': {
'type': 'position',
'x': sx - 1,
'y': sy - 1,
},
}
walls = []
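# KAIST wall coordinates are doubled: an even first coordinate marks a segment
# spanning one cell along x, an odd one a segment spanning one cell along y.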
for sx, sy in kaist_world_dict['walls']:
x1, y1 = (sx - 1) // 2, (sy - 1) // 2
if sx % 2 == 0:
x2 = x1 + 1
y2 = y1
else:
x2 = x1
y2 = y1 + 1
walls.append({
'type': 'wall',
'position_1': {
'type': 'position',
'x': x1,
'y': y1,
},
'position_2': {
'type': 'position',
'x': x2,
'y': y2,
},
})
return {
'type': 'world',
'width': kaist_world_dict['avenues'],
'height': kaist_world_dict['streets'],
'pieces': pieces,
'walls': walls
}
def get_world_dict(title):
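# Look up a built-in world by title and return it converted; raises ValueError for unknown titles.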
global worlds_data
if title not in worlds_data:
raise ValueError('Unknown world name: "%s"' % title)
return conv_world(worlds_data[title])
worlds_data = {
'around': {'avenues': 10, 'streets': 10, 'walls': [], 'beepers': {(1, 9): 1, (2, 10): 1, (8, 10): 1, (10, 10): 1, (9, 10): 1, (5, 10): 1, (10, 8): 1, (10, 4): 1, (10, 1): 1, (8, 1): 1, (7, 1): 1, (6, 1): 1, (5, 1): 1, (3, 1): 1, (1, 6): 1, (1, 5): 1, (1, 3): 1}},
'around2': {'avenues': 10, 'streets': 10, 'walls': [], 'beepers': {(2, 1): 2, (3, 1): 3, (5, 1): 2, (7, 1): 1, (10, 1): 1, (10, 4): 3, (10, 3): 1, (10, 7): 2, (10, 6): 1, (10, 10): 4, (10, 9): 3, (9, 10): 1, (7, 10): 2, (5, 10): 1, (4, 10): 1, (3, 10): 1, (2, 10): 1, (1, 10): 2, (1, 8): 1, (1, 6): 4, (1, 5): 1, (1, 3): 3, (1, 2): 1}},
'around3': {'avenues': 6, 'streets': 6, 'walls': [], 'beepers': {(2, 1): 2, (3, 1): 1, (6, 1): 1, (6, 2): 3, (6, 3): 1, (6, 6): 2, (4, 6): 3, (1, 6): 1, (1, 4): 2, (1, 3): 1, (1, 2): 1}},
'cave': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (2, 3), (4, 1), (5, 4), (2, 5), (3, 6), (5, 6), (6, 3), (6, 1), (8, 1), (8, 3), (9, 4), (10, 3), (11, 2), (1, 8), (3, 8), (5, 8), (7, 8), (8, 7), (14, 1), (14, 3), (13, 4), (11, 6), (12, 7), (13, 8), (14, 7), (14, 5), (9, 8)], 'beepers': {(6, 5): 1}},
'cave2': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (2, 3), (4, 1), (4, 3), (4, 5), (3, 6), (1, 8), (3, 8), (5, 8), (6, 7), (7, 8), (9, 8), (10, 7), (9, 6), (8, 5), (8, 1), (10, 1), (10, 3), (7, 4), (6, 3)], 'beepers': {(6, 3): 1}},
'cave3': {'avenues': 10, 'streets': 10, 'walls': [(2, 1), (1, 4), (5, 2), (6, 1), (3, 4), (5, 6), (3, 6), (2, 5), (6, 3), (7, 6), (8, 5), (8, 1), (9, 2), (12, 1), (12, 3), (12, 5), (9, 4), (12, 7), (11, 8), (11, 6), (9, 8), (7, 8), (5, 8), (3, 8)], 'beepers': {(1, 5): 4, (2, 2): 2, (3, 3): 3, (4, 2): 1, (6, 2): 1, (5, 4): 1, (1, 4): 3}},
'cave4': {'avenues': 10, 'streets': 10, 'walls': [(2, 1), (1, 4), (3, 2), (5, 2), (3, 4), (5, 6), (6, 5), (7, 4), (8, 3), (8, 1), (2, 5), (1, 8), (3, 8), (5, 8), (7, 8), (9, 8), (9, 6), (10, 5), (11, 8), (12, 7), (12, 5), (11, 4), (12, 1), (10, 3)], 'beepers': {(3, 2): 1, (2, 4): 3, (4, 4): 3, (7, 2): 4}},
'chimney': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (2, 3), (2, 5), (2, 7), (2, 9), (2, 11), (4, 11), (4, 9), (4, 7), (4, 5), (4, 3), (3, 12), (5, 2), (6, 3), (6, 5), (7, 6), (8, 5), (8, 3), (9, 2), (11, 2), (12, 3), (12, 5), (12, 7), (12, 9), (13, 10), (14, 9), (14, 7), (14, 5), (14, 3), (15, 2), (16, 3), (16, 5), (16, 7), (16, 9), (16, 11), (16, 13), (16, 15), (17, 16), (18, 15), (18, 13), (18, 11), (18, 9), (18, 7), (18, 5), (18, 3), (19, 2)], 'beepers': {(2, 6): 1, (2, 5): 1, (2, 4): 2, (2, 2): 1, (9, 7): 1, (9, 5): 2, (9, 4): 3, (4, 3): 5, (7, 2): 1, (7, 4): 3, (7, 3): 1, (7, 5): 1}},
'chimney2': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (2, 3), (2, 5), (2, 7), (3, 8), (4, 7), (4, 5), (4, 3), (4, 9), (4, 11), (4, 13), (4, 15), (5, 16), (6, 15), (6, 13), (6, 11), (6, 9), (6, 7), (6, 5), (6, 3), (7, 2), (8, 3), (10, 3), (11, 2), (13, 2), (14, 3), (16, 3), (18, 3), (17, 2), (18, 5), (18, 7), (18, 9), (18, 11), (18, 13), (18, 15), (19, 16), (15, 4), (8, 5), (10, 5), (10, 11), (9, 12), (8, 11), (8, 9), (10, 9), (10, 7), (8, 7)], 'beepers': {(3, 8): 2, (8, 2): 3, (2, 3): 2, (2, 4): 1, (3, 3): 3, (3, 2): 2, (3, 5): 3, (3, 6): 1, (5, 2): 2, (5, 6): 1, (10, 7): 2}},
'chimney3': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (2, 3), (2, 5), (2, 7), (3, 8), (4, 7), (4, 5), (4, 3), (4, 9), (4, 11), (5, 12), (6, 11), (6, 9), (6, 7), (6, 5), (6, 3), (7, 2), (9, 2), (10, 3), (10, 5), (10, 7), (11, 8), (12, 9), (12, 11), (13, 12), (14, 11), (14, 9), (15, 8), (16, 9), (16, 11), (16, 15), (16, 13), (16, 17), (18, 17), (18, 15), (18, 13), (18, 11), (18, 9), (19, 8), (13, 2), (15, 2), (17, 2), (19, 2), (13, 4), (15, 4), (17, 4), (19, 4), (13, 6), (15, 6), (17, 6), (19, 6), (17, 18)], 'beepers': {(3, 2): 1, (2, 3): 3, (2, 4): 2, (3, 4): 6, (3, 5): 1, (7, 6): 5, (7, 5): 1, (9, 5): 3, (9, 7): 2}},
'mine': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (3, 2), (5, 2), (7, 2), (9, 2), (11, 2), (13, 2), (15, 2), (17, 2), (19, 2)], 'beepers': {(2, 1): 1, (3, 1): 1, (5, 1): 1, (8, 1): 1, (10, 1): 1}},
'mine2':{'avenues': 10, 'streets': 10, 'walls': [(1, 2), (3, 2), (5, 2), (7, 2), (9, 2), (11, 2), (13, 2), (15, 2), (17, 2), (19, 2)], 'beepers': {(2, 1): 2, (3, 1): 2, (6, 1): 3, (5, 1): 1, (8, 1): 1, (10, 1): 4}},
'mine3': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (3, 2), (5, 2), (7, 2), (9, 2), (11, 2), (13, 2), (15, 2), (17, 2), (19, 2)], 'beepers': {(10, 1): 5, (9, 1): 1, (8, 1): 3, (6, 1): 2, (1, 1): 2, (2, 1): 1, (3, 1): 3}},
'mine4': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (3, 2), (5, 2), (6, 3), (7, 4), (8, 1), (9, 2), (11, 2), (12, 1), (9, 4), (11, 4), (13, 4), (14, 3), (15, 2), (17, 2), (19, 2)], 'beepers': {(10, 1): 2, (8, 1): 3, (7, 2): 1, (7, 1): 1, (4, 2): 6, (5, 2): 1, (4, 1): 1, (3, 1): 2}},
'mine5': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (3, 2), (5, 2), (6, 3), (9, 2), (8, 1), (10, 1), (7, 4), (9, 4), (11, 4), (12, 3), (13, 2), (14, 3), (14, 5), (14, 7), (15, 8), (17, 8), (19, 8), (17, 6), (16, 5), (18, 5), (19, 4), (16, 3), (16, 1)], 'beepers': {(10, 3): 1, (2, 1): 2, (4, 1): 3, (5, 2): 2, (7, 1): 3, (8, 2): 4, (8, 3): 1, (8, 4): 2}},
'stairs': {'avenues': 10, 'streets': 10, 'walls': [(2, 1), (3, 2), (4, 3), (5, 4), (6, 5), (7, 6), (8, 7), (9, 8), (10, 9), (11, 10), (12, 11), (13, 12), (14, 13), (15, 14), (16, 15), (17, 16), (18, 17), (19, 18)], 'beepers': {(10, 10): 1}},
'stairs2': {'avenues': 10, 'streets': 10, 'walls': [(2, 1), (3, 2), (5, 2), (6, 3), (7, 4), (8, 5), (9, 6), (11, 6), (12, 7), (13, 8), (14, 9), (15, 10), (17, 10), (18, 11), (19, 12)], 'beepers': {(10, 7): 1}},
'stairs3': {'avenues': 10, 'streets': 10, 'walls': [(4, 1), (5, 2), (6, 3), (7, 4), (9, 4), (11, 4), (12, 5), (13, 6), (14, 7), (15, 8), (17, 8), (18, 9), (19, 10)], 'beepers': {(10, 6): 1}},
'stairs4': {'avenues': 10, 'streets': 10, 'walls': [(2, 1), (3, 2), (4, 3), (5, 4), (7, 4), (9, 4), (11, 4), (12, 5), (13, 6), (15, 6), (16, 7), (17, 8), (18, 9), (19, 10)], 'beepers': {(4, 3): 1}},
'coins': {'avenues': 10, 'streets': 10, 'walls': [(3, 2), (5, 2), (7, 2), (9, 2), (11, 2), (13, 2), (15, 2), (17, 2), (19, 2), (2, 3), (2, 5), (2, 7), (2, 9), (2, 11), (2, 13), (2, 15), (2, 17), (2, 19)], 'beepers': {(2, 1): 1, (4, 1): 3, (5, 1): 2, (8, 1): 3, (7, 1): 6, (1, 2): 3, (1, 10): 1, (1, 8): 3, (1, 9): 1, (1, 4): 1}},
'coins2': {'avenues': 10, 'streets': 10, 'walls': [(2, 19), (2, 17), (2, 15), (2, 13), (2, 11), (2, 9), (2, 7), (2, 5), (2, 3), (3, 2), (5, 2), (7, 2), (9, 2), (11, 2), (13, 2), (15, 2), (17, 2), (19, 2)], 'beepers': {(6, 1): 1, (7, 1): 1, (5, 1): 2, (10, 1): 3, (2, 1): 1, (1, 2): 3, (1, 3): 2, (1, 6): 4, (1, 10): 7}},
'news': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (3, 2), (4, 3), (5, 4), (6, 3), (7, 2), (8, 3), (9, 4), (10, 3), (11, 2), (13, 2), (14, 3), (15, 4), (16, 3), (17, 2), (19, 2)], 'beepers': {}},
'news2': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (2, 3), (3, 4), (4, 3), (5, 2), (6, 3), (7, 4), (8, 3), (9, 2), (10, 3), (11, 4), (12, 3), (15, 2), (17, 2), (13, 2), (18, 3), (19, 4)], 'beepers': {}},
'news3': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (3, 2), (5, 4), (4, 3), (6, 3), (7, 4), (8, 3), (9, 4), (10, 3), (11, 4), (12, 3), (13, 2), (14, 3), (15, 4), (16, 3), (17, 4), (18, 3), (19, 2)], 'beepers': {}},
'read': {'avenues': 10, 'streets': 10, 'walls': [], 'beepers': {(10, 1): 7}},
'read2': {'avenues': 10, 'streets': 10, 'walls': [], 'beepers': {(9, 1): 2, (10, 1): 4, (8, 1): 3}},
'read3': {'avenues': 10, 'streets': 10, 'walls': [], 'beepers': {(6, 1): 2, (8, 1): 3, (9, 1): 1, (10, 1): 7}},
'hurdles1': {
'avenues': 10,
'streets': 10,
'walls': [(4, 1), (8, 1), (12, 1), (16, 1)],
'beepers': {(10, 1): 1},
},
'hurdles2': {
'avenues': 10,
'streets': 10,
'walls': [(4, 1), (8, 1), (12, 1), (16, 1)],
'beepers': {(7, 1): 1},
},
'hurdles3': {
'avenues': 10,
'streets': 10,
'walls': [(4, 1), (8, 1), (16, 1), (2, 1), (10, 1), (18, 1), (12, 1)],
'beepers': {(10, 1): 1},
},
'beepers1': {
'avenues': 10,
'streets': 10,
'walls': [],
'beepers': {(3, 1): 1},
},
'corner3_4': {
'avenues': 10,
'streets': 10,
'walls': [],
'beepers': {},
},
'rain1': {
'avenues': 10,
'streets': 10,
'walls': [(5, 6), (4, 7), (4, 9), (4, 13), (4, 15), (5, 16), (9, 16), (13, 16), (15, 16), (16, 15), (16, 11), (16, 9), (16, 7), (15, 6), (11, 6), (7, 6)],
'beepers': {},
},
'newspaper': {
'avenues': 10,
'streets': 10,
'walls': [(4, 1), (5, 2), (7, 2), (8, 3), (9, 4), (11, 4), (12, 5), (13, 6), (15, 6), (16, 7), (17, 8), (19, 8)],
'beepers': {},
},
'hurdles4': {
'avenues': 10,
'streets': 10,
'walls': [(4, 1), (8, 1), (16, 1), (2, 1), (10, 1), (18, 1), (12, 1), (4, 3), (10, 3), (10, 5)],
'beepers': {(10, 1): 1},
},
'frank18': {
'avenues': 10,
'streets': 10,
'walls': [],
'beepers': {(7, 4): 1, (3, 7): 2, (7, 1): 19, (6, 6): 2, (3, 4): 2},
},
'rain2': {
'avenues': 12,
'streets': 9,
'walls': [(5, 6), (7, 6), (11, 6), (13, 6), (15, 6), (16, 5), (17, 4), (21, 4), (22, 5), (22, 9), (22, 11), (22, 15), (21, 16), (19, 16), (15, 16), (13, 16), (9, 16), (5, 16), (4, 15), (4, 13), (4, 9), (4, 7)],
'beepers': {},
},
'wrong': {
'avenues': 10,
'streets': 10,
'walls': [(10, 1), (10, 3), (10, 5), (1, 10), (3, 10), (5, 10), (2, 1), (2, 3), (1, 6), (3, 6), (4, 5), (4, 3), (5, 2), (6, 3), (7, 8), (5, 8), (2, 7), (7, 10), (8, 7), (9, 6), (8, 3), (9, 4), (9, 10), (10, 9)],
'beepers': {(6, 4): 1},
},
'hanoi3': {
'avenues': 10,
'streets': 10,
'walls': [],
'beepers': {(2, 1): 3, (2, 2): 2, (2, 3): 1},
},
'fairy_tale': {
'avenues': 14,
'streets': 8,
'walls': [(1, 10), (3, 10), (4, 9), (5, 8), (6, 7), (9, 8), (11, 8), (12, 7), (12, 5), (12, 3), (12, 1)],
'beepers': {},
},
'hanoi4': {
'avenues': 10,
'streets': 10,
'walls': [],
'beepers': {(2, 4): 1, (2, 1): 4, (2, 2): 3, (2, 3): 2},
},
'empty': {
'avenues': 8,
'streets': 8,
'walls': [],
'beepers': {},
},
'trash1': {
'avenues': 10,
'streets': 10,
'walls': [(3, 2), (5, 2), (7, 2), (9, 2), (11, 2), (13, 2), (15, 2), (17, 2), (19, 2), (1, 4), (2, 3)],
'beepers': {(6, 1): 1, (3, 1): 3, (5, 1): 1, (10, 1): 2, (7, 1): 2},
},
'trash2': {
'avenues': 10,
'streets': 10,
'walls': [(3, 2), (5, 2), (7, 2), (9, 2), (11, 2), (13, 2), (15, 2), (17, 2), (19, 2), (1, 4), (2, 3)],
'beepers': {(9, 1): 1, (5, 1): 13, (2, 1): 2, (7, 1): 2},
},
'trash3': {
'avenues': 10,
'streets': 10,
'walls': [],
'beepers': {(1, 2): 18, (7, 3): 4, (4, 8): 1, (5, 6): 7, (7, 1): 4, (9, 2): 11, (8, 8): 1, (1, 10): 3, (2, 5): 3, (5, 8): 2, (7, 9): 2},
},
'trash4': {
'avenues': 11,
'streets': 10,
'walls': [],
'beepers': {(6, 9): 3, (1, 3): 2, (9, 8): 2, (10, 6): 1, (5, 1): 2, (1, 11): 2, (10, 3): 1, (5, 5): 2, (2, 9): 1, (6, 10): 2, (1, 5): 1, (2, 2): 1, (8, 6): 2, (4, 10): 1, (8, 2): 1, (8, 11): 2, (9, 10): 3, (4, 11): 1, (2, 7): 1, (4, 6): 1, (9, 2): 1, (3, 4): 3, (5, 7): 1, (3, 8): 3, (7, 8): 5},
},
'amazing3a': {
'avenues': 7,
'streets': 7,
'walls': [(2, 1), (3, 2), (5, 2), (6, 3), (6, 5), (6, 7), (6, 9), (6, 11), (6, 13)],
'beepers': {(1, 2): 1, (2, 7): 1, (3, 2): 1, (1, 3): 1, (3, 3): 1, (1, 7): 1, (1, 4): 1, (2, 4): 1, (1, 5): 1, (2, 6): 1, (1, 6): 1, (3, 6): 1, (2, 2): 1, (2, 3): 1, (3, 7): 1, (2, 5): 1, (3, 4): 1, (1, 1): 1, (3, 5): 1},
},
'yardwork': {
'avenues': 10,
'streets': 10,
'walls': [],
'beepers': {(1, 2): 18, (7, 3): 4, (4, 8): 1, (5, 6): 7, (7, 1): 4, (9, 2): 11, (8, 8): 1, (1, 10): 3, (2, 5): 3, (5, 8): 2, (7, 9): 2},
},
'sort1': {
'avenues': 10,
'streets': 10,
'walls': [],
'beepers': {(1, 2): 1, (1, 3): 1, (2, 2): 1, (1, 4): 1, (2, 4): 1, (1, 5): 1, (1, 6): 1, (2, 1): 1, (1, 7): 1, (2, 3): 1, (2, 5): 1, (1, 1): 1},
},
'harvest4': {
'avenues': 7,
'streets': 7,
'walls': [],
'beepers': {(7, 3): 1, (6, 6): 1, (5, 6): 1, (3, 2): 1, (2, 1): 1, (6, 2): 1, (5, 1): 2, (2, 5): 1, (7, 2): 1, (5, 5): 1, (7, 6): 1, (4, 4): 1, (3, 6): 1, (2, 2): 2, (3, 5): 1, (4, 1): 1, (6, 4): 1, (5, 4): 1, (7, 1): 1, (4, 5): 1, (2, 3): 1, (4, 2): 1, (6, 5): 2, (5, 3): 2, (4, 6): 1, (6, 1): 1, (7, 4): 1, (4, 3): 1, (3, 4): 2, (2, 4): 1},
},
'amazing5': {
'avenues': 7,
'streets': 7,
'walls': [(3, 2), (6, 5), (6, 7), (6, 9), (6, 11), (6, 13), (4, 1), (2, 3), (3, 4), (5, 4)],
'beepers': {},
},
'maze1': {
'avenues': 10,
'streets': 10,
'walls': [(10, 1), (10, 3), (10, 5), (1, 10), (3, 10), (5, 10), (2, 1), (2, 3), (1, 6), (3, 6), (4, 5), (4, 3), (5, 2), (6, 3), (7, 8), (5, 8), (2, 7), (7, 10), (8, 7), (9, 6), (8, 3), (9, 4), (9, 10), (10, 9)],
'beepers': {(6, 4): 1},
},
'harvest1': {
'avenues': 7,
'streets': 7,
'walls': [],
'beepers': {(3, 3): 1, (3, 2): 1, (3, 1): 1, (5, 6): 1, (5, 1): 1, (3, 6): 1, (5, 3): 1, (5, 2): 1, (7, 6): 1, (7, 5): 1, (7, 4): 1, (7, 3): 1, (7, 2): 1, (7, 1): 1, (3, 5): 1, (3, 4): 1, (2, 4): 1, (2, 5): 1, (2, 6): 1, (2, 1): 1, (2, 2): 1, (2, 3): 1, (4, 6): 1, (4, 4): 1, (4, 5): 1, (4, 2): 1, (4, 3): 1, (4, 1): 1, (6, 1): 1, (6, 2): 1, (6, 3): 1, (6, 4): 1, (6, 5): 1, (6, 6): 1, (5, 5): 1, (5, 4): 1},
},
'amazing1': {
'avenues': 5,
'streets': 5,
'walls': [],
'beepers': {},
},
'harvest2': {
'avenues': 12,
'streets': 12,
'walls': [],
'beepers': {(7, 3): 1, (6, 10): 1, (6, 6): 1, (2, 8): 1, (10, 6): 1, (7, 7): 1, (4, 6): 1, (6, 2): 1, (7, 11): 1, (3, 7): 1, (10, 8): 1, (5, 5): 1, (4, 4): 1, (8, 10): 1, (4, 8): 1, (8, 6): 1, (5, 3): 1, (9, 7): 1, (4, 10): 1, (2, 6): 1, (5, 11): 1, (5, 9): 1, (7, 5): 1, (6, 12): 1, (6, 4): 1, (3, 5): 1, (11, 7): 1, (6, 8): 1, (5, 7): 1, (9, 9): 1, (8, 8): 1, (7, 9): 1, (1, 7): 1, (9, 5): 1, (3, 9): 1, (8, 4): 1},
},
'amazing3': {
'avenues': 7,
'streets': 7,
'walls': [(2, 1), (3, 2), (5, 2), (6, 3), (6, 5), (6, 7), (6, 9), (6, 11), (6, 13)],
'beepers': {},
},
'amazing2': {
'avenues': 7,
'streets': 7,
'walls': [(6, 13), (6, 11), (6, 9), (13, 6), (11, 6), (9, 6), (7, 6), (6, 7)],
'beepers': {},
},
'harvest3': {
'avenues': 7,
'streets': 7,
'walls': [],
'beepers': {(7, 3): 1, (6, 6): 1, (5, 6): 1, (3, 2): 1, (2, 1): 1, (6, 2): 1, (5, 1): 1, (2, 5): 1, (7, 2): 1, (7, 6): 1, (4, 4): 1, (3, 6): 1, (2, 2): 1, (3, 5): 1, (4, 1): 1, (6, 4): 1, (5, 4): 1, (7, 1): 1, (4, 5): 1, (5, 5): 1, (2, 3): 1, (4, 2): 1, (6, 5): 1, (5, 3): 1, (4, 6): 1, (3, 4): 1, (6, 1): 1, (7, 4): 1, (4, 3): 1, (2, 4): 1},
},
'add1': {
'avenues': 10,
'streets': 10,
'walls': [],
'beepers': {(10, 1): 3, (10, 2): 2}
},
'add2': {
'avenues': 10,
'streets': 10,
'walls': [],
'beepers': {(9, 2): 1, (9, 1): 2, (10, 1): 2, (10, 2): 3}
},
'add34': {
'avenues': 10,
'streets': 10,
'walls': [],
'beepers': {(8, 2): 9, (7, 1): 1, (8, 1): 3, (9, 2): 8, (10, 1): 4, (10, 2): 7}
},
}
| 60.36014 | 635 | 0.333835 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,836 | 0.164282 |
e369c6172f4572b618818bae53263220f4153bc2 | 934 | py | Python | util/label.py | bluehackmaster/bl-api-objdetect | dc8b514e62346904c3b9ab7e88987461721dd6b0 | ["Apache-2.0"] | null | null | null | util/label.py | bluehackmaster/bl-api-objdetect | dc8b514e62346904c3b9ab7e88987461721dd6b0 | ["Apache-2.0"] | 16 | 2020-01-28T21:56:54.000Z | 2022-03-11T23:15:09.000Z | util/label.py | bluehackmaster/bl-api-objdetect | dc8b514e62346904c3b9ab7e88987461721dd6b0 | ["Apache-2.0"] | 1 | 2017-10-17T04:52:08.000Z | 2017-10-17T04:52:08.000Z |
def convert_class_to_code(label_map,
max_num_classes,
use_display_name=True):
categories = []
list_of_ids_already_added = []
if not label_map:
label_id_offset = 1
for class_id in range(max_num_classes):
categories.append({
'id': class_id + label_id_offset,
'name': 'category_{}'.format(class_id + label_id_offset)
})
return categories
for item in label_map.item:
if not 0 < item.id <= max_num_classes:
logging.info('Ignore item %d since it falls outside of requested '
'label range.', item.id)
continue
if use_display_name and item.HasField('display_name'):
name = item.display_name
else:
name = item.name
if item.id not in list_of_ids_already_added:
list_of_ids_already_added.append(item.id)
categories.append({'id': item.id, 'name': name})
return categories
| 33.357143 | 72 | 0.635974 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 114 | 0.122056 |
e36ae367550f66dd2b4a1cdb03a10bf47b3c6b9c | 5,694 | py | Python | hdp_api/routes/__init__.py | CedricCazinHC/HyperAPI | 789419b95679faf550a57773b9cc57107b2b8504 | [
"BSD-3-Clause"
]
| null | null | null | hdp_api/routes/__init__.py | CedricCazinHC/HyperAPI | 789419b95679faf550a57773b9cc57107b2b8504 | [
"BSD-3-Clause"
]
| null | null | null | hdp_api/routes/__init__.py | CedricCazinHC/HyperAPI | 789419b95679faf550a57773b9cc57107b2b8504 | [
"BSD-3-Clause"
]
| null | null | null | from abc import ABCMeta, abstractproperty, abstractmethod
import inspect
import random
import re
import time
from requests.exceptions import HTTPError
class RoutePathInvalidException(Exception):
def __init__(self, name, value, path, validator):
self.path = path
self.name = name
self.value = value
self.validator = validator
def __str__(self):
return 'Route path invalid : {}={} ({})\n\t{}'.format(self.name, self.value, self.validator.__class__.__name__, self.path)
class ValidatorObjectID(object):
"""(str) A 24 hex digit MongoDB ObjectID."""
@staticmethod
def __call__(value):
return re.match('[0-9a-z]{24}','{}'.format(value)) is not None
@staticmethod
def getRandom():
return ''.join(random.choices('0123456789abcdef', k=24))
class ValidatorAny(object):
"""(any) Any object except None and empty string."""
@staticmethod
def __call__(value):
if value is None :
return False
if isinstance(value,str) and not value.strip() :
return False
return True
@staticmethod
def getRandom():
return ''.join(random.choices('0123456789abcdef', k=24))
class ValidatorInt(object):
"""(int) An Integer Value."""
@staticmethod
def __call__(value):
return isinstance(value,int)
@staticmethod
def getRandom():
return random.randint(0,100)
class Route(object):
__metaclass__ = ABCMeta
GET = "GET"
POST = "POST"
_path_keys = {}
VALIDATOR_OBJECTID = ValidatorObjectID()
VALIDATOR_ANY = ValidatorAny()
VALIDATOR_INT = ValidatorInt()
@abstractproperty
def name(self):
"""The Route key (not name) as defined in the API schema"""
return "Route Name"
@abstractproperty
def httpMethod(self):
"""The Route http method as defined in the API schema"""
return "http Method"
@abstractproperty
def path(self):
"""The Route path as defined in the API schema"""
return "Route Path"
def __init__(self,session, watcher=None):
self.session = session
self._watcher = watcher
def __call__(self,**kwargs):
formatter = dict.fromkeys(self._path_keys)
for _path_key, _validator in self._path_keys.items():
_value = kwargs.pop(_path_key,None)
if not _validator(_value) :
raise RoutePathInvalidException(_path_key, _value, self.path, _validator)
formatter[_path_key] = _value
_path = self.path if self.path[0] != '/' else self.path[1:]
_path = _path.format(**formatter)
if self._watcher:
self._watcher(str(self),kwargs.pop('info','call'))
try:
_result = self.session.request(self.httpMethod, _path, **kwargs)
self._watcher(str(self),'200')
return _result
except HTTPError as HE:
self._watcher(str(self), str(HE.response))
raise
return self.session.request(self.httpMethod, _path, **kwargs)
def call_when(self, condition=lambda x:True, call=lambda x: None, step=1, timeout=500, **kwargs):
_remaining = timeout
if self._watcher:
kwargs['info'] = 'call'
while _remaining > 0:
_remaining = _remaining - step
time.sleep(step)
_res = self.__call__(**kwargs)
if condition(_res) :
return call(_res)
elif kwargs.get('info', None) == 'call':
kwargs['info'] = 'retry'
if self._watcher:
self._watcher(str(self),'timeout')
return None
def wait_until(self, condition=lambda x:True, step=1, timeout=60, **kwargs):
_remaining = timeout
if self._watcher:
kwargs['info'] = 'call'
while _remaining > 0:
_remaining = _remaining - step
time.sleep(step)
_res = self.__call__(**kwargs)
if condition(_res) :
return _res
elif kwargs.get('info', None) == 'call':
kwargs['info'] = 'retry'
if self._watcher:
self._watcher(str(self),'timeout')
return None
@property
def help(self):
msg = 'Route {} [{}]'.format(self.name, self.httpMethod)
msg += '\n{}'.format(self.path)
for _k,_v in self._path_keys.items():
msg += '\n{:>20} : {}'.format(_k,_v.__doc__)
msg += '\n'
print(msg)
def __repr__(self):
return '{} <{}> {}:{}'.format(self.__class__.__name__, id(self), self.httpMethod, self.path)
def __str__(self):
return '{: >4}:{}'.format(self.httpMethod, self.path)
class Resource(object):
__metaclass__ = ABCMeta
@abstractproperty
def name(self):
"""The resource name as defined in the API schema"""
return "Resource Name"
def __init__(self,session, watcher=None):
self.session = session
self._routes = {}
for _route in (_m[1] for _m in inspect.getmembers(self.__class__) if inspect.isclass(_m[1]) and issubclass(_m[1], Route)) :
_routeInstance = _route(session, watcher=watcher)
_routeName = _route.__name__.lower().replace('_','')
self.__setattr__(_routeName, _routeInstance)
self._routes[_routeName] = _routeInstance
def __iter__(self):
for _r in self._routes.values():
yield _r
@property
def help(self):
for _r in self._routes.values():
_r.help
def __repr__(self):
return '{} <{}>'.format(self.__class__.__name__, id(self))
| 31.114754 | 131 | 0.592378 | 5,530 | 0.971198 | 81 | 0.014226 | 1,533 | 0.269231 | 0 | 0 | 693 | 0.121707 |
e36b3c194ac71da00a1987d9f541d4a940300816 | 332 | py | Python | compsocsite/polls/migrations/0108_merge_20180105_1930.py | ReedyChen/opra | 86ce88c7219d92e321cd9aa3d0bc2bf631e4b90f | [
"MIT"
]
| 8 | 2017-03-07T19:46:51.000Z | 2021-06-01T01:41:37.000Z | compsocsite/polls/migrations/0108_merge_20180105_1930.py | ReedyChen/opra | 86ce88c7219d92e321cd9aa3d0bc2bf631e4b90f | [
"MIT"
]
| null | null | null | compsocsite/polls/migrations/0108_merge_20180105_1930.py | ReedyChen/opra | 86ce88c7219d92e321cd9aa3d0bc2bf631e4b90f | [
"MIT"
]
| 9 | 2016-06-09T03:36:20.000Z | 2019-09-11T20:56:23.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-01-06 00:30
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('polls', '0107_auto_20180101_2108'),
('polls', '0107_auto_20180105_1858'),
]
operations = [
]
| 19.529412 | 46 | 0.659639 | 184 | 0.554217 | 0 | 0 | 0 | 0 | 0 | 0 | 133 | 0.400602 |
e36c689a2b24e54549cda9f00830211a35aefafa | 5,141 | py | Python | source.py | Sakshisingh05/ClockChian | 21ce1005c83b003a9fc62203d03c50b3e8f70793 | [
"MIT"
]
| null | null | null | source.py | Sakshisingh05/ClockChian | 21ce1005c83b003a9fc62203d03c50b3e8f70793 | [
"MIT"
]
| null | null | null | source.py | Sakshisingh05/ClockChian | 21ce1005c83b003a9fc62203d03c50b3e8f70793 | [
"MIT"
]
| null | null | null | from flask import Flask
from flask import render_template, redirect, url_for
from flask import request
import blockChain
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def index():
# print(request.method )
if request.method == 'POST':
text = request.form['text']
if len(text) < 1:
return redirect(url_for('index'))
try:
make_proof = request.form['make_proof']
except Exception:
make_proof = False
blockChain.write_block(text, make_proof)
return redirect(url_for('index'))
return render_template('index.html')
@app.route('/check', methods=[ 'POST'])
def integrity():
results = blockChain.check_blocks_integrity()
if request.method == 'POST':
return render_template('index.html', results=results)
return render_template('index.html')
@app.route('/mining', methods=[ 'POST'])
def mining():
if request.method == 'POST':
max_index = int(blockChain.get_next_block())
for i in range(2, max_index):
blockChain.get_POW(i)
return render_template('index.html', querry=max_index)
return render_template('index.html')
if __name__ == '__main__':
app.run(debug=True)
import hashlib
import json
import os
from time import time
BLOCKCHAIN_DIR = os.curdir + '/blocks/'
def check_blocks_integrity():
result = list()
cur_proof = - 1
for i in range(2, int(get_next_block())):
prev_index = str(i-1)
cur_index = str(i)
tmp = {'block' : '', 'result' : '', 'proof': ''}
try:
file_dict = json.load(open(BLOCKCHAIN_DIR + cur_index + '.json'))
cur_hash = file_dict['prev_hash']
cur_proof = file_dict['proof']
except Exception as e:
print(e)
try:
prev_hash = hashlib.sha256(open(BLOCKCHAIN_DIR + prev_index + '.json', 'rb').read()).hexdigest()
except Exception as e:
print(e)
tmp['block'] = prev_index
tmp['proof'] = cur_proof
if cur_hash == prev_hash:
tmp['result'] = 'ok'
else:
tmp['result'] = 'error'
result.append(tmp)
return result
def check_block(index):
cur_index = str(index)
prev_index = str(int(index) - 1)
cur_proof = - 1
cur_hash = 0
prev_hash =0
tmp = {'block' : '', 'result' : '', 'proof': ''}
try:
file_dict = json.load(open(BLOCKCHAIN_DIR + cur_index + '.json'))
cur_hash = file_dict['prev_hash']
cur_proof = file_dict['proof']
except Exception as e:
print(e)
try:
prev_hash = hashlib.sha256(open(BLOCKCHAIN_DIR + prev_index + '.json', 'rb').read()).hexdigest()
except Exception as e:
print(e)
tmp['block'] = prev_index
tmp['proof'] = cur_proof
if cur_hash == prev_hash:
tmp['result'] = 'ok'
else:
tmp['result'] = 'error'
return tmp
def get_hash(file_name):
file_name = str(file_name)
if not file_name.endswith('.json'):
file_name += '.json'
try:
with open(BLOCKCHAIN_DIR + file_name, 'rb') as file:
return hashlib.sha256(file.read()).hexdigest()
except Exception as e:
print('File "'+file_name+'" does not exist!n', e)
def get_next_block():
files = os.listdir(BLOCKCHAIN_DIR)
index_list = [int(file.split('.')[0]) for file in files]
cur_index = sorted(index_list)[-1]
next_index = cur_index + 1
return str(next_index)
def is_valid_proof(last_proof, proof, difficulty):
guess = f'{last_proof}{proof}'.encode()
guess_hash = hashlib.sha256(guess).hexdigest()
return guess_hash[:difficulty] == '0' * difficulty
def get_POW(file_name, difficulty=1):
# POW - proof of work
file_name = str(file_name)
if file_name.endswith('.json'):
file_name = int(file_name.split('.')[0])
else:
file_name = int(file_name)
last_proof = json.load(open(BLOCKCHAIN_DIR + str(file_name - 1) + '.json'))['proof']
proof = 0
while is_valid_proof(last_proof, proof, difficulty) is False:
proof += 1
cur_block = json.load(open(BLOCKCHAIN_DIR + str(file_name) + '.json'))
cur_block['proof'] = proof
cur_block['prev_hash'] = get_hash(str(file_name - 1))
with open(BLOCKCHAIN_DIR + str(file_name) + '.json', 'w') as file:
json.dump(cur_block, file, indent=4, ensure_ascii=False)
def write_block(text, make_proof=False):
cur_index = get_next_block()
prev_index = str(int(cur_index) - 1)
prev_block_hash = get_hash(prev_index)
data = {'text' : text,
'prev_hash' : prev_block_hash,
'timestamp' : time(),
'proof' : -1,
'index' : cur_index
}
with open(BLOCKCHAIN_DIR + cur_index + '.json', 'w') as file:
json.dump(data, file, indent=4, ensure_ascii=False)
if make_proof is True:
get_POW(str(cur_index))
if __name__ == '__main__':
# for i in range(10):
# write_block(str(i),True)
for i in range(2,10):
print(check_block(str(i)))
print(check_blocks_integrity())
| 29.545977 | 108 | 0.606497 | 0 | 0 | 0 | 0 | 1,036 | 0.201517 | 0 | 0 | 670 | 0.130325 |
e36dc2963b3e15b6183197cc7bce8f0677915722 | 27 | py | Python | rtmp/__init__.py | notnola/pinybot | 8ad579fe5652b42a8fb9486c8d11962f5972f817 | [
"MIT"
]
| null | null | null | rtmp/__init__.py | notnola/pinybot | 8ad579fe5652b42a8fb9486c8d11962f5972f817 | [
"MIT"
]
| null | null | null | rtmp/__init__.py | notnola/pinybot | 8ad579fe5652b42a8fb9486c8d11962f5972f817 | [
"MIT"
]
| 1 | 2019-01-31T01:07:56.000Z | 2019-01-31T01:07:56.000Z | __author__ = 'TechWhizZ199' | 27 | 27 | 0.814815 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.518519 |
e36e0bc7f72121825603a719d4feff88206f860b | 5,668 | py | Python | download_pinterest_images.py | BrunoKrinski/pinterest_download_tools | c804f83bc97c418ea44f1d179ad9864e90631fe5 | [
"MIT"
]
| 1 | 2022-03-07T04:38:26.000Z | 2022-03-07T04:38:26.000Z | download_pinterest_images.py | BrunoKrinski/pinterest_download_tools | c804f83bc97c418ea44f1d179ad9864e90631fe5 | [
"MIT"
]
| null | null | null | download_pinterest_images.py | BrunoKrinski/pinterest_download_tools | c804f83bc97c418ea44f1d179ad9864e90631fe5 | [
"MIT"
]
| null | null | null | import os
import wget
import time
import argparse
import subprocess
import geckodriver_autoinstaller
import chromedriver_autoinstaller
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver import FirefoxOptions
from selenium.webdriver import DesiredCapabilities
from selenium.webdriver import DesiredCapabilities
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium_stealth import stealth
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
#def execute_with_retry(method, max_attempts):
# e = None
# for i in range(0, max_attempts):
# try:
# return method()
# except Exception as e:
# print(e)
# time.sleep(1)
# if e is not None:
# raise e
def download_images(urls, dpath):
urls = list(set(urls))
print('\nDownloading imagens...')
log_file.write('Downloading imagens...\n')
icont = 0
for url in urls:
try:
wget.download(url, out=dpath)
icont += 1
except:
print('\nCound not download the image: ' + url)
images_err.write('Cound not download the image: ' + url + '\n')
#os.system("rm *\(1\)*")
#os.system("rm images/*\(1\)*")
#os.system("Get-ChildItem -recurse | Where-Object {$_.Name -match 'images/ \(1\)'} | Remove-Item")
subprocess.run(["powershell", "-Command",
"Get-ChildItem -recurse -Path images | Where-Object {$_.Name -match '\(1\)'} | Remove-Item"])
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--user', type=str, dest='user', action='store',
required=True, help='Windows or Linux user.')
parser.add_argument('--link', type=str, dest='link', action='store',
help='Url to a pinterest folder.')
parser.add_argument('--list', type=str, dest='url_list', action='store',
help='Path to a txt file with a list of urls.')
return parser.parse_args()
if __name__ == '__main__':
args = get_args()
user = args.user
link = args.link
url_list = args.url_list
if link == None:
if url_list == None:
print('Please enter an url or an url file!')
exit()
links = open(url_list, 'r').read().splitlines()
else:
links = [link]
log_file = open('log.txt','w')
images_err = open('images_err.txt', 'w')
#geckodriver_autoinstaller.install()
chromedriver_autoinstaller.install()
options = webdriver.ChromeOptions()
options.add_argument("--start-maximized")
options.add_argument("--user-data-dir=C:\\Users\\{}\\AppData\\Local\\Google\\Chrome\\User Data".format(user))
driver = webdriver.Chrome(options=options)
images_folder = 'images'
print('Creating folder ' + images_folder + '...!')
log_file.write('Creating folder ' + images_folder + '...!\n')
os.makedirs(images_folder, exist_ok=True)
num_links = len(links)
cont = 0
for link in links:
dpath = 'images/' + str(cont).zfill(4)
os.mkdir(dpath)
print('\nDownloading ' + str(cont) + '/' + str(num_links) + '...')
log_file.write('Downloading ' + str(cont) + '/' + \
str(num_links) + '...\n')
cont += 1
print('Accessing pinterest link: ' + link)
log_file.write('Accessing pinterest link: ' + link + '\n')
try:
driver.get(link)
print('Link successfully accessed!')
log_file.write('Link successfully accessed!\n')
except TimeoutException as e:
print('Could not access the link:' + link)
log_file.write('Could not access the link:' + link + '\n')
#exit()
print('Waitning page load...')
log_file.write('Waiting page load...\n')
time.sleep(10)
last_height = driver.execute_script("return document.body.scrollHeight")
urls = []
len_urls = 0
change_times = 0
scroll_times = 0
print('Searching images... It can take a long time!')
log_file.write('Searching images... It can take a long time!\n')
cont_images = 0
while True:
link_tags = driver.find_elements_by_tag_name('img')
for tag in link_tags:
try:
url = tag.get_attribute('srcset')
url = url.split(' ')
if len(url) == 8:
url = url[6]
urls.append(url)
except:
continue
driver.execute_script("window.scrollBy(0, 50);")
scroll_times += 1
if scroll_times == 50:
cont_images += len(urls)
download_images(urls, dpath)
urls = []
new_height = driver.execute_script("return document.body.scrollHeight")
if new_height == last_height or cont_images > 20000:
break
else:
last_height = new_height
scroll_times = 0
log_file.close()
images_err.close()
| 35.873418 | 114 | 0.569689 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,580 | 0.278758 |
e36ead0127bc40a1f4670d0eba027d0736c82d0a | 781 | py | Python | kafka_scripts/kafka-producer-stream-algorithm.py | walterjgsp/meaning | 71fd69eab430d364baefb31096c866999de9b4dd | [
"MIT"
]
| null | null | null | kafka_scripts/kafka-producer-stream-algorithm.py | walterjgsp/meaning | 71fd69eab430d364baefb31096c866999de9b4dd | [
"MIT"
]
| null | null | null | kafka_scripts/kafka-producer-stream-algorithm.py | walterjgsp/meaning | 71fd69eab430d364baefb31096c866999de9b4dd | [
"MIT"
]
| null | null | null | from kafka import KafkaProducer
import json
import random
from time import sleep
from datetime import datetime
# Create an instance of the Kafka producer
producer = KafkaProducer(bootstrap_servers='kafka-server:9092',
value_serializer=lambda m: json.dumps(
m).encode('utf-8'),
api_version=(0, 11, 5))
stream_algorithm_str = {"id":"1","import_str": "from sklearn.tree import DecisionTreeClassifier",
"alg_str": "DecisionTreeClassifier", "parameters_str": None,
"db_training_path": "test_training.csv","db_test_path":"test_test.csv"}
producer.send('sk-individual-topic', stream_algorithm_str)
# block until all async messages are sent
producer.flush()
| 37.190476 | 97 | 0.658131 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 313 | 0.400768 |
e3727e0484521064be92f2b66e6c5b9dd289ef54 | 897 | py | Python | kinsumer/helpers.py | ungikim/kinsumer | 01bd9626d985bc3c239b979f0d98094f78cc102f | [
"MIT"
]
| 5 | 2018-03-09T05:16:38.000Z | 2021-11-12T11:56:18.000Z | kinsumer/helpers.py | ungikim/kinsumer | 01bd9626d985bc3c239b979f0d98094f78cc102f | [
"MIT"
]
| 2 | 2017-10-16T06:38:28.000Z | 2017-10-18T08:05:37.000Z | kinsumer/helpers.py | balancehero/kinsumer | 01bd9626d985bc3c239b979f0d98094f78cc102f | [
"MIT"
]
| 1 | 2017-10-18T08:15:28.000Z | 2017-10-18T08:15:28.000Z | """:mod:`kinsumer.helpers` --- Implements various helpers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from threading import RLock
_missing = object()
class locked_cached_property(object):
def __init__(self, func, name=None, doc=None):
self.__name__ = name or func.__name__
self.__module__ = func.__module__
self.__doc__ = doc or func.__doc__
self.func = func
self.lock = RLock()
def __get__(self, obj, type=None):
if obj is None:
return self
with self.lock:
value = obj.__dict__.get(self.__name__, _missing)
if value is _missing:
value = self.func(obj)
obj.__dict__[self.__name__] = value
return value
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
| 27.181818 | 61 | 0.570792 | 596 | 0.664437 | 0 | 0 | 0 | 0 | 0 | 0 | 120 | 0.133779 |
e372ca50f1bbb91d278a5aa868f3b3246267b836 | 4,886 | py | Python | python/tests/pyspark/feature/string_map_test.py | voganrc/mleap | 68cbf375968d9f55acb1d673a4c2390602c0274a | [
"Apache-2.0"
]
| 1,401 | 2017-01-07T03:34:44.000Z | 2022-03-31T22:17:58.000Z | python/tests/pyspark/feature/string_map_test.py | liang0/mleap | 41dbde99e389873fc609083cce5d610cea9e9170 | [
"Apache-2.0"
]
| 546 | 2016-12-30T19:10:55.000Z | 2022-03-31T16:56:52.000Z | python/tests/pyspark/feature/string_map_test.py | liang0/mleap | 41dbde99e389873fc609083cce5d610cea9e9170 | [
"Apache-2.0"
]
| 326 | 2017-01-24T10:35:41.000Z | 2022-03-15T15:53:17.000Z | import os
import tempfile
import unittest
from py4j.protocol import Py4JJavaError
from pyspark.ml import Pipeline
from pyspark.sql import types as t
from mleap.pyspark.feature.string_map import StringMap
from mleap.pyspark.spark_support import SimpleSparkSerializer
from tests.pyspark.lib.assertions import assert_df
from tests.pyspark.lib.spark_session import spark_session
INPUT_SCHEMA = t.StructType([t.StructField('key_col', t.StringType(), False),
t.StructField('extra_col', t.StringType(), False)])
OUTPUT_SCHEMA = t.StructType([t.StructField('key_col', t.StringType(), False),
t.StructField('extra_col', t.StringType(), False),
t.StructField('value_col', t.DoubleType(), False)])
class StringMapTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.spark = spark_session()
@classmethod
def tearDownClass(cls):
cls.spark.stop()
def setUp(self):
self.input = StringMapTest.spark.createDataFrame([['a', 'b']], INPUT_SCHEMA)
def test_map(self):
result = StringMap(
labels={'a': 1.0},
inputCol='key_col',
outputCol='value_col',
).transform(self.input)
expected = StringMapTest.spark.createDataFrame([['a', 'b', 1.0]], OUTPUT_SCHEMA)
assert_df(expected, result)
def test_map_default_value(self):
result = StringMap(
labels={'z': 1.0},
inputCol='key_col',
outputCol='value_col',
handleInvalid='keep',
).transform(self.input)
expected = StringMapTest.spark.createDataFrame([['a', 'b', 0.0]], OUTPUT_SCHEMA)
assert_df(expected, result)
def test_map_custom_default_value(self):
result = StringMap(
labels={'z': 1.0},
inputCol='key_col',
outputCol='value_col',
handleInvalid='keep',
defaultValue=-1.0
).transform(self.input)
expected = StringMapTest.spark.createDataFrame([['a', 'b', -1.0]], OUTPUT_SCHEMA)
assert_df(expected, result)
def test_map_missing_value_error(self):
with self.assertRaises(Py4JJavaError) as error:
StringMap(
labels={'z': 1.0},
inputCol='key_col',
outputCol='value_col'
).transform(self.input).collect()
self.assertIn('java.util.NoSuchElementException: Missing label: a', str(error.exception))
def test_map_from_dataframe(self):
labels_df = StringMapTest.spark.createDataFrame([['a', 1.0]], 'key_col: string, value_col: double')
result = StringMap.from_dataframe(
labels_df=labels_df,
inputCol='key_col',
outputCol='value_col'
).transform(self.input)
expected = StringMapTest.spark.createDataFrame([['a', 'b', 1.0]], OUTPUT_SCHEMA)
assert_df(expected, result)
def test_serialize_to_bundle(self):
string_map = StringMap(
labels={'a': 1.0},
inputCol='key_col',
outputCol='value_col',
)
pipeline = Pipeline(stages=[string_map]).fit(self.input)
serialization_dataset = pipeline.transform(self.input)
jar_file_path = _serialize_to_file(pipeline, serialization_dataset)
deserialized_pipeline = _deserialize_from_file(jar_file_path)
result = deserialized_pipeline.transform(self.input)
expected = StringMapTest.spark.createDataFrame([['a', 'b', 1.0]], OUTPUT_SCHEMA)
assert_df(expected, result)
@staticmethod
def test_validate_handleInvalid_ok():
StringMap(labels={}, handleInvalid='error')
def test_validate_handleInvalid_bad(self):
with self.assertRaises(AssertionError):
StringMap(labels=None, inputCol=dict(), outputCol=None, handleInvalid='invalid')
def test_validate_labels_type_fails(self):
with self.assertRaises(AssertionError):
StringMap(labels=None, inputCol=set(), outputCol=None)
def test_validate_labels_key_fails(self):
with self.assertRaises(AssertionError):
StringMap(labels=None, inputCol={False: 0.0}, outputCol=None)
def test_validate_labels_value_fails(self):
with self.assertRaises(AssertionError):
StringMap(labels=None, inputCol={'valid_key_type': 'invalid_value_type'}, outputCol=None)
def _serialize_to_file(model, df_for_serializing):
jar_file_path = _to_jar_file_path(
os.path.join(tempfile.mkdtemp(), 'test_serialize_to_bundle-pipeline.zip'))
SimpleSparkSerializer().serializeToBundle(model, jar_file_path, df_for_serializing)
return jar_file_path
def _to_jar_file_path(path):
return "jar:file:" + path
def _deserialize_from_file(path):
return SimpleSparkSerializer().deserializeFromBundle(path)
| 36.192593 | 107 | 0.657388 | 3,654 | 0.747851 | 0 | 0 | 245 | 0.050143 | 0 | 0 | 427 | 0.087393 |
e374829b389cef040daa81ebe91954032d3a7a55 | 72 | py | Python | __init__.py | hoel-bagard/yolact | 028fd121e94c18531243a73eb4c0d443fc38a079 | [
"MIT"
]
| null | null | null | __init__.py | hoel-bagard/yolact | 028fd121e94c18531243a73eb4c0d443fc38a079 | [
"MIT"
]
| null | null | null | __init__.py | hoel-bagard/yolact | 028fd121e94c18531243a73eb4c0d443fc38a079 | [
"MIT"
]
| null | null | null | from .predict import YolactK
from .data import *
__version__ = "0.1.0"
| 14.4 | 28 | 0.722222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.097222 |
e374d42c0a5b986cfc32f92436749b7345991388 | 4,210 | py | Python | stocks.py | nicojapas/algorithmic_trading | 46b2b59253638f15858e44b4ebae39eb222a4619 | [
"MIT"
]
| 1 | 2021-03-16T12:11:47.000Z | 2021-03-16T12:11:47.000Z | stocks.py | nicojapas/algorithmic_trading | 46b2b59253638f15858e44b4ebae39eb222a4619 | [
"MIT"
]
| null | null | null | stocks.py | nicojapas/algorithmic_trading | 46b2b59253638f15858e44b4ebae39eb222a4619 | [
"MIT"
]
| 1 | 2022-01-14T21:48:08.000Z | 2022-01-14T21:48:08.000Z | #!/usr/bin/env python
# coding: utf-8
# In[6]:
import pandas as pd
import io
import requests
import time
import random
# In[3]:
# gets the hidden API keys
api_key = pd.read_csv('secrets.csv').api_key.to_string().split()[1]
# In[124]:
# gets data using user's parameters
def get_data(symbol, interval):
"""
Signature: get_data(symbol, period) -> 'DataFrame'
Docstring:
Retrieves market data for the selected symbol and period.
Parameters
----------
symbol : str
The name of the equity of your choice. For example: symbol=GOOGL.
interval : str
Time interval between two consecutive data points in the time series.
The following values are supported: 1min, 5min, 15min, 30min, 60min.
Returns
-------
DataFrame
Examples
--------
>>> get_data('GOOGL', '60min')
"""
# main url or alphavantage and selection of features from user
BASE_URL = 'https://www.alphavantage.co/query?'
q = {
'function':'TIME_SERIES_INTRADAY_EXTENDED',
'symbol':symbol,
'interval':interval,
'slice':'year1month1',
'apikey':'KO4L9YMRD2VLJX8O'
}
df=pd.DataFrame()
for y in range(1,3):
for m in range(1,13):
# create 'slices' of 1 month each. has to do with how the api functions
q['slice'] = f'year{y}month{m}'
# concatenate all user's selected values into one string
q_str = "".join([i for i in [str(i) + "=" + str(q[i]) + "&" for i in q]])[:-1]
# concatenate the base alphavantage url with the user's query
url = BASE_URL + q_str
print(url)
# GET url
response = requests.get(url)
# read data into a pandas dataframe
df=pd.concat([df, pd.read_csv(io.StringIO(response.content.decode('utf-8')))], axis=0)
# because the free api has a limit of 5 calls per minute, we need to wait
time.sleep(60/5)
# returns a dataframe
return(df)
# In[125]:
# auto complete function for stocks
def auto_complete_stocks(x):
"""
Signature: auto_complete_stocks(str) -> 'json'
Docstring:
Makes use of the auto-completion function of Alpha Vantage API.
It takes the user's input and returns a json with the coincidences.
Parameters
----------
symbol : str
A string containing part of the symbol or description of the equity.
For example 'amaz' would return the symbol and description for AMZN stocks, etc.
Returns
-------
json
"""
BASE_URL = 'https://www.alphavantage.co/query?'
url = f'https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords={x}&datatype=json&apikey={api_key}'
response = requests.get(url).json()
return(response)
# In[ ]:
# to fetch all updated stocks and ETFs supported
def get_supported_stocks():
"""
Signature: get_supported_stocks() -> 'DataFrame'
Docstring:
Retrieves the supported list of stocks and ETFs from Alpha Vantage, using their API.
See https://www.alphavantage.co/
Returns
-------
DataFrame
Examples
--------
>>> get_supported_stocks()
"""
BASE_URL = 'https://www.alphavantage.co/query?'
url = f'https://www.alphavantage.co/query?function=LISTING_STATUS&apikey={api_key}'
response = requests.get(url)
x=pd.read_csv(io.StringIO(response.content.decode('utf-8')))
return(x)
# In[ ]:
# to fetch all updated stocks and ETFs supported
# static version loading from .csv previously downloaded
def get_supported_stocks_static():
"""
Signature: get_supported_stocks() -> 'DataFrame'
Docstring:
Retrieves the supported list of stocks and ETFs from Alpha Vantage, using their API.
This 'static' version loads the list from a .csv file.
Returns
-------
DataFrame
Examples
--------
>>> get_supported_stocks()
"""
x = pd.read_csv('data/stocks_etfs_list.csv')
l1 = x['symbol'].to_list()
l2 = x['name'].to_list()
l3 = [str(i) + " - " + str(j) for i, j in zip(l1, l2)]
return(l1, l2, l3)
| 24.195402 | 113 | 0.611639 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,897 | 0.688124 |
e3773931c3c2274119d47a9e56c7b5427c5ed618 | 241 | py | Python | python/sock-merchant.py | gajubadge11/hackerrank-3 | 132a5019b7ed21507bb95b5063fa66c446b0eff7 | [
"MIT"
]
| 21 | 2015-02-09T18:08:38.000Z | 2021-11-08T15:00:48.000Z | python/sock-merchant.py | gajubadge11/hackerrank-3 | 132a5019b7ed21507bb95b5063fa66c446b0eff7 | [
"MIT"
]
| 7 | 2020-04-12T23:00:19.000Z | 2021-01-30T23:44:24.000Z | python/sock-merchant.py | gajubadge11/hackerrank-3 | 132a5019b7ed21507bb95b5063fa66c446b0eff7 | [
"MIT"
]
| 27 | 2015-07-22T18:08:12.000Z | 2022-02-28T19:50:26.000Z | #!/bin/python3
from collections import Counter
def pairs(socks):
return sum(list(map(lambda sock: sock // 2, Counter(socks).values())))
_ = int(input().strip())
socks = list(map(int, input().strip().split(' ')))
print(pairs(socks))
| 18.538462 | 74 | 0.659751 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.070539 |
e37917c4d561fd8d9c4ecc0de859e1c2d60e6398 | 1,834 | py | Python | statdpwrapper/experiments/exp_without_pp.py | barryZZJ/dp-sniper | 71a3fc06f3fc319b023bde9aad8f05b8c5a47a80 | [
"MIT"
]
| 13 | 2021-03-30T15:39:35.000Z | 2022-02-21T08:30:45.000Z | statdpwrapper/experiments/exp_without_pp.py | barryZZJ/dp-sniper | 71a3fc06f3fc319b023bde9aad8f05b8c5a47a80 | [
"MIT"
]
| null | null | null | statdpwrapper/experiments/exp_without_pp.py | barryZZJ/dp-sniper | 71a3fc06f3fc319b023bde9aad8f05b8c5a47a80 | [
"MIT"
]
| 4 | 2021-06-30T08:37:45.000Z | 2022-03-05T03:21:14.000Z | import os
from dpsniper.utils.my_multiprocessing import initialize_parallel_executor
from dpsniper.utils.paths import get_output_directory, set_output_directory
from statdpwrapper.algorithms_ext import *
from statdpwrapper.experiments.base import run_statdp
from statdpwrapper.experiments.mechanism_config import statdp_mechanism_map, statdp_arguments_map,\
statdp_postprocessing_map, statdp_sensitivity_map, statdp_num_inputs_map
def _get_mechanism(alg_name: str):
if alg_name not in statdp_mechanism_map:
raise ValueError("Unknown mechanism {}".format(alg_name))
return statdp_mechanism_map[alg_name]
def run_without_postprocessing(n_processes: int, out_dir: str, alg_name: str):
mechanism = _get_mechanism(alg_name)
kwargs = statdp_arguments_map[alg_name]
pp_config = statdp_postprocessing_map[alg_name]
num_inputs = statdp_num_inputs_map[alg_name]
sensitivity = statdp_sensitivity_map[alg_name]
log.configure("WARNING")
set_output_directory(out_dir)
logs_dir = get_output_directory("logs")
log_file = os.path.join(logs_dir, "original_statdp_{}_log.log".format(alg_name))
data_file = os.path.join(logs_dir, "original_statdp_{}_data.log".format(alg_name))
if os.path.exists(log_file):
log.warning("removing existing log file '%s'", log_file)
os.remove(log_file)
if os.path.exists(data_file):
log.warning("removing existing log file '%s'", data_file)
os.remove(data_file)
log.configure("INFO", log_file=log_file, data_file=data_file, file_level="INFO")
with initialize_parallel_executor(n_processes, out_dir):
# run StatDP with disabled postprocessing
pp_config.disable_pp = True
run_statdp(alg_name, mechanism, pp_config, num_inputs, sensitivity, kwargs)
log.info("finished experiment")
| 41.681818 | 99 | 0.768811 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 234 | 0.12759 |
e37a1fef8ba0d57e0296169fc7cb4fee0cc149e2 | 1,134 | py | Python | ants/registration/symimg.py | ncullen93/ANTsPy | a4c990dcd5b7445a45ce7b366ee018c7350e7d9f | [
"Apache-2.0"
]
| 3 | 2018-06-07T19:11:47.000Z | 2019-06-10T05:24:06.000Z | ants/registration/symimg.py | ncullen93/ANTsPy | a4c990dcd5b7445a45ce7b366ee018c7350e7d9f | [
"Apache-2.0"
]
| null | null | null | ants/registration/symimg.py | ncullen93/ANTsPy | a4c990dcd5b7445a45ce7b366ee018c7350e7d9f | [
"Apache-2.0"
]
| 1 | 2019-04-04T06:18:44.000Z | 2019-04-04T06:18:44.000Z |
__all__ = ['symimg']
from tempfile import mktemp
from .reflect_image import reflect_image
from .interface import registration
from .apply_transforms import apply_transforms
from ..core import image_io as iio
def symimg(img, gs=0.25):
"""
Symmetrize an image
Example
-------
>>> import ants
>>> img = ants.image_read( ants.get_ants_data('r16') , 'float')
>>> simg = ants.symimg(img)
"""
imgr = reflect_image(img, axis=0)
imgavg = imgr * 0.5 + img
for i in range(5):
w1 = registration(imgavg, img, type_of_transform='SyN')
w2 = registration(imgavg, imgr, type_of_transform='SyN')
xavg = w1['warpedmovout']*0.5 + w2['warpedmovout']*0.5
nada1 = apply_transforms(img, img, w1['fwdtransforms'], compose=w1['fwdtransforms'][0])
nada2 = apply_transforms(img, img, w2['fwdtransforms'], compose=w2['fwdtransforms'][0])
wavg = (iio.image_read(nada1) + iio.image_read(nada2)) * (-0.5)
wavgfn = mktemp(suffix='.nii.gz')
iio.image_write(wavg, wavgfn)
xavg = apply_transforms(img, imgavg, wavgfn)
return xavg
| 27.658537 | 95 | 0.636684 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 295 | 0.260141 |
e37b4614ad6f2375762685c14b508a5358b41194 | 184 | py | Python | CRONtest/hello.py | liu2z2/TutorCal | 41cd0272d59cd1cca439cfef178485d0d8096820 | [
"MIT"
]
| null | null | null | CRONtest/hello.py | liu2z2/TutorCal | 41cd0272d59cd1cca439cfef178485d0d8096820 | [
"MIT"
]
| null | null | null | CRONtest/hello.py | liu2z2/TutorCal | 41cd0272d59cd1cca439cfef178485d0d8096820 | [
"MIT"
]
| null | null | null | import datetime
time=datetime.datetime.today().strftime("%H-%M-%S")
text_file = open("/home/pi/TutorCal/CRONtest/"+time+".txt", "w")
text_file.write("Hello world!")
text_file.close() | 26.285714 | 64 | 0.711957 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 62 | 0.336957 |
e37b5fbe24287ac8297fbe7f44ed3c806e40c97b | 3,775 | py | Python | src/facrecog_core.py | GaussQR/cs305-g01 | 06b1ad9ba2d05e7c76ee10eb053e9d091b070d6d | [
"MIT"
]
| null | null | null | src/facrecog_core.py | GaussQR/cs305-g01 | 06b1ad9ba2d05e7c76ee10eb053e9d091b070d6d | [
"MIT"
]
| null | null | null | src/facrecog_core.py | GaussQR/cs305-g01 | 06b1ad9ba2d05e7c76ee10eb053e9d091b070d6d | [
"MIT"
]
| null | null | null | import dlib
import face_recognition
import glob
import pickle
import cv2
import numpy as np
import os
from PIL import Image,ImageFont, ImageDraw, ImageEnhance
def add_target_faces(path):
faces = {}
for img in glob.glob(path + "/*.jpg"):
print("encoding img...")
f_image = face_recognition.load_image_file(img)
x = face_recognition.face_encodings(f_image)[0]
name = img.split('/')[1].split('.')[0]
# if faces.get(name) is None:
# faces[name] = []
faces[name] = x
with open('encoded_faces.pkl', 'wb') as fp:
pickle.dump(faces, fp)
def load_encoded_faces(path_file='encoded_faces.pkl'):
return pickle.load(open(path_file,'rb'))
def identify_faces_image(img, faces, save_output=0, isfile=0):
print(save_output)
face_enc, name_face = list(faces.values()), list(faces.keys()) # Loading face encoding along with their names.
group_img = face_recognition.load_image_file(img) if isfile == 0 else img
coordinates = face_recognition.face_locations(group_img)
face_encodings = face_recognition.face_encodings(group_img)
src_img = Image.open(img).convert("RGB") if isfile == 0 else Image.fromarray(img).convert('RGB')
draw = ImageDraw.Draw(src_img)
face_in_img = []
# print(img," contains faces: ")
for (c,each_encoding) in zip(coordinates,face_encodings):
results = face_recognition.compare_faces(face_enc, each_encoding, 0.5)
indices = [i for i, value in enumerate(results) if value == True] # Should be one
# assert(len(indices) == 1)
for index in indices:
recog_name = name_face[index]
face_in_img.append((recog_name, c))
if save_output:
draw.rectangle(((c[3],c[0]), (c[1],c[2])), outline='red')
draw.text((c[3]+1, c[2]-1), recog_name, font = ImageFont.truetype('arial.ttf', 160))
if save_output:
if 'output' not in os.listdir():
os.mkdir('output')
src_img.save('output/' + img.split('/')[-1])
return face_in_img
def identify_faces_images(path_folder, faces, save_output=0):
faces_in_folder = []
for img in glob.glob(path_folder + "/*.jpg"):
faces_in_folder.append(identify_faces_image(img, faces, save_output))
return faces_in_folder
# from google.colab.patches import cv2_imshow
def identify_faces_video(path_video, faces, show_output=0):
cap = cv2.VideoCapture(path_video)
frate = cap.get(cv2.CAP_PROP_FPS)
faces_in_video = []
i = 0
while cap.isOpened():
ret, frame = cap.read()
if ret == False: break
i += 1
if i % frate != 1: continue
# frame = cv2.resize(frame, (0, 0), fx = 0.25, fy = 0.25)
frame = np.array(frame[:, :, ::-1])
# print(frame.shape)
res = identify_faces_image(frame, faces, isfile=1) #[(name, coordin)]
faces_in_video.append(res)
if show_output:
for (name, (top, right, bottom, left)) in res:
# top, right, bottom, left = map(int, [top, right, bottom, left])
# print((top, right, bottom, left))
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
frame = frame[:, :, ::-1]
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
return faces_in_video
add_target_faces('known')
faces = load_encoded_faces('encoded_faces.pkl')
identify_faces_video('al.mp4', faces, 1) | 41.944444 | 115 | 0.614834 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 612 | 0.162119 |
e37b884d8c5f1d75d2683c37c9063a16300e4321 | 117 | py | Python | src/aioprometheus/formats/__init__.py | jbunce12/aioprometheus | d6dec47b05cab04901ffb8d2016d659927e02311 | [
"MIT"
]
| null | null | null | src/aioprometheus/formats/__init__.py | jbunce12/aioprometheus | d6dec47b05cab04901ffb8d2016d659927e02311 | [
"MIT"
]
| null | null | null | src/aioprometheus/formats/__init__.py | jbunce12/aioprometheus | d6dec47b05cab04901ffb8d2016d659927e02311 | [
"MIT"
]
| null | null | null | from . import text
from .base import IFormatter
try:
from . import binary
except ImportError:
binary = None
| 14.625 | 28 | 0.717949 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e37d41115e68a3191b6a2c67d0ba9d33fd342473 | 378 | py | Python | pulpo_forms_example/urls.py | pulpocoders/pulpo-forms-examples | 8b9121b8e323b9432d17f7fc0812405668df3b04 | [
"Apache-2.0"
]
| 3 | 2015-11-05T00:23:32.000Z | 2017-05-02T15:24:11.000Z | pulpo_forms_example/urls.py | pulpocoders/pulpo-forms-examples | 8b9121b8e323b9432d17f7fc0812405668df3b04 | [
"Apache-2.0"
]
| null | null | null | pulpo_forms_example/urls.py | pulpocoders/pulpo-forms-examples | 8b9121b8e323b9432d17f7fc0812405668df3b04 | [
"Apache-2.0"
]
| 1 | 2015-08-01T02:03:23.000Z | 2015-08-01T02:03:23.000Z | from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^example/', include('pulpo_example.urls')),
url(r'^pulpo/', include('pulpo_forms.urls'), name='base'),
url(r'^admin/', include(admin.site.urls)),
url(r'^model_field_form/$',
'pulpo_forms.views.render_form',
{'instance': 'model-field-example'}),
]
| 29.076923 | 62 | 0.656085 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 160 | 0.42328 |
e37dccf1196dd3e409502f652bd89e454eb6a2b8 | 2,903 | py | Python | backup_tool/utils.py | tnoff/backup-tool | 114d066b0aeaa9dab9e2594f42a520839587df20 | [
"BSD-2-Clause"
]
| null | null | null | backup_tool/utils.py | tnoff/backup-tool | 114d066b0aeaa9dab9e2594f42a520839587df20 | [
"BSD-2-Clause"
]
| null | null | null | backup_tool/utils.py | tnoff/backup-tool | 114d066b0aeaa9dab9e2594f42a520839587df20 | [
"BSD-2-Clause"
]
| null | null | null | import codecs
from contextlib import contextmanager
import hashlib
import logging
from logging.handlers import RotatingFileHandler
import random
import string
from pathlib import Path
def random_string(length=32, prefix='', suffix=''):
'''
Generate random string
length : Length of string
prefix : Prefix to place before random characters
suffix : Suffix to place after random characters
'''
chars = string.ascii_lowercase + string.digits
generated = "".join(random.choice(chars) for _ in range(length - len(prefix) - len(suffix)))
return f'{prefix}{generated}{suffix}'
@contextmanager
def temp_file(directory, name=None, suffix='', delete=True):
'''
Create temporary file
name : Name of temporary file
directory : Directory for temporary files
suffix : Suffix for temporary file name ( not used if name given )
delete : Delete file after use
'''
file_path = None
directory = Path(directory)
if not directory.exists():
directory.mkdir(parents=True)
if not name:
file_path = directory / random_string(suffix=suffix)
else:
file_path = directory / name
try:
if file_path:
yield Path(file_path)
else:
yield None
finally:
if delete and file_path and file_path.exists():
file_path.unlink()
def md5(input_file, chunksize=64*1024):
'''
Get md5 base64 hash of input file
'''
hash_value = hashlib.md5()
with open(input_file, 'rb') as read:
while True:
chunk = read.read(chunksize)
if not chunk:
break
try:
hash_value.update(chunk.encode('utf-8'))
except AttributeError:
# File is likely binary
hash_value.update(chunk)
md5_value = codecs.encode(hash_value.digest(), 'base64')
# This leaves "b'<hash> at beginning, so take out first two chars
return str(md5_value).rstrip("\\n'")[2:]
def setup_logger(name, log_file_level, logging_file=None,
console_logging=True, console_logging_level=logging.INFO):
'''
Setup logging
'''
logger = logging.getLogger(name)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
logger.setLevel(log_file_level)
if logging_file is not None:
fh = RotatingFileHandler(logging_file,
backupCount=4,
maxBytes=((2 ** 20) * 10))
fh.setLevel(log_file_level)
fh.setFormatter(formatter)
logger.addHandler(fh)
if console_logging:
sh = logging.StreamHandler()
sh.setLevel(console_logging_level)
sh.setFormatter(formatter)
logger.addHandler(sh)
return logger
| 31.901099 | 96 | 0.613503 | 0 | 0 | 765 | 0.26352 | 781 | 0.269032 | 0 | 0 | 727 | 0.250431 |
e37ecee0bdcbaf77c2d3a8c1147419b338c8e1b7 | 1,573 | py | Python | invmonInfra/models/inventoryModels.py | jtom38/invmon-api | 28f163bef47ee5c95bac0f40198e25e44090758f | [
"MIT"
]
| null | null | null | invmonInfra/models/inventoryModels.py | jtom38/invmon-api | 28f163bef47ee5c95bac0f40198e25e44090758f | [
"MIT"
]
| 16 | 2021-12-09T06:22:29.000Z | 2022-03-25T06:26:01.000Z | invmonInfra/models/inventoryModels.py | jtom38/invmon-api | 28f163bef47ee5c95bac0f40198e25e44090758f | [
"MIT"
]
| null | null | null | from logging import lastResort
from pydantic import BaseModel
from invmonApi.database import Base
from invmonInfra.enum import InventoryLastStatusEnum
from sqlalchemy import Column, String, Boolean
from uuid import uuid4
class InventorySqlModel(Base):
__tablename__ = "inventory"
id = Column(String, primary_key=True)
enabled: bool = Column(Boolean)
itemName: str = Column(String)
lastStatus: str = Column(String)
url: str = Column(String)
class InventoryApiModel(BaseModel):
id: str
enabled: bool
itemName: str
lastStatus: str
url: str
class InventoryModel():
id: str
enabled: bool
itemName: str
lastStatus: str
url: str
def __init__(self, enabled: str, itemName: str, url: str, lastStatus: InventoryLastStatusEnum, id: str = '') -> None:
self.id: str = id
if self.id == '':
self.id = str(uuid4())
self.enabled = enabled
self.itemName = itemName
self.lastStatus = lastStatus.value
self.url = url
pass
def getApiModel(self) -> InventoryApiModel:
r = InventoryApiModel()
r.id = self.id
r.enabled = self.enabled
r.itemName = self.itemName
r.lastStatus = self.lastStatus
r.url = self.url
return r
def getSqlModel(self) -> InventorySqlModel:
r = InventorySqlModel()
r.id = self.id
r.enabled = self.enabled
r.itemName = self.itemName
r.lastStatus = self.lastStatus
r.url = self.url
return r
| 25.370968 | 121 | 0.628099 | 1,334 | 0.848061 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.009536 |
e37f1a70ff938fa436f4a4d3d93cb8fdc066ba63 | 2,201 | py | Python | chatette/parsing/lexing/rule_slot_val.py | ziligy/Chatette | 014c0b0a991bf66cb69fc6a69e0f6c298974eec9 | [
"MIT"
]
| 263 | 2018-09-06T14:46:29.000Z | 2022-03-31T08:40:19.000Z | chatette/parsing/lexing/rule_slot_val.py | ziligy/Chatette | 014c0b0a991bf66cb69fc6a69e0f6c298974eec9 | [
"MIT"
]
| 50 | 2018-09-06T14:50:18.000Z | 2021-11-16T03:54:27.000Z | chatette/parsing/lexing/rule_slot_val.py | ziligy/Chatette | 014c0b0a991bf66cb69fc6a69e0f6c298974eec9 | [
"MIT"
]
| 49 | 2018-09-18T23:15:09.000Z | 2022-03-02T11:23:08.000Z | # coding: utf-8
"""
Module `chatette.parsing.lexing.rule_slot_val`
Contains the definition of the class that represents the lexing rule
to tokenize a slot value being set within a unit rule (only for a slot).
"""
from chatette.parsing.lexing.lexing_rule import LexingRule
from chatette.parsing.lexing import LexicalToken, TerminalType
from chatette.parsing.utils import find_next_comment, SLOT_VAL_SYM
class RuleSlotVal(LexingRule):
def _apply_strategy(self, **kwargs):
"""
`kwargs` can contain a boolean with key `parsing_slot_def` that is
`True` if the current text is part of a slot definition.
If this boolean is not in `kwargs`, defaults to `False`.
"""
parsing_slot_def = kwargs.get("parsing_slot_def", False)
if parsing_slot_def:
while self._text[self._next_index].isspace():
self._next_index += 1
self._update_furthest_matched_index()
if self._text.startswith(SLOT_VAL_SYM, self._next_index):
self._tokens.append(
LexicalToken(TerminalType.slot_val_marker, SLOT_VAL_SYM)
)
self._next_index += 1
self._update_furthest_matched_index()
while self._text[self._next_index].isspace():
self._next_index += 1
self._update_furthest_matched_index()
comment_sym = find_next_comment(self._text, self._next_index)
if comment_sym is not None:
slot_value = \
self._text[self._next_index:comment_sym].rstrip()
else:
slot_value = self._text[self._next_index:].rstrip()
self._tokens.append(
LexicalToken(TerminalType.slot_val, slot_value)
)
self._next_index += len(slot_value)
self._update_furthest_matched_index()
return True
return False
else:
raise ValueError(
"Tried to extract a slot value within a rule that is not " + \
"part of a slot definition."
)
| 37.948276 | 78 | 0.598364 | 1,795 | 0.815538 | 0 | 0 | 0 | 0 | 0 | 0 | 535 | 0.243071 |
e37fa4448f670de81c2e240c869389511aaf6b49 | 441 | py | Python | python算法/6.5如何根据已知随机数生成函数计算新的随机数.py | RobinYaoWenbin/Python-CommonCode | 1ee714541f2fd9c8b96d018d3d4eb94f4edc812a | [
"MIT"
]
| 12 | 2020-09-28T03:25:03.000Z | 2022-03-20T07:44:09.000Z | python算法/6.5如何根据已知随机数生成函数计算新的随机数.py | RobinYaoWenbin/Python-CommonCode | 1ee714541f2fd9c8b96d018d3d4eb94f4edc812a | [
"MIT"
]
| null | null | null | python算法/6.5如何根据已知随机数生成函数计算新的随机数.py | RobinYaoWenbin/Python-CommonCode | 1ee714541f2fd9c8b96d018d3d4eb94f4edc812a | [
"MIT"
]
| 21 | 2020-03-19T00:44:35.000Z | 2022-01-30T03:46:18.000Z | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 23 20:28:51 2020
@author: Administrator
"""
"""
已知随机数rand7()产生的随机数是整数1~7的均匀分布,如何构造rand10()函数,使其产生的随机数是整数1-10的均匀分布.
"""
import random
def rand7():
return random.randint(1,7)
def rand10():
x = 0
while True:
x = (rand7() - 1) * 7 + rand7()
if x <= 40:
break
return x % 10 + 1
if __name__ == "__main__":
print(rand10()) | 16.961538 | 67 | 0.535147 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 263 | 0.502868 |
e380169c4e3938481f564cc3fc99e33f8bdaa725 | 26,432 | py | Python | pytropos/internals/values/python_values/python_values.py | helq/pytropos | 497ed5902e6e4912249ca0a46b477f9bfa6ae80a | [
"MIT"
]
| 4 | 2019-10-06T18:01:24.000Z | 2020-07-03T05:27:35.000Z | pytropos/internals/values/python_values/python_values.py | helq/pytropos | 497ed5902e6e4912249ca0a46b477f9bfa6ae80a | [
"MIT"
]
| 5 | 2021-06-07T15:50:04.000Z | 2021-06-07T15:50:06.000Z | pytropos/internals/values/python_values/python_values.py | helq/pytropos | 497ed5902e6e4912249ca0a46b477f9bfa6ae80a | [
"MIT"
]
| null | null | null | from abc import ABC, abstractmethod
from enum import Enum
from functools import partial
# from math import isinf
from typing import Union, Optional, Any
from typing import Callable, Tuple, Dict, List, Set, Type # noqa: F401
from ..builtin_values import Bool, ops_symbols
from ..abstract_value import AbstractValue
from ...abstract_domain import AbstractDomain
from ...errors import TypeCheckLogger
from .objects_ids import new_id
from ...miscelaneous import Pos
__all__ = ['PythonValue', 'PT', 'AbstractMutVal', 'Args']
class PT(Enum):
"""Python types supported in pytropos"""
# Undefined = 0
Top = 1
# Bottom = 2
InConstruction = 11
class PythonValue(AbstractDomain):
def __init__(self,
val: Union[AbstractValue, PT] = PT.Top
) -> None:
self.val = val
__top = None # type: PythonValue
@classmethod
def top(cls) -> 'PythonValue':
"""Returns the Top element from the lattice: Any?"""
if cls.__top is None:
cls.__top = PythonValue(PT.Top)
return cls.__top
def is_top(self) -> 'bool':
"""Returns True if this object is the top of the lattice, ie, if Any?"""
return self.val is PT.Top
def join(self, other: 'PythonValue') -> 'PythonValue':
if self.val is PT.Top or other.val is PT.Top:
return PythonValue.top()
assert isinstance(self.val, AbstractValue)
assert isinstance(other.val, AbstractValue)
if type(self.val) is type(other.val): # noqa: E721
return PythonValue(self.val.join(other.val))
return PythonValue.top()
def widen_op(self, other: 'PythonValue') -> 'Tuple[PythonValue, bool]':
# eg: PythonValue(Int(5)) == PythonValue(Int(5))
if self == other:
return self, True
# eg: PythonValue(PT.Top) and PythonValue(Int(5))
if self.val is PT.Top or other.val is PT.Top:
return PythonValue.top(), False
# eg: PythonValue(Float(3)) and PythonValue(Int(5))
if type(self.val) is not type(other.val): # noqa: E721
return PythonValue.top(), False
assert isinstance(self.val, AbstractValue)
assert isinstance(other.val, AbstractValue)
# eg: PythonValue(List([3])) and PythonValue(List([3,5]))
if self.__op_in_abstractvalue_overwritten(self.val.widen_op):
new_val, fix = self.val.widen_op(other.val)
# eg: PythonValue(Int(3)) and PythonValue(Int(5))
else:
new_val = self.val.join(other.val)
# TODO(helq): This is not how a widening operator is defined, actually we
# compare with <= not == !!!
fix = new_val == self.val
return PythonValue(new_val), fix
def is_mut(self) -> 'bool':
"""Checks if the object is mutable"""
return isinstance(self.val, AbstractMutVal)
@property
def mut_id(self) -> 'int':
"""Returns id of object if it is mutable"""
assert isinstance(self.val, AbstractMutVal)
return self.val.mut_id
def copy_mut(self,
mut_heap: 'Dict[int, PythonValue]'
) -> 'PythonValue':
"""Copies a mutable object recursively"""
assert isinstance(self.val, AbstractMutVal)
if self.is_top():
return self
if self.mut_id in mut_heap:
return mut_heap[self.mut_id]
else:
new_obj = mut_heap[self.mut_id] = PythonValue(PT.InConstruction)
new_obj.val = self.val.copy_mut(mut_heap)
return new_obj
def convert_into_top(self, converted: 'Set[int]') -> None:
"""Makes the underlying AbstractMutVal Top"""
assert isinstance(self.val, AbstractMutVal)
self.val.convert_into_top(converted)
self.val = self.val.top()
def new_vals_to_top(
self,
mut_heap: 'Dict[Tuple[str, int], Tuple[int, int, PythonValue]]',
side: str
) -> None:
"""Makes a mutable object Top"""
assert isinstance(self.val, AbstractMutVal)
self.val.new_vals_to_top(mut_heap, side)
def join_mut(self,
other: 'PythonValue',
mut_heap: 'Dict[Tuple[str, int], Tuple[int, int, PythonValue]]'
) -> 'PythonValue':
"""Joining two mutable PythonValues"""
assert isinstance(self.val, AbstractMutVal)
assert isinstance(other.val, AbstractMutVal)
left_iden = ('left', self.mut_id)
right_iden = ('right', other.mut_id)
# Checking if we have encounter already this value
if (left_iden in mut_heap) or (right_iden in mut_heap):
# self and other have already been joined
if (left_iden in mut_heap) and mut_heap[left_iden][1] == other.mut_id:
# assert right_iden in mut_heap
assert mut_heap[right_iden][0] == self.mut_id
assert mut_heap[right_iden][2] is mut_heap[left_iden][2]
return mut_heap[left_iden][2]
# left has been already been joined with other object
else:
self.new_vals_to_top(mut_heap, 'left')
other.new_vals_to_top(mut_heap, 'right')
return PythonValue.top()
if type(self.val) is not type(other.val): # noqa: E721
self.new_vals_to_top(mut_heap, 'left')
other.new_vals_to_top(mut_heap, 'right')
return PythonValue.top()
# If the value is top the result its top
if self.val.is_top():
other.new_vals_to_top(mut_heap, 'right')
return PythonValue(self.val.top())
if other.val.is_top():
self.new_vals_to_top(mut_heap, 'right')
return PythonValue(self.val.top())
new_obj = PythonValue(PT.InConstruction)
mut_heap[left_iden] = mut_heap[right_iden] = \
(self.mut_id, other.mut_id, new_obj)
new_val = self.val.join_mut(other.val, mut_heap)
if new_obj.val == PT.InConstruction:
new_obj.val = new_val
# Notice that we don't change the value of the Object if it is not InConstruction.
# If a PythonValue is not anymore in construction it means that it has been made
# "top" by some call before it
return new_obj
# TODO(helq): This equality function is faulty (because of the underlying mutable
# variables). An equality function should be defined in Store, not here, to compare
# two different Stores. Similar to how `join_mut` is defined
def __eq__(self, other: Any) -> 'bool':
if self is other:
return True
if not isinstance(other, PythonValue):
return False
return self.val == other.val
__repr_visited = set() # type: Set[int]
def __repr__(self) -> str:
if self.val is PT.Top:
return "Top"
elif self.val is PT.InConstruction:
return "InConstruction"
else: # self.type is PT.Top
assert not isinstance(self.val, PT)
if self.is_mut():
if self.mut_id in self.__repr_visited:
return 'Ref'
else:
self.__repr_visited.add(self.mut_id)
r = self.val.abstract_repr
self.__repr_visited.remove(self.mut_id)
return r
else:
return self.val.abstract_repr
# TODO(helq): Improve by checking if the given parameters correspond to the arguments
# the function receives, if not return Top
def call(self,
store: Any,
args: 'Args',
pos: Optional[Pos] = None) -> 'PythonValue':
if self.is_top():
return PythonValue.top()
# This assert is always true, it's just to keep Mypy from crying
assert isinstance(self.val, AbstractValue), \
f"The type is {type(self.val)} but should have been an AbstractValue"
call_method = self.val.fun_call
if self.__op_in_abstractvalue_overwritten(call_method):
newval = call_method(store, args, pos) # type: PythonValue
assert isinstance(newval, PythonValue), "A function call didn't return a PythonValue"
else:
TypeCheckLogger().new_warning(
"E016",
f"TypeError: '{self.val.type_name}' object is not callable",
pos)
newval = PythonValue.top()
return newval
@property
def attr(self) -> 'AttrsContainer':
if self.is_top():
return AttrsTopContainer()
# This assert is always true, it's just to keep Mypy from crying
assert isinstance(self.val, AbstractValue), \
f"The type is {type(self.val)} but should have been an AbstractValue"
call_method = self.val.get_attrs
if self.__op_in_abstractvalue_overwritten(call_method):
return call_method() # type: ignore
else:
return AttrsTopContainer()
def subs(self, pos: 'Optional[Pos]' = None) -> 'SubscriptsContainer':
if self.is_top():
return SubscriptsTopContainer()
# This assert is always true, it's just to keep Mypy from crying
assert isinstance(self.val, AbstractValue), \
f"The type is {type(self.val)} but should have been an AbstractValue"
call_method = self.val.get_subscripts
if self.__op_in_abstractvalue_overwritten(call_method):
return call_method(pos) # type: ignore
else:
TypeCheckLogger().new_warning(
"E015",
f"TypeError: '{self.val.type_name}' object is not subscriptable",
pos)
return SubscriptsTopContainer()
def __getattr__(self, name: str) -> Any:
# Checking if name is add, mul, truediv
if name in ops_symbols.keys():
return partial(self.operate, name)
raise AttributeError(f"PythonValue has no attribute called '{name}'")
@staticmethod
def __op_in_abstractvalue_overwritten(method: Any) -> 'bool':
"""Checks whether the method (defined in AbstractValue) was overwriten or not"""
notoverwritten = hasattr(method, '__qualname__') and \
method.__qualname__.split('.')[0] == "AbstractValue"
return not notoverwritten # ie, True if method overwritten
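# Illustration (not part of the original file) of the check above: a method still
# inherited from AbstractValue keeps "AbstractValue" as the first component of its
# __qualname__, so it is reported as *not* overwritten, e.g.:
#
#     class MyVal(AbstractValue):
#         def op_add(self, other, pos=None): ...
#
#     MyVal.op_add.__qualname__   # 'MyVal.op_add'         -> overwritten
#     MyVal.op_mul.__qualname__   # 'AbstractValue.op_mul' -> inherited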
def operate(self, op: str, other: 'PythonValue', pos: Optional[Pos] = None) -> 'PythonValue':
op_sym = ops_symbols[op]
if self.val is PT.Top or other.val is PT.Top:
return PythonValue.top()
# This assert is always true, it's just to keep Mypy from crying
assert isinstance(self.val, AbstractValue), \
f"Left type is {type(self.val)} but should have been an AbstractValue"
assert isinstance(other.val, AbstractValue), \
f"Left type is {type(other.val)} but should have been an AbstractValue"
# If both values have the same type use val.op_add(otherval)
if type(self.val) is type(other.val): # noqa: E721
# Checking if op_add has been overwritten by the class that has been called
# If it hasn't, the operation result is Top
op_method = getattr(self.val, f'op_{op}')
if self.__op_in_abstractvalue_overwritten(op_method):
newval = op_method(other.val, pos)
else:
TypeCheckLogger().new_warning(
"E009",
f"TypeError: unsupported operand type(s) for {op_sym}: "
f"'{self.val.type_name}' and '{other.val.type_name}'",
pos)
newval = PT.Top
# If values have different type use val.op_add_OtherType(otherval)
# or otherval.op_radd_Type(val)
else:
leftOpName = "op_r{op}_{class_name}".format(op=op, class_name=type(self.val).__name__)
rightOpName = "op_{op}_{class_name}".format(op=op, class_name=type(other.val).__name__)
try:
newval = getattr(self.val, rightOpName)(other.val, pos)
except AttributeError:
try:
newval = getattr(other.val, leftOpName)(self.val, pos)
except AttributeError:
TypeCheckLogger().new_warning(
"E009",
f"TypeError: unsupported operand type(s) for {op_sym}: "
f"'{self.val.type_name}' and '{other.val.type_name}'",
pos)
newval = PT.Top
if newval is None:
return PythonValue.top()
return PythonValue(newval)
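# Dispatch sketch for `operate` (Int/Float below are hypothetical AbstractValue
# subclasses, used purely for illustration):
#   Int + Int   -> Int.op_add(other, pos)                     (same concrete type)
#   Int + Float -> Int.op_add_Float(other, pos) if defined,
#                  otherwise Float.op_radd_Int(self, pos)     (mixed types)
#   otherwise   -> warning E009 is logged and the result is Top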
def bool(self, pos: Optional[Pos] = None) -> 'PythonValue':
"""method documentation"""
if isinstance(self.val, Bool):
return self
if self.val is PT.Top:
return PythonValue(Bool.top())
assert isinstance(self.val, AbstractValue)
op_method = self.val.op_bool
if self.__op_in_abstractvalue_overwritten(op_method):
bool_val = op_method(pos)
# Checking bool_val is a boolean!
if not isinstance(bool_val, Bool):
TypeCheckLogger().new_warning(
"E010",
f"TypeError: __bool__ should return bool, returned {bool_val.val.type_name}",
pos)
return PythonValue(Bool.top())
return PythonValue(bool_val)
# TODO(helq): If the operation was not defined more stuff is to be done, like
# checking __len__.
# More info: https://docs.python.org/3/reference/datamodel.html#object.__bool__
return PythonValue(Bool.top())
def type(self) -> str:
"""Returns the type of the value hold self.val"""
if self.val is PT.Top:
return "Top"
elif self.val is PT.InConstruction:
return "InConstruction"
else:  # self.val is an AbstractValue
assert not isinstance(self.val, PT)
return str(self.val.type_name)
def __lt__(self, other: 'PythonValue') -> '__builtins__.bool':
if self.is_top():
return False
elif other.is_top():
return True
assert isinstance(self.val, AbstractValue)
assert isinstance(other.val, AbstractValue)
if type(self.val) is not type(other.val): # noqa: E721
return False
try:
return bool(self.val < other.val) # type: ignore
except TypeError:
# TODO(helq): Add warning saying that comparing this two elements is not fully
# supported and may be very slow
pass
# To know if a value in a lattice is bigger than another, one can check:
# join(self, other) == other
if isinstance(self.val, AbstractMutVal):
joining = self.join_mut(other, {}).val
else:
joining = self.val.join(other.val)
return bool(joining == other.val)
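# Example of the ordering trick above on a two-point lattice {Bottom, Top}:
# join(Bottom, Top) == Top, hence Bottom < Top; but join(Top, Bottom) == Top,
# which differs from Bottom, hence not (Top < Bottom).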
class AbstractMutVal(AbstractValue):
"""An AbstractValue that allows mutability"""
def __init__(self, children: 'Optional[Dict[Any, PythonValue]]' = None) -> None:
"""Init must always be called
All attributes and values must be stored into `children`"""
self.__mut_id = new_id() # type: int
self.children = {} if children is None else children
@property
def mut_id(self) -> 'int':
"""Unique id of object"""
return self.__mut_id
__eq_visited = ({}, {}) # type: Tuple[Dict[int, int], Dict[int, int]]
def __eq__(self, other: Any) -> bool:
if self is other:
return True
if not isinstance(other, AbstractMutVal):
return False
if self.mut_id in AbstractMutVal.__eq_visited[0]:
return AbstractMutVal.__eq_visited[0][self.mut_id] == other.mut_id
if other.mut_id in AbstractMutVal.__eq_visited[1]:
return AbstractMutVal.__eq_visited[1][other.mut_id] == self.mut_id
AbstractMutVal.__eq_visited[0][self.mut_id] = other.mut_id
AbstractMutVal.__eq_visited[1][other.mut_id] = self.mut_id
eq = self.children == other.children
del AbstractMutVal.__eq_visited[0][self.mut_id]
del AbstractMutVal.__eq_visited[1][other.mut_id]
return eq
def convert_into_top(self, converted: 'Set[int]') -> None:
"""Makes all children objects connected to this into Top"""
if self.mut_id in converted:
return
converted.add(self.mut_id)
children = self.children
for k, v in children.items():
if v.is_mut():
assert isinstance(v.val, AbstractMutVal)
v.convert_into_top(converted)
children.clear()
def new_vals_to_top(
self,
mut_heap: 'Dict[Tuple[str, int], Tuple[int, int, PythonValue]]',
side: str
) -> None:
"""Makes all new children objects connected to this into Top"""
obj_iden = (side, self.mut_id)
val_children = self.children
self_topped = False
if obj_iden in mut_heap:
new_val = mut_heap[obj_iden][2]
if not new_val.is_top():
new_val.val = PT.Top
self_topped = True
else:
mut_heap[obj_iden] = (self.mut_id, -1, PythonValue.top())
self_topped = True
if self_topped:
children = dict(val_children)
for k, v in children.items():
if v.is_mut():
assert isinstance(v.val, AbstractMutVal)
v.val.new_vals_to_top(mut_heap, side)
def copy_mut(self, mut_heap: 'Dict[int, PythonValue]') -> 'Any':
"""Makes a copy of the current AbstractMutVal.
It must be overwritten to add stuff that is not children (PythonValue's)"""
if self.is_top():
return self
assert len(mut_heap) > 0 \
and self.mut_id in mut_heap, \
"copy_mut cannot be called with an empty mut_heap!"
children = dict(self.children)
for k, v in children.items():
if v.is_mut():
children[k] = v.copy_mut(mut_heap)
cls = type(self)
return cls(children=children)
def join(self, other: 'Any') -> 'Any':
"""Join should never be called.
It is strange to have an AbstractValue (AbstractDomain) which doesn't define a
`join` operation. The reason is that this class is very tightly coupled to
PythonValue. PythonValue is who actually implements the functionality of joining
AbstractMutVals"""
raise NotImplementedError()
# TODO(helq): any children that doesn't appear on both branches should produce a
# warning
def join_mut(self,
other: 'Any',
mut_heap: 'Dict[Tuple[str, int], Tuple[int, int, PythonValue]]',
) -> 'Any':
"""Joins both values including their children"""
assert not self.is_top() and not other.is_top()
assert len(mut_heap) > 0 \
and ('left', self.mut_id) in mut_heap \
and ('right', other.mut_id) in mut_heap, \
"join_mut cannot be called with an empty mut_heap!"
left_children = self.children
right_children = other.children
new_children = {} # Dict[Any, PythonValue]
# almost same code as found in store join
for k in set(left_children).union(right_children):
# The key is only in the left children
if k not in right_children:
# handling the mutable case
left_val = left_children[k]
if left_val.is_mut():
left_val.new_vals_to_top(mut_heap, "left")
new_children[k] = PythonValue.top()
# The key is only in the right store
elif k not in left_children:
# handling the mutable case
right_val = right_children[k]
if right_val.is_mut():
right_val.new_vals_to_top(mut_heap, "right")
new_children[k] = PythonValue.top()
# the key is in both children
else:
val1 = left_children[k]
val2 = right_children[k]
if val1.is_mut():
if val2.is_mut(): # both (val1 and val2) are mutable
new_children[k] = val1.join_mut(val2, mut_heap)
else: # val1 mutable, val2 not mutable
val1.new_vals_to_top(mut_heap, 'left')
new_children[k] = PythonValue.top()
else:
if val2.is_mut(): # val1 not mutable, val2 mutable
val2.new_vals_to_top(mut_heap, 'right')
new_children[k] = PythonValue.top()
else: # both (val1 and val2) are not mutable
new_children[k] = val1.join(val2)
cls = type(self)
return cls(children=new_children)
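# Sketch of the mut_heap bookkeeping used above: keys are ('left'|'right', mut_id)
# pairs identifying an object on one side of the join, and values record
# (left_id, right_id, joined_value), so objects reached twice (aliasing, cycles)
# reuse the same joined PythonValue instead of recursing forever.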
def get_attrs(self) -> 'AttrsContainer':
if self.is_top():
return AttrsTopContainer()
return AttrsMutContainer(self.type_name, self.children)
class Args:
def __init__(
self,
vals: 'Tuple[PythonValue, ...]',
args: 'Optional[PythonValue]' = None,
kargs: 'Optional[Dict[str, PythonValue]]' = None
) -> None:
"""Basic support for arguments to pass to a function"""
self.vals = vals
self.args = args
self.kargs = kargs
class AttrsContainer(ABC):
"""This class acts as a Dict[str, PythonValue]"""
@abstractmethod
def __getitem__(self, key_: 'Union[str, Tuple[str, Pos]]') -> PythonValue:
raise NotImplementedError()
@abstractmethod
def __delitem__(self, key_: 'Union[str, Tuple[str, Pos]]') -> None:
raise NotImplementedError()
@abstractmethod
def __setitem__(self, key_: 'Union[str, Tuple[str, Pos]]', val: PythonValue) -> None:
raise NotImplementedError()
class AttrsMutContainer(AttrsContainer):
"""This class acts as a Dict[str, PythonValue] but it's defined to access and modify
AbstractMutVals
Attributes:
- type_name: The name of the object from which the attributes are being taken
- children: Dictionary with all the references to other PythonValues
- non_mut_attrs: Dictionary with all python references that are created on the spot,
i.e., Not Methods!
- read_only: Signals whether the attributes of the AbstractMutVal are writable"""
def __init__(
self,
type_name: str,
children: 'Dict[Any, PythonValue]',
non_mut_attrs: 'Optional[Dict[Any, Callable[[], PythonValue]]]' = None,
read_only: bool = False
) -> None:
self.type_name = type_name
self.children = children
self.read_only = read_only
self.non_mut_attrs = {} if non_mut_attrs is None else non_mut_attrs
def __getitem__(self, key_: 'Union[str, Tuple[str, Pos]]') -> PythonValue:
if not isinstance(key_, tuple):
key = key_
src_pos = None # type: Optional[Pos]
else:
key, src_pos = key_
if key in self.non_mut_attrs:
return self.non_mut_attrs[key]()
try:
return self.children[('attr', key)]
except KeyError:
TypeCheckLogger().new_warning(
"E011",
f"AttributeError: '{self.type_name}' object has no attribute '{key}'",
src_pos)
return PythonValue.top()
def __setitem__(self,
key_: 'Union[str, Tuple[str, Pos]]',
val: PythonValue) -> None:
if not isinstance(key_, tuple):
key = key_
src_pos = None # type: Optional[Pos]
else:
key, src_pos = key_
if self.read_only or key in self.non_mut_attrs:
TypeCheckLogger().new_warning(
"E012",
f"AttributeError: '{self.type_name}' object attribute '{key}' is read-only",
src_pos)
else:
self.children[('attr', key)] = val
def __delitem__(self, key_: 'Union[str, Tuple[str, Pos]]') -> None:
if not isinstance(key_, tuple):
key = key_
src_pos = None # type: Optional[Pos]
else:
key, src_pos = key_
if self.read_only or key in self.non_mut_attrs:
TypeCheckLogger().new_warning(
"E012",
f"AttributeError: '{self.type_name}' object attribute '{key}' is read-only",
src_pos)
else:
try:
del self.children[('attr', key)]
except KeyError:
TypeCheckLogger().new_warning("E013", f"AttributeError: '{key}'", src_pos)
class AttrsTopContainer(AttrsContainer):
"""This class acts as a Dict[str, PythonValue] that does nothing or returns PV.top()"""
def __getitem__(self, key_: 'Union[str, Tuple[str, Pos]]') -> PythonValue:
return PythonValue.top()
def __delitem__(self, key_: 'Union[str, Tuple[str, Pos]]') -> None:
pass
def __setitem__(self, key_: 'Union[str, Tuple[str, Pos]]', val: PythonValue) -> None:
pass
class SubscriptsContainer(ABC):
"""This class acts as a Dict[PythonValue, PythonValue]"""
@abstractmethod
def __getitem__(self, key_: PythonValue) -> PythonValue:
raise NotImplementedError()
@abstractmethod
def __delitem__(self, key_: PythonValue) -> None:
raise NotImplementedError()
@abstractmethod
def __setitem__(self, key_: PythonValue, val: PythonValue) -> None:
raise NotImplementedError()
class SubscriptsTopContainer(SubscriptsContainer):
"""This class acts as a Dict[PythonValue, PythonValue] but returns PV.top() or does
nothing"""
def __getitem__(self, key_: PythonValue) -> PythonValue:
return PythonValue.top()
def __delitem__(self, key_: PythonValue) -> None:
pass
def __setitem__(self, key_: PythonValue, val: PythonValue) -> None:
pass
| 37.019608 | 99 | 0.589967 | 25,880 | 0.979116 | 0 | 0 | 2,115 | 0.080017 | 0 | 0 | 7,752 | 0.293281 |
e3819ef8cd2690861dd5dfa539b9d90716dabcd3 | 2,249 | py | Python | datasets/utils_cifar10.py | jbinas/fortified-networks | 7db626075a019a6a7d8e2cb7d3a97404a1124c69 | ["MIT"] | 5 | 2018-10-29T20:21:58.000Z | 2021-11-19T08:58:18.000Z | datasets/utils_cifar10.py | yaya20160101/fortified-networks | 7db626075a019a6a7d8e2cb7d3a97404a1124c69 | ["MIT"] | null | null | null | datasets/utils_cifar10.py | yaya20160101/fortified-networks | 7db626075a019a6a7d8e2cb7d3a97404a1124c69 | ["MIT"] | 5 | 2018-06-29T00:37:56.000Z | 2021-05-28T04:00:55.000Z |
import keras
import tensorflow as tf
import numpy.random as rng
from keras.datasets import cifar10
from keras.utils import np_utils
def data_cifar10(**kwargs):
"""
Preprocess CIFAR10 dataset
:return: tuple (X_train, Y_train, X_test, Y_test), shuffled, scaled to [0, 1], with one-hot labels
"""
# These values are specific to CIFAR10
img_rows = 32
img_cols = 32
nb_classes = 10
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
if keras.backend.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 3, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 3, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 3)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 3)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
tpermutation = rng.permutation(X_test.shape[0])
X_test = X_test[tpermutation]
y_test = y_test[tpermutation]
permutation = rng.permutation(X_train.shape[0])
X_train = X_train[permutation]
y_train = y_train[permutation]
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
return X_train, Y_train, X_test, Y_test
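# Usage sketch (illustrative; exact shapes depend on the backend's image ordering):
#
#     X_train, Y_train, X_test, Y_test = data_cifar10()
#     # X_train: (50000, 32, 32, 3) or (50000, 3, 32, 32); Y_*: one-hot, 10 classes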
def preprocess_image(image, is_training):
_HEIGHT=32
_WIDTH=32
_DEPTH=3
if is_training:
"""Preprocess a single image of layout [height, width, depth]."""
# Resize the image to add four extra pixels on each side.
image = tf.image.resize_image_with_crop_or_pad(
image, _HEIGHT + 8, _WIDTH + 8)
# Randomly crop a [_HEIGHT, _WIDTH] section of the image.
image = tf.random_crop(image, [_HEIGHT, _WIDTH, _DEPTH])
# Randomly flip the image horizontally.
image = tf.image.random_flip_left_right(image)
# Subtract off the mean and divide by the variance of the pixels.
image = tf.image.per_image_standardization(image)
return image
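# Hedged sketch of wiring `preprocess_image` into a tf.data pipeline (not part of
# the original file; `images` is an illustrative tensor of training images):
#
#     dataset = tf.data.Dataset.from_tensor_slices(images)
#     dataset = dataset.map(lambda img: preprocess_image(img, is_training=True))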
| 29.207792 | 74 | 0.678524 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 549 | 0.244108 |
e382e90eebc5900ceec8c6969e8c5a01efb198a6 | 93 | py | Python | gorden_crawler/utils/country.py | Enmming/gorden_cralwer | 3c279e4f80eaf90f3f03acd31b75cf991952adee | ["Apache-2.0"] | 2 | 2019-02-22T13:51:08.000Z | 2020-08-03T14:01:30.000Z | gorden_crawler/utils/country.py | Enmming/gorden_cralwer | 3c279e4f80eaf90f3f03acd31b75cf991952adee | ["Apache-2.0"] | null | null | null | gorden_crawler/utils/country.py | Enmming/gorden_cralwer | 3c279e4f80eaf90f3f03acd31b75cf991952adee | ["Apache-2.0"] | 1 | 2020-08-03T14:01:32.000Z | 2020-08-03T14:01:32.000Z |
#!/usr/bin/python
#-*- coding:utf-8 -*-
from . import const
const.UK = 'UK'
const.US = 'US'
| 13.285714 | 21 | 0.591398 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.494624 |
e3850a968708849ad26d08ad8495038926eabd09 | 248 | py | Python | 9bus.py | sayonsom/Canvass | e59cd68f26722144abc5caf2d7ae1e7389c39ad1 | ["MIT"] | 9 | 2018-01-29T10:53:25.000Z | 2021-02-21T19:35:23.000Z | 9bus.py | cyberange-dev0ps/Canvass | e59cd68f26722144abc5caf2d7ae1e7389c39ad1 | ["MIT"] | 1 | 2019-06-04T14:43:34.000Z | 2021-07-09T08:35:13.000Z | 9bus.py | cyberange-dev0ps/Canvass | e59cd68f26722144abc5caf2d7ae1e7389c39ad1 | ["MIT"] | 12 | 2017-05-04T23:39:10.000Z | 2021-09-25T17:05:00.000Z |
# There are copyright holders.
import pandapower as pp
import pandapower.networks as pn
net = pn.case9()
pp.runpp(net)
print ("Canvass NR Power Flow Results At The Buses")
print ("------------------------------------------")
print (net.res_bus)
| 22.545455 | 52 | 0.612903 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 119 | 0.479839 |
e388be8ed758ecb9a717bada7d2953a7819ac2aa | 315 | py | Python | archeutils/urls.py | acdh-oeaw/acdh-django-archeutils | d1d560ce739d3e2eeddd080c4d96e7482fefbbc5 | ["MIT"] | null | null | null | archeutils/urls.py | acdh-oeaw/acdh-django-archeutils | d1d560ce739d3e2eeddd080c4d96e7482fefbbc5 | ["MIT"] | null | null | null | archeutils/urls.py | acdh-oeaw/acdh-django-archeutils | d1d560ce739d3e2eeddd080c4d96e7482fefbbc5 | ["MIT"] | null | null | null |
from django.urls import include, path
from . import arche_rdf_views
app_name = "archeutils"
urlpatterns = [
path('<app_name>/<model_name>/<pk>', arche_rdf_views.res_as_arche_graph, name='res_as_arche_graph'),
path('<app_name>/<model_name>', arche_rdf_views.qs_as_arche_graph, name='qs_as_arche_graph'),
]
| 31.5 | 104 | 0.75873 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 106 | 0.336508 |
8b4c4b1f780806fe28ac378ba5bc5176a6a833d9 | 763 | py | Python | applications/createEVENT/SimCenterEvent.py | fmckenna/EE-UQ | a1fe96fd000aec933430bda5829c82b5743338c3 | ["BSD-2-Clause"] | 1 | 2019-04-30T19:38:17.000Z | 2019-04-30T19:38:17.000Z | applications/createEVENT/SimCenterEvent.py | s-m-amin-ghasemi/EE-UQ | 7eb42d09b59b42fd1256c6d8693cfe46e0b8034b | ["BSD-2-Clause"] | 2 | 2018-09-11T01:32:27.000Z | 2018-09-11T23:08:06.000Z | applications/createEVENT/SimCenterEvent.py | s-m-amin-ghasemi/EE-UQ | 7eb42d09b59b42fd1256c6d8693cfe46e0b8034b | ["BSD-2-Clause"] | 6 | 2018-05-14T21:45:24.000Z | 2018-10-04T18:13:42.000Z |
import sys
from shutil import copyfile
def main():
inputArgs = sys.argv
#First let's process the arguments
argBIM = inputArgs.index("--filenameBIM") + 1
bimFile = inputArgs[argBIM]
argEVENT = inputArgs.index("--filenameEVENT") + 1
eventFile = inputArgs[argEVENT]
argInputFile = inputArgs.index("--fileName") + 1
inputFile = inputArgs[argInputFile]
# only copy file if --getRV, which occurs when argc == 10
argc = len(sys.argv)
if (argc == 10):
if (inputFile != eventFile):
copyfile(inputFile, eventFile)
print("Copied File: %s to %s\n",inputFile, eventFile)
else:
print("FIle not copied: %s to %s\n",inputFile, eventFile)
if __name__ == "__main__":
main()
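# Usage sketch (illustrative; the exact flag set is supplied by the SimCenter
# workflow): the script reads --filenameBIM, --filenameEVENT and --fileName from
# argv and, only when argc == 10 (the "--getRV" phase), copies the input file
# over the EVENT file.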
| 29.346154 | 69 | 0.626474 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 199 | 0.260813 |
8b4cf66930d071ee4505d81a0c0281d51346de46 | 384 | py | Python | zad1_12.py | kamilhabrych/python-semestr5-lista1 | 65faeffe83bcc4706b2818e2e7802d986b19244b | ["MIT"] | null | null | null | zad1_12.py | kamilhabrych/python-semestr5-lista1 | 65faeffe83bcc4706b2818e2e7802d986b19244b | ["MIT"] | null | null | null | zad1_12.py | kamilhabrych/python-semestr5-lista1 | 65faeffe83bcc4706b2818e2e7802d986b19244b | ["MIT"] | null | null | null |
x = 2 ** (1/2)
y = 3 ** (1/3)
z = 5 ** (1/5)
print(x)
print(y)
print(z)
print()
if x>y and x>z:
print(x, 'is the largest')
elif y>x and y>z:
print(y, 'is the largest')
elif z>x and z>y:
print(z, 'is the largest')
print()
if x<y and x<z:
print(x, 'is the smallest')
elif y<x and y<z:
print(y, 'is the smallest')
elif z<x and z<y:
print(z, 'is the smallest')
| 16 | 31 | 0.585938 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.27907 |
8b4d9675e98a4abeceff47ef0ef4214b548c119b | 259 | py | Python | 2-mouth02/day03/exe03.py | gary-gggggg/gary | d8ba30ea4bc2b662a2d6a87d247f813e5680d63e | ["Apache-2.0"] | 4 | 2021-02-01T10:28:11.000Z | 2021-02-01T10:34:40.000Z | 2-mouth02/day03/exe03.py | gary-gggggg/gary | d8ba30ea4bc2b662a2d6a87d247f813e5680d63e | ["Apache-2.0"] | null | null | null | 2-mouth02/day03/exe03.py | gary-gggggg/gary | d8ba30ea4bc2b662a2d6a87d247f813e5680d63e | ["Apache-2.0"] | null | null | null |
title=open("file.txt","w")
title.write("《悯农》\n")  # poem title "Min Nong" ("Sympathy for the Peasants")
title.close()
sum=0
while 1:
sentence=open("file.txt","a")
sum+=1
if sum>4:
sentence.close()
break
k = input("Please enter a sentence (including punctuation): ")
sentence.write(f"{k}\n")
sentence.close()
| 17.266667 | 33 | 0.555985 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 94 | 0.318644 |
8b4d9f6ab5c3257761c9eb3fa1e62a13d1f8d05b | 1,635 | py | Python | scanplans/grid_scan.py | st3107/bluesky_scanplans | 2ab126c0b7f4427a10d42cf59ea004770c433383 | ["BSD-3-Clause"] | null | null | null | scanplans/grid_scan.py | st3107/bluesky_scanplans | 2ab126c0b7f4427a10d42cf59ea004770c433383 | ["BSD-3-Clause"] | null | null | null | scanplans/grid_scan.py | st3107/bluesky_scanplans | 2ab126c0b7f4427a10d42cf59ea004770c433383 | ["BSD-3-Clause"] | null | null | null |
import bluesky.plan_stubs as bps
import bluesky.plans as bp
from xpdacq.beamtime import _configure_area_det
from xpdacq.glbl import glbl
from xpdacq.xpdacq import open_shutter_stub, close_shutter_stub
from xpdacq.xpdacq_conf import xpd_configuration
def acq_rel_grid_scan(
dets: list,
exposure: float,
wait: float,
start0: float, stop0: float, num0: int,
start1: float, stop1: float, num1: int
):
"""Make a plan of two dimensional grid scan."""
area_det = xpd_configuration["area_det"]
x_controller = xpd_configuration["x_controller"]
y_controller = xpd_configuration["y_controller"]
def per_step(detectors, step: dict, pos_cache):
""" customized step to ensure shutter is open before
reading at each motor point and close shutter after reading
"""
yield from bps.checkpoint()
for motor, pos in step.items():
yield from bps.mv(motor, pos)
yield from bps.sleep(wait)
yield from open_shutter_stub()
yield from bps.sleep(glbl["shutter_sleep"])
yield from bps.trigger_and_read(list(detectors) + list(step.keys()))
yield from close_shutter_stub()
plan = bp.rel_grid_scan(
[area_det],
x_controller, start0, stop0, num0,
y_controller, start1, stop1, num1,
snake_axes=True,
per_step=per_step
)
yield from _configure_area_det(exposure)
yield from plan
# below is the code to run at the beamtime
# register the scanplan
# ScanPlan(bt, acq_rel_grid_scan, 60, 30, -5, 5, 10, -5, 5, 10)
# use bt.list() to see the index of the scanplan and use it in xrun
| 34.0625 | 76 | 0.688685 | 0 | 0 | 1,182 | 0.722936 | 0 | 0 | 0 | 0 | 427 | 0.261162 |
8b4e6b2b167aebf419baed2ece989c7a96978324 | 5,172 | py | Python | kolibri/logger/migrations/0001_initial_redone.py | aronasorman/kolibri | 940672bc849cd0b26d7d84ee08a34f072c4f6cd6 | ["MIT"] | 1 | 2021-11-09T11:30:12.000Z | 2021-11-09T11:30:12.000Z | kolibri/logger/migrations/0001_initial_redone.py | aronasorman/kolibri | 940672bc849cd0b26d7d84ee08a34f072c4f6cd6 | ["MIT"] | 2 | 2017-02-08T00:22:04.000Z | 2017-06-12T20:27:44.000Z | kolibri/logger/migrations/0001_initial_redone.py | aronasorman/kolibri | 940672bc849cd0b26d7d84ee08a34f072c4f6cd6 | ["MIT"] | 1 | 2020-05-21T18:17:55.000Z | 2020-05-21T18:17:55.000Z |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-09 17:25
from __future__ import unicode_literals
import django.core.validators
import django.db.models.deletion
import kolibri.content.models
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('kolibriauth', '0001_initial_redone'),
]
operations = [
migrations.CreateModel(
name='ContentRatingLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content_id', kolibri.content.models.UUIDField(db_index=True)),
('channel_id', kolibri.content.models.UUIDField()),
('quality', models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(5)])),
('ease', models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(5)])),
('learning', models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(5)])),
('feedback', models.TextField(blank=True)),
('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.FacilityDataset')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.FacilityUser')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ContentSessionLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content_id', kolibri.content.models.UUIDField(db_index=True)),
('channel_id', kolibri.content.models.UUIDField()),
('start_timestamp', models.DateTimeField()),
('end_timestamp', models.DateTimeField(blank=True, null=True)),
('time_spent', models.FloatField(default=0.0, help_text='(in seconds)', validators=[django.core.validators.MinValueValidator(0)])),
('progress', models.FloatField(default=0, validators=[django.core.validators.MinValueValidator(0)])),
('kind', models.CharField(max_length=200)),
('extra_fields', models.TextField(default='{}')),
('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.FacilityDataset')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.FacilityUser')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ContentSummaryLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content_id', kolibri.content.models.UUIDField(db_index=True)),
('channel_id', kolibri.content.models.UUIDField()),
('start_timestamp', models.DateTimeField()),
('end_timestamp', models.DateTimeField(blank=True, null=True)),
('completion_timestamp', models.DateTimeField(blank=True, null=True)),
('time_spent', models.FloatField(default=0.0, help_text='(in seconds)', validators=[django.core.validators.MinValueValidator(0)])),
('progress', models.FloatField(default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(1)])),
('kind', models.CharField(max_length=200)),
('extra_fields', models.TextField(default='{}')),
('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.FacilityDataset')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.FacilityUser')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='UserSessionLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('channels', models.TextField(blank=True)),
('start_timestamp', models.DateTimeField(auto_now_add=True)),
('completion_timestamp', models.DateTimeField(blank=True, null=True)),
('pages', models.TextField(blank=True)),
('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.FacilityDataset')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.FacilityUser')),
],
options={
'abstract': False,
},
),
]
| 56.835165 | 176 | 0.616589 | 4,923 | 0.951856 | 0 | 0 | 0 | 0 | 0 | 0 | 897 | 0.173434 |
8b4e779744d51e5ebec4b797f93c9f1ab0c716a1 | 555 | py | Python | setup.py | kennydo/pick-my-stick | 17bb4fbb35cc9637a838f5bdd91caeb7458b43bd | ["MIT"] | null | null | null | setup.py | kennydo/pick-my-stick | 17bb4fbb35cc9637a838f5bdd91caeb7458b43bd | ["MIT"] | null | null | null | setup.py | kennydo/pick-my-stick | 17bb4fbb35cc9637a838f5bdd91caeb7458b43bd | ["MIT"] | null | null | null |
from setuptools import setup, find_packages
setup(
name='picker-my-sticker',
version='0.0.1',
description='Stickers for Slack',
long_description='S t i c k e r s',
url='https://github.com/kennydo/pick-my-stick',
author='Kenny Do',
author_email='[email protected]',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Topic :: Internet',
],
packages=find_packages(),
entry_points={
},
)
| 25.227273 | 51 | 0.610811 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 271 | 0.488288 |
8b504f70d7990baf24d2b512d627ec9c1ca831af | 223 | py | Python | src/common/json_attributes.py | rosenloecher-it/enocean-mqtt-bridge | d56e41a1a67e70bdeb1aa46d10f48ed5a12ca59c | ["MIT"] | 1 | 2020-12-01T17:10:14.000Z | 2020-12-01T17:10:14.000Z | src/common/json_attributes.py | rosenloecher-it/enocean-mqtt-bridge | d56e41a1a67e70bdeb1aa46d10f48ed5a12ca59c | ["MIT"] | 1 | 2021-09-19T13:38:02.000Z | 2021-09-19T13:38:02.000Z | src/common/json_attributes.py | rosenloecher-it/enocean-mqtt-bridge | d56e41a1a67e70bdeb1aa46d10f48ed5a12ca59c | ["MIT"] | null | null | null |
class JsonAttributes:
TIMESTAMP = "timestamp"
STATE = "state"
DIM_STATE = "dim_state"
SWITCH_STATE = "switch_state"
VALUE = "value"
BUTTON = "button"
SINCE = "since"
DEVICE = "device"
| 13.117647 | 33 | 0.596413 | 221 | 0.991031 | 0 | 0 | 0 | 0 | 0 | 0 | 73 | 0.327354 |
8b528bad86c27520698632ef706d6564180389c3 | 10,562 | py | Python | helpers.py | TimHeiszwolf/NBPGravity | b054b189f5493ad8ec094786f16f5525c117a127 | ["MIT"] | 1 | 2022-03-08T07:16:53.000Z | 2022-03-08T07:16:53.000Z | helpers.py | TimHeiszwolf/NBPGravity | b054b189f5493ad8ec094786f16f5525c117a127 | ["MIT"] | null | null | null | helpers.py | TimHeiszwolf/NBPGravity | b054b189f5493ad8ec094786f16f5525c117a127 | ["MIT"] | null | null | null |
import numpy as np
import time
import matplotlib.pyplot as plt
import imageio
from scipy.optimize import fsolve
from body import Body
def get_position_from_Kepler(semimajor_axis, eccentricity, inclination, ascending_node, argument_of_periapsis, mean_anomaly, mass_orbit, G=6.67430 * 10**(-11)):
"""
Get the position vectors from the Keplerian coordinates
First part from https://downloads.rene-schwarz.com/download/M001-Keplerian_Orbit_Elements_to_Cartesian_State_Vectors.pdf
Second part from https://space.stackexchange.com/questions/19322/converting-orbital-elements-to-cartesian-state-vectors
>>> position = get_position_from_Kepler(1.5*10**8, 0.0167, (5*10**(-5))*np.pi/180, 1, 1, 190*np.pi/180, 1.988435 * (10**30))
>>> position
array([ 8.58449271e+07, -1.26004733e+08, -1.22449388e+02])
>>> np.linalg.norm(position)
152468174.39880842
"""
mu = G * mass_orbit
func = lambda EA: mean_anomaly - (EA - eccentricity * np.sin(EA))
eccentric_anomaly = fsolve(func, np.pi)[0]
true_anomaly = 2 * np.arctan2(np.sqrt(1 + eccentricity) * np.sin(eccentric_anomaly / 2), np.sqrt(1 - eccentricity) * np.cos(eccentric_anomaly / 2))
radius = semimajor_axis * (1 - eccentricity * np.cos(eccentric_anomaly))
h = np.sqrt(mu * semimajor_axis * (1 - eccentricity**2))
p = semimajor_axis * (1 - eccentricity**2)
Om = ascending_node
w = argument_of_periapsis
nu = true_anomaly
r = radius
i = inclination
e = eccentricity
x = r*(np.cos(Om)*np.cos(w+nu) - np.sin(Om)*np.sin(w+nu)*np.cos(i))
y = r*(np.sin(Om)*np.cos(w+nu) + np.cos(Om)*np.sin(w+nu)*np.cos(i))
z = r*(np.sin(i)*np.sin(w+nu))
#print(x, r, Om, w, nu, i, e, eccentric_anomaly)
position = np.array([x, y, z])
xd = (x*h*e/(r*p))*np.sin(nu) - (h/r)*(np.cos(Om)*np.sin(w+nu) + np.sin(Om)*np.cos(w+nu)*np.cos(i))
yd = (y*h*e/(r*p))*np.sin(nu) - (h/r)*(np.sin(Om)*np.sin(w+nu) - np.cos(Om)*np.cos(w+nu)*np.cos(i))
zd = (z*h*e/(r*p))*np.sin(nu) + (h/r)*(np.cos(w+nu)*np.sin(i))
velocity = np.array([xd, yd, zd])
#print(velocity)
return position
def get_coordinates_from_Kepler(semimajor_axis, eccentricity, inclination, ascending_node, argument_of_periapsis, mean_anomaly, current_velocity, mass_orbit, G=6.67430 * 10**(-11), delta=0.001):
"""
Get the position and velocity vectors from the Keplerian coordinates. The velocity direction is approximated with a finite difference in the mean anomaly.
>>> position, velocity = get_coordinates_from_Kepler(1.5*10**8, 0.0167, (5*10**(-5))*np.pi/180, 1, 1, 190*np.pi/180, 29300, 1.988435 * (10**30))
>>> position
array([ 8.58449271e+07, -1.26004733e+08, -1.22449388e+02])
>>> velocity
array([ 2.41591639e+04, 1.65778407e+04, -9.92410781e-03])
>>> np.linalg.norm(position)
152468174.39880842
>>> np.linalg.norm(velocity)
29299.999999999993
"""
position = get_position_from_Kepler(semimajor_axis, eccentricity, inclination, ascending_node, argument_of_periapsis, mean_anomaly, mass_orbit, G)
position_plus_delta = get_position_from_Kepler(semimajor_axis, eccentricity, inclination, ascending_node, argument_of_periapsis, mean_anomaly + delta, mass_orbit, G)
delta_position = position_plus_delta - position
direction_unit_vector = delta_position / np.linalg.norm(delta_position)
return position, current_velocity * direction_unit_vector
def ld_to_m(ld):
"""
Converts the input distance (or velocity) from lunar distances to meters.
"""
return ld * 384402 * 10**3
def au_to_m(au):
"""
Converts the input distance (or velocity) from astronomical units to meters.
"""
return au * 1.495978707 * 10**11
def ly_to_m(ly):
"""
Converts the input distance (or velocity) from light years to meters.
"""
return ly * 9.4607 * 10**15
def pc_to_m(pc):
"""
Converts the input distance (or velocity) from parsecs to meters.
"""
return pc * 3.085677581 * 10**18
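# Quick sanity examples (values follow directly from the constants above):
#   ld_to_m(1.0) == 3.84402e8        # one lunar distance in meters
#   au_to_m(1.0) == 1.495978707e11   # one astronomical unit in meters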
def make_gif(bodies, trail_length, tick_per_frame=10, frames_per_second=5, window=[[-1, 1], [-1, 1]], name='output', axis=[0, 1], labels=False):
images = []
min_trail = 0
fig = plt.figure(figsize=(16, 16))
for tick in range(0, len(bodies[0].history['time']), tick_per_frame):
if bodies[0].history['time'][0] > (bodies[0].history['time'][tick] - trail_length):
continue
print('Rendering tick:', tick)
current_time = bodies[0].history['time'][tick]
x = [body.history['position'][tick][axis[0]] for body in bodies]
y = [body.history['position'][tick][axis[1]] for body in bodies]
colors = [body.color for body in bodies]
plt.scatter(x, y, c=colors)
plt.axis((window[0][0], window[0][1], window[1][0], window[1][1]))
while bodies[0].history['time'][min_trail] + trail_length < current_time:
min_trail = min_trail + 1
for body in bodies:
if labels:
x_label = body.history['position'][tick][axis[0]]
y_label = body.history['position'][tick][axis[1]]
plt.text(x_label, y_label, body.name)
#x_trail = [body.history['position'][i][axis[0]] for i in range(tick + 1) if ((body.history['time'][i] + trail_length) >= current_time)]
#y_trail = [body.history['position'][i][axis[1]] for i in range(tick + 1) if ((body.history['time'][i] + trail_length) >= current_time)]
x_trail = [body.history['position'][i][axis[0]] for i in range(min_trail, tick + 1)]
y_trail = [body.history['position'][i][axis[1]] for i in range(min_trail, tick + 1)]
plt.plot(x_trail, y_trail, c=body.color)
plt.title(name + ' time ' + str(round(current_time, 0)))
plt.xlabel('axis {}'.format(axis[0]))
plt.ylabel('axis {}'.format(axis[1]))
image_name = name + '/' + str(tick) + '.png'
plt.savefig(image_name)
images.append(imageio.imread(image_name))
#plt.pause(0.0001)# Do we want this?
plt.clf()
print('Done rendering now saving gif.')
imageio.mimwrite(name+'.gif', images, format='.gif', fps=frames_per_second)
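# Usage sketch (assumes the bodies' history dicts were filled by a simulation):
#   make_gif(bodies, trail_length=30*24*3600, tick_per_frame=5,
#            window=[[-2.5e11, 2.5e11], [-2.5e11, 2.5e11]], name='inner_solar')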
def simple_plotter(space, end_time, time_per_second, updates_per_second=2):
#plt.show()
lim = 1.50*10**11#max([max([abs(pos) for pos in body.position]) for body in space.bodies])
fig, ax = plt.subplots(figsize=(8, 8))
start_time = time.time()
tick = 0
#print(tick, space.time)
while space.time<=end_time:
print(tick, space.time, round(time.time()-start_time, 2), np.linalg.norm(space.bodies[1].position - space.bodies[2].position))
time.sleep(max([0.001, tick - (time.time() - start_time)]))
space.proceed_time_until(tick*time_per_second)
x = []
y = []
for body in space.bodies:
x.append(body.position[0])
y.append(body.position[1])
ax.clear()
ax.scatter(x, y, marker='o', c='r')
ax.set_xlim(-lim, lim)
ax.set_ylim(-lim, lim)
plt.pause(0.0001)
tick = tick+1/updates_per_second
plt.show()
def get_test_Space_simple_solar():
"""
Generates the bodies for a simple test Space. It is filled with the 8 planets of the solar system (and the moon). They are positioned in a way that doesn't 100% correspond to reality.
"""
bodies = []
mass_orbit = 1.988435 * (10**30)
# The most important bodies.
bodies.append(Body(np.array([0.0, 0.0, 0.0]), np.array([0.0, 0.0, 0.0]), 1.988435 * (10**30), 695700000, 'Sun', True, 'tab:orange'))
position_earth, velocity_earth = get_coordinates_from_Kepler(1.0*1.496*10**11, 0.01671, (5*10**(-5))*np.pi/180, 0, 0, 190*np.pi/180, 29300, mass_orbit)
bodies.append(Body(position_earth, velocity_earth, 5.97 * (10**24), 6371009, 'Earth', True, 'tab:blue'))
position, velocity = get_coordinates_from_Kepler(384400*1000, 0.0554, 5.16*np.pi/180, 125*np.pi/180, 318.15*np.pi/180, 213*np.pi/180, 1020, bodies[1].mass)
position = position + position_earth
velocity = velocity + velocity_earth
bodies.append(Body(position,velocity, 7.349 * (10**22), 1737400, 'Moon', True, 'darkgrey'))
# Other inner plannets.
position, velocity = get_coordinates_from_Kepler(0.38709893*1.496*10**11, 0.20563069, 7.00487*np.pi/180, 48.33*np.pi/180, 29.12*np.pi/180, 269*np.pi/180, 45810, mass_orbit)
bodies.append(Body(position, velocity, 3.301 * (10**23), 2440000, 'Mercury', True, 'lightsteelblue'))
position, velocity = get_coordinates_from_Kepler(0.72333199*1.496*10**11, 0.00677, 3.39471*np.pi/180, 76.68069*np.pi/180, 54.85*np.pi/180, 187*np.pi/180, 34790, mass_orbit)
bodies.append(Body(position, velocity, 4.867 * (10**24), 6050000, 'Venus', True, 'goldenrod'))
position, velocity = get_coordinates_from_Kepler(1.52366*1.496*10**11, 0.09341, 1.85061*np.pi/180, 49.57*np.pi/180, 286*np.pi/180, 349*np.pi/180, 26450, mass_orbit)
bodies.append(Body(position, velocity, 6.417 * (10**23), 3390000, 'Mars', True, 'sandybrown'))
# Outer planets.
position_jupiter, velocity_jupiter = get_coordinates_from_Kepler(5.2033*1.496*10**11, 0.04839, 1.3053*np.pi/180, 100.556*np.pi/180, -85.80*np.pi/180, 283*np.pi/180, 13170, mass_orbit)
bodies.append(Body(position_jupiter, velocity_jupiter, 1.898 * (10**27), 69950000, 'Jupiter', True, 'darkorange'))
position_saturn, velocity_saturn = get_coordinates_from_Kepler(9.537*1.496*10**11, 0.0541, 2.48446*np.pi/180, 113.715*np.pi/180, -21.2831*np.pi/180, 207*np.pi/180, 91590, mass_orbit)
bodies.append(Body(position_saturn, velocity_saturn, 5.683 * (10**26), 58300000, 'Saturn', True, 'navajowhite'))
position_uranus, velocity_uranus = get_coordinates_from_Kepler(19.1912*1.496*10**11, 0.0471771, 0.76986*np.pi/180, 74.22988*np.pi/180, 96.73436*np.pi/180, 229*np.pi/180, 6578, mass_orbit)
bodies.append(Body(position_uranus, velocity_uranus, 8.681 * (10**25), 25360000, 'Uranus', True, 'powderblue'))
position_neptune, velocity_neptune = get_coordinates_from_Kepler(30.06896*1.496*10**11, 0.00858587, 1.76917*np.pi/180, 131.72169*np.pi/180, -86.75*np.pi/180, 301*np.pi/180, 5449, mass_orbit)
bodies.append(Body(position_neptune, velocity_neptune, 1.024 * (10**26), 24600000, 'Neptune', True, 'dodgerblue'))
return bodies
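# Sketch of intended use (assumes a `Space` class elsewhere in this project that
# wraps the bodies and exposes `time`/`proceed_time_until`, as simple_plotter expects):
#   space = Space(get_test_Space_simple_solar())   # hypothetical constructor
#   simple_plotter(space, end_time=3.2e7, time_per_second=24*3600)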
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44.944681 | 194 | 0.639841 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,588 | 0.245029 |
8b569282b5d41a4fb5d9ee37ff203ff019b8b666 | 10,897 | py | Python | opttrack/lib/ui/edit_handlers.py | aisthesis/opttrack | 17e0c7740ea43e0f07166e30d689b106d0319d0b | ["MIT"] | null | null | null | opttrack/lib/ui/edit_handlers.py | aisthesis/opttrack | 17e0c7740ea43e0f07166e30d689b106d0319d0b | ["MIT"] | 2 | 2016-03-30T02:50:31.000Z | 2016-03-30T16:18:23.000Z | opttrack/lib/ui/edit_handlers.py | aisthesis/opttrack | 17e0c7740ea43e0f07166e30d689b106d0319d0b | ["MIT"] | null | null | null |
"""
Copyright (c) 2015 Marshall Farrier
license http://opensource.org/licenses/MIT
lib/ui/handlers.py
Handlers for edit menu
"""
from bson.codec_options import CodecOptions
import datetime as dt
from functools import partial
import json
from pymongo.errors import BulkWriteError
from ..dbschema import SPREADS
from ..dbtools import delete_many, find_job, getcoll, insert_many
from ..dbwrapper import job
from ..spreads.optspread import SPREAD_TYPES
from ..spreads.optspread_factory import OptSpreadFactory
from .spread_ui import SpreadUi
from .utils import confirm
class EditHandlers(object):
def __init__(self, logger, tz):
self.logger = logger
self.tz = tz
def add_obs(self, spread_type):
spread = SpreadUi().get(spread_type)
if not spread:
print('\nAborting: spread NOT saved!')
return True
job(self.logger, partial(_saveentries, (vars(spread),), 'observe'))
return True
def del_obs(self, spread_type):
underlying = input('Underlying: ').strip().upper()
wrapped_spreads = self._get_observed({'Underlying': underlying,
'Spread_Type': spread_type})
if len(wrapped_spreads) == 0:
print('\nNo {} spreads found for {}'.format(SPREAD_TYPES[spread_type], underlying))
else:
self._del_obs(wrapped_spreads)
return True
def show_obs(self, spread_type):
wrapped_spreads = self._get_observed({'Spread_Type': spread_type})
if not len(wrapped_spreads):
print('\nNo {} spreads found.'.format(SPREAD_TYPES[spread_type]))
for item in wrapped_spreads:
print('')
item['spread'].show(False, False, False)
return True
def add_find(self, spread_type):
if _is_fromfile():
fname = input('Enter file name: ').strip()
equities = _eqs_fromfile(fname)
else:
equities = _eqs_fromblob(input('Underlying equities (GOOGL,TSLA,FB): '))
print('Include in future scans:\n')
for eq in equities:
print("'{}'".format(eq))
choice = input('\nOK to proceed (y/n)? ').lower()
if choice == 'y':
entries = _get_find_entries(equities, spread_type)
job(self.logger, partial(_saveentries, entries, 'find'))
else:
print('Aborting: equities NOT saved!')
return True
def del_find(self, spread_type):
equities = _eqs_fromblob(input('Underlying equities (GOOGL,TSLA,FB): '))
print('Remove from future scans:\n')
for eq in equities:
print("'{}'".format(eq))
choice = input('\nOK to proceed (y/n)? ').lower()
if choice == 'y':
entries = _get_find_entries(equities, spread_type)
job(self.logger, partial(_delentries, entries, 'find'))
else:
print('Aborting: equities NOT deleted!')
return True
def show_find(self):
for spread in SPREADS:
cursor = job(self.logger, partial(find_job, 'find', {'spread': spread['key']}))
equities = sorted([item['eq'] for item in cursor])
print('\n{}:'.format(spread['desc']))
if len(equities) > 0:
print('{} equities are being scanned'.format(len(equities)))
for equity in equities:
print("'{}'".format(equity))
else:
print('No equities are being scanned')
return True
def track_single(self):
entry = self._get_track_entry()
self._confirmsave((entry,))
return True
def track_dgb(self):
print('\nTrack diagonal butterfly:')
underlying = input('Underlying equity: ').strip().upper()
straddleexp = self._getexpdt(input('Straddle expiration (yyyy-mm-dd): '))
straddlestrike = float(input('Straddle strike: '))
farexp = self._getexpdt(input('Far expiration (yyyy-mm-dd): '))
distance = float(input('Distance between strikes: '))
entries = _get_dgbentries(underlying, straddleexp, straddlestrike, farexp, distance)
self._confirmsave(entries)
return True
def delete_tracked(self):
entry = self._get_track_entry()
self._confirmdelete(entry)
return True
def show_tracked(self):
underlying = input('Underlying equity: ').strip().upper()
job(self.logger, partial(_show_tracked, self.tz, underlying))
return True
def _del_obs(self, wrapped_spreads):
if len(wrapped_spreads) == 1:
self._del_obs_unique(wrapped_spreads[0])
else:
self._del_obs_select(wrapped_spreads)
def _del_obs_unique(self, wrapped_spread):
print('\nStop observing the following spread:\n')
wrapped_spread['spread'].show(False, False, False)
print('')
if confirm():
job(self.logger, partial(_delentries, ({'_id': wrapped_spread['_id']},), 'observe'))
else:
print('\nAborting: spread NOT deleted!')
def _del_obs_select(self, wrapped_spreads):
print('Multiple {} spreads found for {}.'.format(SPREAD_TYPES[wrapped_spreads[0]['spread'].Spread_Type],
wrapped_spreads[0]['spread'].Underlying))
print('Select spread to delete:')
for i in range(len(wrapped_spreads)):
print('\n({})'.format(i + 1))
wrapped_spreads[i]['spread'].show(False, False, False)
choice = int(input('\nEnter number for spread to delete: '))
if not 0 < choice <= len(wrapped_spreads):
print('\nInvalid selection!')
return
self._del_obs_unique(wrapped_spreads[choice - 1])
def _get_track_entry(self):
entry = {}
entry['Underlying'] = input('Underlying equity: ').strip().upper()
entry['Opt_Type'] = _getopttype(input('Option type (c[all] or p[ut]): '))
entry['Expiry'] = self._getexpdt(input('Expiration (yyyy-mm-dd): '))
entry['Strike'] = float(input('Strike: '))
return entry
def _confirmsave(self, entries):
print('\nSaving the following options:')
_show_track_entries(entries)
choice = input('\nOK to proceed (y/n)? ').lower()
if choice == 'y':
job(self.logger, partial(_saveentries, entries, 'track'))
else:
print('Aborting: option(s) NOT saved!')
def _confirmdelete(self, entry):
print('\nDeleting the following option:')
_show_track_entries((entry,))
choice = input('\nStop tracking this option (y/n)? ').lower()
if choice == 'y':
job(self.logger, partial(_delentries, (entry,), 'track'))
else:
print('Aborting: option NOT deleted!')
def _get_observed(self, qry):
spread_factory = OptSpreadFactory(self.tz)
cursor = job(self.logger, partial(find_job, 'observe', qry,
codec_options=CodecOptions(tz_aware=True)))
wrapped_spreads = []
for item in cursor:
wrapped_spreads.append({'spread': spread_factory.make(item), '_id': item['_id']})
return wrapped_spreads
def _getexpdt(self, expirytxt):
# on 2016-02-19 expired options were unavailable on yahoo by 7:30 pm EST
return self.tz.localize(dt.datetime.strptime(expirytxt, '%Y-%m-%d')).replace(hour=19)
def _getopttype(rawtxt):
if rawtxt.strip().lower() in ('c', 'call'):
return 'call'
if rawtxt.strip().lower() in ('p', 'put'):
return 'put'
raise ValueError('option type must be call or put')
def _show_track_entries(entries):
for entry in entries:
print('')
_show_track_entry(entry)
def _show_track_entry(entry):
print('Underlying: {}'.format(entry['Underlying']))
print('Opt_Type: {}'.format(entry['Opt_Type']))
print('Expiry: {}'.format(entry['Expiry'].strftime('%Y-%m-%d')))
print('Strike: {:.2f}'.format(entry['Strike']))
def _delentries(entries, collname, logger, client):
logger.info("removing {} record(s) from collection '{}'".format(len(entries), collname))
coll = getcoll(client, collname)
total_deleted = 0
for entry in entries:
n_deleted = delete_many(logger, coll, entry)
if n_deleted < 1:
logger.warn('record to be deleted not found: {}'.format(entry))
total_deleted += n_deleted
if total_deleted == len(entries):
msg = '{} record(s) deleted'.format(total_deleted)
print(msg)
else:
msg = '{} records queued for deletion but {} records were deleted!'.format(len(entries), total_deleted)
logger.warn(msg)
print('WARNING: {}'.format(msg))
print('Did you verify that the records to be deleted were actually present?')
def _saveentries(entries, collname, logger, client):
msg = 'Saving {} entries'.format(len(entries))
print(msg)
logger.info(msg)
coll = getcoll(client, collname)
try:
n_inserted = insert_many(logger, coll, entries)
except BulkWriteError:
print('\nERROR writing to database! Entries not saved!')
print('Are you trying to enter duplicate records?')
else:
print('{} records saved'.format(n_inserted))
def _show_tracked(tz, underlying, logger, client):
c_opts = CodecOptions(tz_aware=True)
trackcoll = getcoll(client, 'track', codec_options=c_opts)
print('\nEntries for {}:\n'.format(underlying))
for record in trackcoll.find({'Underlying': underlying}):
_show_tracked_record(tz, record)
def _show_tracked_record(tz, record):
print('Opt_Type: {}'.format(record['Opt_Type']))
print('Expiry: {}'.format(record['Expiry'].astimezone(tz).strftime('%Y-%m-%d')))
print('Strike: {:.2f}\n'.format(record['Strike']))
def _get_dgbentries(underlying, straddleexp, straddlestrike, farexp, distance):
entries = []
farstrikes = {'call': straddlestrike + distance, 'put': straddlestrike - distance}
for key in farstrikes:
# straddle
entries.append({'Underlying': underlying, 'Opt_Type': key, 'Expiry': straddleexp,
'Strike': straddlestrike})
# long-term spread
entries.append({'Underlying': underlying, 'Opt_Type': key, 'Expiry': farexp,
'Strike': farstrikes[key]})
return entries
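# Worked sketch: _get_dgbentries('GE', exp_near, 30.0, exp_far, 2.5) returns the
# four legs of a diagonal butterfly: a call and a put at 30.0 expiring at exp_near
# (the straddle), plus a call at 32.5 and a put at 27.5 expiring at exp_far.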
def _is_fromfile():
if input('Get list from file, 1 equity per line (y/n)? ').strip().lower() == 'y':
return True
return False
def _eqs_fromblob(eqblob):
return sorted(map(_fmt_eq, eqblob.split(',')))
def _fmt_eq(rawtxt):
return rawtxt.strip().upper()
def _eqs_fromfile(fname):
equities = []
with open(fname, 'r') as infile:
equities = infile.readlines()
return sorted(map(_fmt_eq, equities))
def _get_find_entries(equities, spread_type):
return [{'eq': equity, 'spread': spread_type} for equity in equities]
| 37.968641 | 112 | 0.623383 | 6,819 | 0.625769 | 0 | 0 | 0 | 0 | 0 | 0 | 2,429 | 0.222905 |
8b58c384ea2a5cec4051907804ed34709b049103 | 4,126 | py | Python | server/music/api.py | tricelex/zc_plugin_youtube_music_video | f3389cafc9e1a6b0fd2d94e0af77e9beec678282 | ["MIT"] | null | null | null | server/music/api.py | tricelex/zc_plugin_youtube_music_video | f3389cafc9e1a6b0fd2d94e0af77e9beec678282 | ["MIT"] | null | null | null | server/music/api.py | tricelex/zc_plugin_youtube_music_video | f3389cafc9e1a6b0fd2d94e0af77e9beec678282 | ["MIT"] | null | null | null |
# class SidebarView(GenericAPIView):
# permission_classes = [AllowAny]
# def get(self, request, *args, **kwargs):
# org_id = request.GET.get("org", None)
# user_id = request.GET.get("user", None)
# room = settings.ROOM_COLLECTION
# plugin_id = settings.PLUGIN_ID
# roomid = settings.ROOM_ID
# token = verify_token
# pub_room = get_room_info()
# # subscription_channel: org_id_memberid_sidebar
# if request.GET.get("org") and request.GET.get("user"):
# subscription_channel = "{org_id}_{user_id}_sidebar"
# #sidebar_update = "currentWorkspace_userInfo_sidebar"
# sidebar_update_payload = {
# "event": "sidebar_update",
# "plugin_id": "music.zuri.chat",
# "data": {
# "name": "Music Plugin",
# "description": "This is a virtual lounge where people can add, watch and listen to YouTube videos or music",
# "plugin_id": plugin_id,
# "organisation_id": org_id,
# "room_id": roomid,
# "user_id": user_id,
# "category": "entertainment",
# "group_name": "music",
# "show_group": False,
# "button_url": f"/music/{org_id}/{roomid}",
# "public_rooms": [pub_room],
# # "starred" : [],
# "joined_rooms": [pub_room],
# },
# }
# # centrifugo_post(sidebar_update_payload, subscription_channel)
# # return Response(sidebar_update_payload)
# url = "https://api.zuri.chat/sidebar?org={org_id}&user={user_id}"
# # http://127.0.0.1:8000/sidebar?org=61695d8bb2cc8a9af4833d46&user=61695d8bb2cc8a9af4833d47
# r = requests.get(url)
# # print(r.status_code)
# if r.status_code == 200:
# # public_url = f"https://api.zuri.chat/data/read/{org_id}/{plugin_id}/{room}/{roomid}"
# # r = requests.get(public_url)
# publish_to_sidebar(plugin_id, user_id, {"event": "sidebar_update", "data": pub_room})
# centrifugo_post(sidebar_update_payload, subscription_channel)
# return Response(r)
# else:
# centrifugo_post(sidebar_update_payload, subscription_channel)
# return Response(
# {
# "event": "sidebar_update",
# "name": "Music Plugin",
# "description": "This is a virtual lounge where people can add, watch and listen to YouTube videos or music",
# "plugin_id": plugin_id,
# "organisation_id": org_id,
# "room_id": roomid,
# "user_id": user_id,
# "group_name": [],
# "show_group": False,
# "category": "entertainment",
# "public_rooms": [pub_room],
# "joined_rooms": [pub_room],
# }
# )
# else:
# centrifugo_post(sidebar_update_payload, subscription_channel)
# return JsonResponse(
# {
# "name": "Music Plugin",
# "description": "This is a virtual lounge where people can add, watch and listen to YouTube videos or music",
# "plugin_id": plugin_id,
# "organisation_id": org_id,
# "room_id": roomid,
# "user_id": user_id,
# "group_name": [],
# "show_group": False,
# "category": "entertainment",
# "public_rooms": [pub_room],
# "joined_rooms": [pub_room],
# }
# )
# def is_valid(param):
# return param != "" and param is not None
| 41.26 | 134 | 0.479641 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,027 | 0.976006 |
8b59bcd3a89ce1967c8f1f93333ca68f2476a3f5 | 6,331 | py | Python | BIT_OpenDomain_QA/rerank/utils_rerank.py | rwei1218/transformers | 511e100c650b3f942c432d8f71eee3ea1c0005a8 | ["Apache-2.0"] | null | null | null | BIT_OpenDomain_QA/rerank/utils_rerank.py | rwei1218/transformers | 511e100c650b3f942c432d8f71eee3ea1c0005a8 | ["Apache-2.0"] | null | null | null | BIT_OpenDomain_QA/rerank/utils_rerank.py | rwei1218/transformers | 511e100c650b3f942c432d8f71eee3ea1c0005a8 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
""" Load Duqa labeled dataset. """
from __future__ import absolute_import, division, print_function
import collections
import json
import logging
import math
import os
from io import open
from tqdm import tqdm
from transformers.tokenization_bert import BasicTokenizer, whitespace_tokenize
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample."""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_json_data(cls, input_file):
"""Read a json file"""
lines = list(open(input_file, 'r', encoding='utf8').readlines())
lines = [json.loads(line) for line in lines]
return lines
class DuQAProcessor(DataProcessor):
"""Processor for the DuReader data set:"""
def get_train_examples(self, data_dir):
"""See base class."""
logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train_labeled.json")))
return self._create_examples(self._read_json_data(os.path.join(data_dir, "train_labeled.json")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_json_data(os.path.join(data_dir, "dev_labeled.json")), "dev")
def get_predict_examples(self, examples):
""" get predict examples
Args:
data_file: list include many json
"""
return self._create_examples(examples, "infer")
def get_labels(self):
"""
- 0:not_most_related
- 1: most_related
"""
return [0, 1]
def _create_examples(self, examples, set_type):
"""
here we input a example list:
[
{
"question_id": int,
"question": string,
"doc_tokens": string,
"mrc_logits": float,
"answer":sring,
}
"""
examples_list = []
for id, example in enumerate(examples):
guid = set_type + '-' + str(id)
text_a = example['question']
text_b = example['answer']
label = 0  # unused during prediction; it is only a placeholder tag
examples_list.append(
InputExample(
guid=guid,
text_a=text_a,
text_b=text_b,
label=label
)
)
return examples_list
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
label_map = {label : i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in tqdm(enumerate(examples), desc='loading_data'):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.info("label: %s (id = %d)" % (example.label, label_id))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id))
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
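
# Worked example (sketch): with len(tokens_a) == 5, len(tokens_b) == 4 and
# max_length == 6, the loop pops from a, then b, then a again, ending with
# 3 + 3 tokens -- the longer sequence always loses the next token.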
processors = {
'duqa': DuQAProcessor,
}
num_labels_task = {
'duqa': 2,
}
| 31.655 | 113 | 0.59027 | 3,140 | 0.493944 | 0 | 0 | 232 | 0.036495 | 0 | 0 | 1,699 | 0.267264 |
8b59f06aa5c12c6a5c23df65ae4eee79a9122e69 | 1,973 | py | Python | LanguageConstructs/DataModel/MetaProgramming/Reflection/attribute_builtins.py | ha-khan/PythonPractice | 31366d0a3380b168b96cf2e90cef3960efee8a7e | [
"MIT"
]
| null | null | null | LanguageConstructs/DataModel/MetaProgramming/Reflection/attribute_builtins.py | ha-khan/PythonPractice | 31366d0a3380b168b96cf2e90cef3960efee8a7e | [
"MIT"
]
| null | null | null | LanguageConstructs/DataModel/MetaProgramming/Reflection/attribute_builtins.py | ha-khan/PythonPractice | 31366d0a3380b168b96cf2e90cef3960efee8a7e | [
"MIT"
]
| null | null | null | from typing import Any
class Orchestrator:
# __class__ Reference to the object's class
#
# __dict__ Mapping that stores the writable attributes of an object or class
#
# __slots__ Attribute that may be defined in a class to limit the attributes its instances can have.
#
#
def __init__(self) -> None:
pass
def __setattr__(self, __name: str, __value: Any) -> None:
"""
        setattr() or the '.' assignment operator always invokes this special
        method, which lets us hook in a check before an attribute is set
        dynamically (monkey patching)
"""
if __name in vars(self):
raise ValueError('name: {} already set!'.format(__name))
self.__dict__[__name] = __value
# causes infinite recursion
# def __getattribute__(self, __name: str) -> Any:
# if __name in vars(self):
# return self.__dict__[__name]
# return vars(self)[__name]
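
    # vars(self) is itself an attribute lookup, so calling it inside
    # __getattribute__ re-enters the method and recurses forever. A
    # non-recursive variant (sketch) delegates to the base implementation:
    #
    # def __getattribute__(self, __name: str) -> Any:
    #     return object.__getattribute__(self, __name)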
def main():
o = Orchestrator()
apply = lambda a: print('applying {}'.format(a))
# invokes __setattr__
setattr(o,'apply', apply)
print('Has attribute \'apply\' is {}'.format(hasattr(o, 'apply')))
# invokes __getattr__
o_apply = getattr(o, 'apply')
o_apply('deployment')
print(o.__dict__ == vars(o))
print(o.__class__ == type(o))
#print(Orchestrator.__dict__)
try:
setattr(o,'apply', None)
except ValueError as e:
print(e)
delete = eval('lambda a: print(\'deleting {}\'.format(a))')
setattr(o, 'delete', delete)
o.delete('a')
try:
o.delete = None
except ValueError as e:
print(e)
try:
delattr(o, 'delete')
o.delete('deployment')
except AttributeError as e:
print(e)
print(dir(o))
print(locals())
print(globals())
print(callable(Orchestrator))
print(callable(o))
print(isinstance(o, Orchestrator))
if __name__ == '__main__':
main()
| 25.294872 | 104 | 0.601115 | 943 | 0.477952 | 0 | 0 | 0 | 0 | 0 | 0 | 845 | 0.428282 |
8b5af6372e48aa5e412d730c4fca44191540f238 | 1,360 | py | Python | src/gui/components/weeklycolormesh.py | larashores/spotify-analyzer | 98022b178ce3ef1b07a8f005aeba2aeb573125ee | [
"MIT"
]
| null | null | null | src/gui/components/weeklycolormesh.py | larashores/spotify-analyzer | 98022b178ce3ef1b07a8f005aeba2aeb573125ee | [
"MIT"
]
| null | null | null | src/gui/components/weeklycolormesh.py | larashores/spotify-analyzer | 98022b178ce3ef1b07a8f005aeba2aeb573125ee | [
"MIT"
]
| null | null | null | import collections
import colorsys
from typing import Iterable, List, Tuple
import matplotobjlib as plot
from backports import zoneinfo
from matplotlib.colors import ListedColormap
import utils
from gui.components import PlotComponent
from gui.options import ArtistChooser, ColorMap, Spinbox
from track import Track
class WeeklyColorMesh(PlotComponent):
name = "Weekly Color Mesh"
adjust = plot.SubplotsAdjust(left=0.12, right=0.975, top=0.975, bottom=0.09)
options = (ColorMap,)
def subplot(self, tracks: List[Track], color_map: ListedColormap) -> plot.SubPlot: # type: ignore # pylint: disable=arguments-differ
values = [[0 for i in range(24)] for i in range(7)]
for track in tracks:
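            # Rows run bottom-up to match the y tick labels below: weekday()
            # has Monday == 0, so (5 - weekday) % 7 maps Saturday -> 0,
            # Friday -> 1, ..., Sunday -> 6. Columns start at "1 am", so
            # hour - 1 puts hour 1 in column 0 and midnight (hour 0) wraps to
            # the last column ("12 am") via negative indexing.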
values[-((utils.in_day(track).weekday() - 5)) % 7][utils.in_hour(track).hour - 1] += 1
return plot.SubPlot(
plot.Colormesh(values, color_map),
x_tick_options=plot.TickOptions(
labels=[f"{i+1}\nam" for i in range(11)] + ["12\npm"] + [f"{i+1}\npm" for i in range(11)] + ["12\nam"],
values=[i + 0.5 for i in range(24)],
),
y_tick_options=plot.TickOptions(
labels=["Saturday", "Friday", "Thursday", "Wednesday", "Tuesday", "Monday", "Sunday"],
values=[i + 0.5 for i in range(7)],
),
)
| 35.789474 | 137 | 0.627206 | 1,039 | 0.763971 | 0 | 0 | 0 | 0 | 0 | 0 | 172 | 0.126471 |
8b5b05fdbf74764959912c9444f946a0e9f8ee11 | 3,524 | py | Python | hard-gists/1558831/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
]
| 21 | 2019-07-08T08:26:45.000Z | 2022-01-24T23:53:25.000Z | hard-gists/1558831/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
]
| 5 | 2019-06-15T14:47:47.000Z | 2022-02-26T05:02:56.000Z | hard-gists/1558831/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
]
| 17 | 2019-05-16T03:50:34.000Z | 2021-01-14T14:35:12.000Z | # [h] interpolated nudge dialog
'''a simple RoboFont dialog for the famous "interpolated nudge" script'''
# Interpolated Nudge for RoboFont -- Travis Kochel
# http://tktype.tumblr.com/post/15254264845/interpolated-nudge-for-robofont
# Interpolated Nudge -- Christian Robertson
# http://betatype.com/node/18
from vanilla import *
from NudgeCore import *
class interpolatedNudgeDialog(object):
_title = "Nudge"
_button_1 = 30
_button_2 = 20
_padding = 10
_width = (_button_1 * 3) + (_padding * 2) - 2
_height = (_button_1 * 4) + (_padding * 3) - 2
_nudge = 10
def __init__(self):
self.w = FloatingWindow(
(self._width,
self._height),
self._title)
self.w._up = SquareButton(
(self._button_1 + self._padding - 1,
self._padding,
self._button_1,
self._button_1),
"+",
callback=self._up_callback)
self.w._left = SquareButton(
(self._padding,
self._button_1 + self._padding - 1,
self._button_1,
self._button_1),
"-",
callback=self._left_callback)
self.w._right = SquareButton(
((self._button_1 * 2) + self._padding - 2,
self._button_1 + (self._padding - 1),
self._button_1,
self._button_1),
"+",
callback=self._right_callback)
self.w._down = SquareButton(
(self._button_1 + self._padding - 1,
(self._button_1 * 2) + (self._padding - 2),
self._button_1,
self._button_1),
"-",
callback=self._down_callback)
# nudge size
self.w._nudge_value = EditText(
(self._padding,
(self._button_1 * 3) + (self._padding * 2) + 5,
(self._width / 2) - (self._padding * 1.5),
20),
self._nudge,
sizeStyle='small',
readOnly=True)
self.w._nudge_plus = SquareButton(
(-self._padding - 20,
(self._button_1 * 3) + (self._padding * 2) + 5,
self._button_2,
self._button_2),
'+',
sizeStyle='small',
callback=self.nudge_plus_callback)
self.w._nudge_minus = SquareButton(
(-self._padding - 39,
(self._button_1 * 3) + (self._padding * 2) + 5,
self._button_2,
self._button_2),
'-',
sizeStyle='small',
callback=self.nudge_minus_callback)
# open dialog
self.w.open()
def nudge_minus_callback(self, sender):
_nudge = int(self.w._nudge_value.get()) - 10
if _nudge >= 0:
self._nudge = _nudge
self.w._nudge_value.set(self._nudge)
def nudge_plus_callback(self, sender):
self._nudge = int(self.w._nudge_value.get()) + 10
self.w._nudge_value.set(self._nudge)
def _left_callback(self, sender):
nudgeSelected((-self._nudge, 0))
def _right_callback(self, sender):
nudgeSelected((self._nudge, 0))
def _up_callback(self, sender):
nudgeSelected((0, self._nudge))
def _down_callback(self, sender):
nudgeSelected((0, -self._nudge))
# run
interpolatedNudgeDialog()
| 31.464286 | 75 | 0.521566 | 3,130 | 0.888195 | 0 | 0 | 0 | 0 | 0 | 0 | 377 | 0.106981 |
8b5d964924108495e0cb8ad5afc9e9b8d784d6b3 | 1,547 | py | Python | django_query_profiler/django/db/backends/database_wrapper_mixin.py | sonej/django-query-profiler | 4afe3694ded26d7ba0b435f5666e990b668d85b5 | [
"BSD-3-Clause"
]
| 97 | 2020-03-03T01:20:35.000Z | 2022-03-23T14:06:09.000Z | django_query_profiler/django/db/backends/database_wrapper_mixin.py | sonej/django-query-profiler | 4afe3694ded26d7ba0b435f5666e990b668d85b5 | [
"BSD-3-Clause"
]
| 24 | 2020-03-06T17:35:08.000Z | 2022-02-09T20:06:05.000Z | django_query_profiler/django/db/backends/database_wrapper_mixin.py | sonej/django-query-profiler | 4afe3694ded26d7ba0b435f5666e990b668d85b5 | [
"BSD-3-Clause"
]
| 9 | 2020-03-22T18:17:09.000Z | 2022-01-31T18:59:11.000Z | """
This module defines a mixin, which can be used by all implementations for all databases.
All the databases have a different hierarchy of DatabaseWrapper, but all of them derive from BaseDatabaseWrapper
"""
from abc import ABC
from typing import Optional
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.utils import CursorDebugWrapper, CursorWrapper
from .cursor_wrapper_instrumentation import QueryProfilerCursorDebugWrapper, QueryProfilerCursorWrapper
class QueryProfilerDatabaseWrapperMixin(BaseDatabaseWrapper, ABC):
def cursor(self):
cursor_wrapper = super().cursor()
kwargs = dict(
cursor=cursor_wrapper.cursor,
db=cursor_wrapper.db,
db_row_count=self.db_row_count(cursor_wrapper.cursor))
if isinstance(cursor_wrapper, CursorDebugWrapper):
return QueryProfilerCursorDebugWrapper(**kwargs)
elif isinstance(cursor_wrapper, CursorWrapper):
return QueryProfilerCursorWrapper(**kwargs)
else:
raise Exception("cursor_wrapper is not of either of {CursorWrapper, CursorDebugWrapper}. Is it because of "
"new version of django? Did you run the tests in the django_query_profiler - they must "
"have failed")
@staticmethod
def db_row_count(cursor) -> Optional[int]:
"""
Implementation varies by database types, having it as a function allows it to be overriden
"""
return cursor.rowcount
| 39.666667 | 120 | 0.707822 | 1,047 | 0.676794 | 0 | 0 | 214 | 0.138332 | 0 | 0 | 518 | 0.334842 |
8b5e99254ec155e2d433487c1c07674f3203394e | 1,736 | py | Python | Demo/frontend-server.py | hlynch/Penguins_AIforEarth | bccedb68640b20c6c6849040ad57823e99dbd0c6 | [
"MIT"
]
| 2 | 2019-06-17T14:09:45.000Z | 2020-08-17T00:20:44.000Z | Demo/frontend-server.py | hlynch/Penguins_AIforEarth | bccedb68640b20c6c6849040ad57823e99dbd0c6 | [
"MIT"
]
| 6 | 2019-05-21T16:24:43.000Z | 2019-05-28T18:41:04.000Z | Demo/frontend-server.py | hlynch/Penguins_AIforEarth | bccedb68640b20c6c6849040ad57823e99dbd0c6 | [
"MIT"
]
| null | null | null | '''
Webserver for the Penguin Guano Classification AI4Earth API
To run:
export FLASK_APP=frontend-server.py
python -m flask run --host=0.0.0.0
To access the website, enter your IP address:5000 into a browser.
e.g., http://127.0.0.1:5000/
'''
from flask import Flask, send_from_directory, request
import requests
print("Running frontend server")
API_ENDPOINT = "http://penguinguano.eastus.azurecontainer.io:80/v1/pytorch_api/classify"
app = Flask(__name__, static_url_path='')
# front-end server stuff
@app.route('/')
def root():
return send_from_directory('', 'index.html')
@app.route('/about.html')
def send_about():
return send_from_directory('', 'about.html')
@app.route('/instructions.html')
def send_instructions():
return send_from_directory('', 'instructions.html')
@app.route('/static/static-templates/<path:path>')
def send_templates(path):
return send_from_directory('static/static-templates', path)
@app.route('/static/css/<path:path>')
def send_css(path):
return send_from_directory('static/css', path)
@app.route('/static/js/<path:path>')
def send_js(path):
return send_from_directory('static/js', path)
@app.route('/static/images/<path:path>')
def send_image(path):
return send_from_directory('static/images', path)
@app.route('/get-classification', methods=['GET', 'POST'])
def get_classification():
if request.form['type'] == 'sample':
# TODO: enforce strict pathing to static image dir only
data = open('.' + request.form['file'], 'rb').read()
else:
data = request.files.get('file', '')
    r = requests.post(url=API_ENDPOINT, data=data,
                      headers={'Content-Type': 'application/octet-stream'})
return r.json()['image_url']
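
# Example request (sketch; the sample image path is hypothetical):
#   curl -F type=sample -F file=/static/images/sample1.jpg \
#        http://127.0.0.1:5000/get-classification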
if __name__ == '__main__':
app.run()
| 24.111111 | 88 | 0.711982 | 0 | 0 | 0 | 0 | 1,162 | 0.669355 | 0 | 0 | 813 | 0.468318 |
8b6038fb868f4e95b06475e6967de2992f3ee654 | 2,947 | py | Python | src/brouwers/shop/migrations/0021_payment.py | modelbrouwers/modelbrouwers | e0ba4819bf726d6144c0a648fdd4731cdc098a52 | [
"MIT"
]
| 6 | 2015-03-03T13:23:07.000Z | 2021-12-19T18:12:41.000Z | src/brouwers/shop/migrations/0021_payment.py | modelbrouwers/modelbrouwers | e0ba4819bf726d6144c0a648fdd4731cdc098a52 | [
"MIT"
]
| 95 | 2015-02-07T00:55:39.000Z | 2022-02-08T20:22:05.000Z | src/brouwers/shop/migrations/0021_payment.py | modelbrouwers/modelbrouwers | e0ba4819bf726d6144c0a648fdd4731cdc098a52 | [
"MIT"
]
| 2 | 2016-03-22T16:53:26.000Z | 2019-02-09T22:46:04.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-06-29 13:56
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
import django.db.models.deletion
from django.db import migrations, models
import brouwers.shop.models.utils
class Migration(migrations.Migration):
dependencies = [
("shop", "0020_auto_20190524_0927"),
]
operations = [
migrations.CreateModel(
name="Payment",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"reference",
models.CharField(
default=brouwers.shop.models.utils.get_payment_reference,
help_text="A unique payment reference",
max_length=16,
unique=True,
verbose_name="reference",
),
),
(
"amount",
models.IntegerField(
help_text="Amount to be paid, in eurocents.",
verbose_name="amount",
),
),
(
"data",
django.contrib.postgres.fields.jsonb.JSONField(
blank=True,
default=dict,
help_text="The exact payment data is provider-specific",
verbose_name="payment data",
),
),
(
"created",
models.DateTimeField(auto_now_add=True, verbose_name="created"),
),
(
"modified",
models.DateTimeField(auto_now=True, verbose_name="modified"),
),
(
"cart",
models.ForeignKey(
help_text="The shopping cart that generated this payment.",
on_delete=django.db.models.deletion.PROTECT,
to="shop.Cart",
verbose_name="shopping cart",
),
),
(
"payment_method",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
to="shop.PaymentMethod",
verbose_name="Payment method used",
),
),
],
options={
"verbose_name": "payment",
"verbose_name_plural": "payments",
},
),
]
| 33.11236 | 84 | 0.405836 | 2,676 | 0.908042 | 0 | 0 | 0 | 0 | 0 | 0 | 514 | 0.174415 |
8b620f703a95ef7c54125b1554d9a9e0de82f47e | 12,330 | py | Python | lib/rapi/auth/pam.py | regnauld/ganeti | c1d88461a964a5d0d89cd1ba0571429e01f0a1b5 | [
"BSD-2-Clause"
]
| 2 | 2018-09-26T10:09:23.000Z | 2018-09-27T07:27:06.000Z | lib/rapi/auth/pam.py | regnauld/ganeti | c1d88461a964a5d0d89cd1ba0571429e01f0a1b5 | [
"BSD-2-Clause"
]
| null | null | null | lib/rapi/auth/pam.py | regnauld/ganeti | c1d88461a964a5d0d89cd1ba0571429e01f0a1b5 | [
"BSD-2-Clause"
]
| null | null | null | #
#
# Copyright (C) 2015, 2016 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module interacting with PAM performing authorization and authentication
This module authenticates and authorizes RAPI users based on their credentials.
Both actions are performed by interaction with PAM as a 'ganeti-rapi' service.
"""
import logging
try:
import ctypes as c # pylint: disable=F0401
import ctypes.util as util
except ImportError:
c = None
from ganeti import constants
from ganeti.errors import PamRapiAuthError
import ganeti.http as http
from ganeti.http.auth import HttpServerRequestAuthentication
from ganeti.rapi import auth
__all__ = ['PamAuthenticator']
DEFAULT_SERVICE_NAME = 'ganeti-rapi'
MAX_STR_LENGTH = 100000
MAX_MSG_COUNT = 100
PAM_ENV_URI = 'GANETI_RAPI_URI'
PAM_ENV_BODY = 'GANETI_REQUEST_BODY'
PAM_ENV_METHOD = 'GANETI_REQUEST_METHOD'
PAM_ENV_ACCESS = 'GANETI_RESOURCE_ACCESS'
PAM_ABORT = 26
PAM_BUF_ERR = 5
PAM_CONV_ERR = 19
PAM_SILENT = 32768
PAM_SUCCESS = 0
PAM_PROMPT_ECHO_OFF = 1
PAM_AUTHTOK = 6
PAM_USER = 2
if c:
class PamHandleT(c.Structure):
"""Wrapper for PamHandleT
"""
_fields_ = [("hidden", c.c_void_p)]
def __init__(self):
c.Structure.__init__(self)
self.handle = 0
class PamMessage(c.Structure):
"""Wrapper for PamMessage
"""
_fields_ = [
("msg_style", c.c_int),
("msg", c.c_char_p),
]
class PamResponse(c.Structure):
"""Wrapper for PamResponse
"""
_fields_ = [
("resp", c.c_char_p),
("resp_retcode", c.c_int),
]
CONV_FUNC = c.CFUNCTYPE(c.c_int, c.c_int, c.POINTER(c.POINTER(PamMessage)),
c.POINTER(c.POINTER(PamResponse)), c.c_void_p)
class PamConv(c.Structure):
"""Wrapper for PamConv
"""
_fields_ = [
("conv", CONV_FUNC),
("appdata_ptr", c.c_void_p),
]
class CFunctions(object):
def __init__(self):
if not c:
raise PamRapiAuthError("ctypes Python package is not found;"
" remote API PAM authentication is not available")
self.libpam = c.CDLL(util.find_library("pam"))
if not self.libpam:
raise PamRapiAuthError("libpam C library is not found;"
" remote API PAM authentication is not available")
self.libc = c.CDLL(util.find_library("c"))
if not self.libc:
raise PamRapiAuthError("libc C library is not found;"
" remote API PAM authentication is not available")
self.pam_acct_mgmt = self.libpam.pam_acct_mgmt
self.pam_acct_mgmt.argtypes = [PamHandleT, c.c_int]
self.pam_acct_mgmt.restype = c.c_int
self.pam_authenticate = self.libpam.pam_authenticate
self.pam_authenticate.argtypes = [PamHandleT, c.c_int]
self.pam_authenticate.restype = c.c_int
self.pam_end = self.libpam.pam_end
self.pam_end.argtypes = [PamHandleT, c.c_int]
self.pam_end.restype = c.c_int
self.pam_get_item = self.libpam.pam_get_item
self.pam_get_item.argtypes = [PamHandleT, c.c_int, c.POINTER(c.c_void_p)]
self.pam_get_item.restype = c.c_int
self.pam_putenv = self.libpam.pam_putenv
self.pam_putenv.argtypes = [PamHandleT, c.c_char_p]
self.pam_putenv.restype = c.c_int
self.pam_set_item = self.libpam.pam_set_item
self.pam_set_item.argtypes = [PamHandleT, c.c_int, c.c_void_p]
self.pam_set_item.restype = c.c_int
self.pam_start = self.libpam.pam_start
self.pam_start.argtypes = [
c.c_char_p,
c.c_char_p,
c.POINTER(PamConv),
c.POINTER(PamHandleT),
]
self.pam_start.restype = c.c_int
self.calloc = self.libc.calloc
self.calloc.argtypes = [c.c_uint, c.c_uint]
self.calloc.restype = c.c_void_p
self.free = self.libc.free
self.free.argstypes = [c.c_void_p]
self.free.restype = None
self.strndup = self.libc.strndup
self.strndup.argstypes = [c.c_char_p, c.c_uint]
self.strndup.restype = c.c_char_p
def Authenticate(cf, pam_handle, authtok=None):
"""Performs authentication via PAM.
  Performs two steps:
- if authtok is provided then set it with pam_set_item
- call pam_authenticate
"""
try:
authtok_copy = None
if authtok:
authtok_copy = cf.strndup(authtok, len(authtok))
if not authtok_copy:
raise http.HttpInternalServerError("Not enough memory for PAM")
ret = cf.pam_set_item(c.pointer(pam_handle), PAM_AUTHTOK, authtok_copy)
if ret != PAM_SUCCESS:
raise http.HttpInternalServerError("pam_set_item failed [%d]" % ret)
ret = cf.pam_authenticate(pam_handle, 0)
if ret == PAM_ABORT:
raise http.HttpInternalServerError("pam_authenticate requested abort")
if ret != PAM_SUCCESS:
raise http.HttpUnauthorized("Authentication failed")
except:
cf.pam_end(pam_handle, ret)
raise
finally:
if authtok_copy:
cf.free(authtok_copy)
def PutPamEnvVariable(cf, pam_handle, name, value):
"""Wrapper over pam_setenv.
"""
setenv = "%s=" % name
if value:
setenv += value
ret = cf.pam_putenv(pam_handle, setenv)
if ret != PAM_SUCCESS:
raise http.HttpInternalServerError("pam_putenv call failed [%d]" % ret)
def Authorize(cf, pam_handle, uri_access_rights, uri=None, method=None,
body=None):
"""Performs authorization via PAM.
Performs two steps:
- initialize environmental variables
- call pam_acct_mgmt
"""
try:
PutPamEnvVariable(cf, pam_handle, PAM_ENV_ACCESS, uri_access_rights)
PutPamEnvVariable(cf, pam_handle, PAM_ENV_URI, uri)
PutPamEnvVariable(cf, pam_handle, PAM_ENV_METHOD, method)
PutPamEnvVariable(cf, pam_handle, PAM_ENV_BODY, body)
ret = cf.pam_acct_mgmt(pam_handle, PAM_SILENT)
if ret != PAM_SUCCESS:
raise http.HttpUnauthorized("Authorization failed")
except:
cf.pam_end(pam_handle, ret)
raise
def ValidateParams(username, _uri_access_rights, password, service, authtok,
_uri, _method, _body):
"""Checks whether ValidateRequest has been called with a correct params.
These checks includes:
- username is an obligatory parameter
- either password or authtok is an obligatory parameter
"""
if not username:
raise http.HttpUnauthorized("Username should be provided")
if not service:
    raise http.HttpBadRequest("Service should be provided")
if not password and not authtok:
raise http.HttpUnauthorized("Password or authtok should be provided")
def ValidateRequest(cf, username, uri_access_rights, password=None,
service=DEFAULT_SERVICE_NAME, authtok=None, uri=None,
method=None, body=None):
"""Checks whether it's permitted to execute an rapi request.
Calls pam_authenticate and then pam_acct_mgmt in order to check whether a
request should be executed.
@param cf: An instance of CFunctions class containing necessary imports
@param username: username
@param uri_access_rights: handler access rights
@param password: password
@param service: a service name that will be used for the interaction with PAM
@param authtok: user's authentication token (e.g. some kind of signature)
@param uri: an uri of a target resource obtained from an http header
@param method: http method trying to access the uri
@param body: a body of an RAPI request
@return: On success - authenticated user name. Throws an exception otherwise.
"""
ValidateParams(username, uri_access_rights, password, service, authtok, uri,
method, body)
def ConversationFunction(num_msg, msg, resp, _app_data_ptr):
"""Conversation function that will be provided to PAM modules.
The function replies with a password for each message with
PAM_PROMPT_ECHO_OFF style and just ignores the others.
"""
if num_msg > MAX_MSG_COUNT:
logging.warning("Too many messages passed to conv function: [%d]",
num_msg)
return PAM_BUF_ERR
response = cf.calloc(num_msg, c.sizeof(PamResponse))
if not response:
logging.warning("calloc failed in conv function")
return PAM_BUF_ERR
resp[0] = c.cast(response, c.POINTER(PamResponse))
for i in range(num_msg):
if msg[i].contents.msg_style != PAM_PROMPT_ECHO_OFF:
continue
resp.contents[i].resp = cf.strndup(password, len(password))
if not resp.contents[i].resp:
logging.warning("strndup failed in conv function")
for j in range(i):
cf.free(c.cast(resp.contents[j].resp, c.c_void_p))
cf.free(response)
return PAM_BUF_ERR
resp.contents[i].resp_retcode = 0
return PAM_SUCCESS
pam_handle = PamHandleT()
conv = PamConv(CONV_FUNC(ConversationFunction), 0)
ret = cf.pam_start(service, username, c.pointer(conv), c.pointer(pam_handle))
if ret != PAM_SUCCESS:
cf.pam_end(pam_handle, ret)
raise http.HttpInternalServerError("pam_start call failed [%d]" % ret)
Authenticate(cf, pam_handle, authtok)
Authorize(cf, pam_handle, uri_access_rights, uri, method, body)
# retrieve the authorized user name
puser = c.c_void_p()
ret = cf.pam_get_item(pam_handle, PAM_USER, c.pointer(puser))
if ret != PAM_SUCCESS or not puser:
cf.pam_end(pam_handle, ret)
raise http.HttpInternalServerError("pam_get_item call failed [%d]" % ret)
user_c_string = c.cast(puser, c.c_char_p)
cf.pam_end(pam_handle, PAM_SUCCESS)
return user_c_string.value
def MakeStringC(string):
"""Converts a string to a valid C string.
  Since the C side expects byte strings, encode unicode strings as 'ascii'.
  Also ensure that the C string is no longer than MAX_STR_LENGTH, in order to
  prevent attacks based on overly long buffers.
"""
if string is None:
return None
if isinstance(string, unicode):
string = string.encode("ascii")
if not isinstance(string, str):
return None
if len(string) <= MAX_STR_LENGTH:
return string
return string[:MAX_STR_LENGTH]
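
# Examples (sketch): MakeStringC(u"abc") == "abc"; MakeStringC(None) is None;
# inputs longer than MAX_STR_LENGTH are silently truncated to that length.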
class PamAuthenticator(auth.RapiAuthenticator):
"""Class providing an Authenticate method based on interaction with PAM.
"""
def __init__(self):
"""Checks whether ctypes has been imported.
"""
self.cf = CFunctions()
def ValidateRequest(self, req, handler_access, _):
"""Checks whether a user can access a resource.
This function retuns authenticated user name on success.
"""
username, password = HttpServerRequestAuthentication \
.ExtractUserPassword(req)
authtok = req.request_headers.get(constants.HTTP_RAPI_PAM_CREDENTIAL, None)
if handler_access is not None:
handler_access_ = ','.join(handler_access)
return ValidateRequest(self.cf, MakeStringC(username),
MakeStringC(handler_access_),
MakeStringC(password),
MakeStringC(DEFAULT_SERVICE_NAME),
MakeStringC(authtok), MakeStringC(req.request_path),
MakeStringC(req.request_method),
MakeStringC(req.request_body))
| 32.447368 | 79 | 0.701703 | 3,857 | 0.312814 | 0 | 0 | 0 | 0 | 0 | 0 | 4,601 | 0.373155 |
8b6521edee5c7a6f815e52a5b53c02dede9be866 | 918 | py | Python | iconparse/image_store.py | donk-project/pydonk | 50417ce9e655cdcab20918b474039426f583d6d3 | [
"MIT"
]
| null | null | null | iconparse/image_store.py | donk-project/pydonk | 50417ce9e655cdcab20918b474039426f583d6d3 | [
"MIT"
]
| null | null | null | iconparse/image_store.py | donk-project/pydonk | 50417ce9e655cdcab20918b474039426f583d6d3 | [
"MIT"
]
| null | null | null | # Donk Project
# Copyright (c) 2021 Warriorstar Orion <[email protected]>
# SPDX-License-Identifier: MIT
import pathlib
from typing import Dict
from iconparse.reader import DmiData, Reader
from iconparse.extractor import Extractor
class ImageStore:
def __init__(self, root: pathlib.Path):
self.root: pathlib.Path = root
self.dmi_datas: Dict[pathlib.Path, DmiData] = dict()
self.extractors: Dict[pathlib.Path, Extractor] = dict()
def GetDmiData(self, filename) -> DmiData:
p = self.root / pathlib.Path(filename)
if p not in self.dmi_datas:
self.dmi_datas[p] = Reader(p).Read()
return self.dmi_datas[p]
def GetExtractor(self, filename) -> Extractor:
p = self.root / pathlib.Path(filename)
if p not in self.extractors:
self.extractors[p] = Extractor(self.GetDmiData(filename))
return self.extractors[p]
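
# Usage sketch (the directory and file names are hypothetical):
#   store = ImageStore(pathlib.Path("icons"))
#   data = store.GetDmiData("mob.dmi")         # parsed and cached on first use
#   extractor = store.GetExtractor("mob.dmi")  # built from the cached DmiData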
| 32.785714 | 69 | 0.676471 | 678 | 0.738562 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.116558 |
8b655e5cbdbfcf38233bc910318fcb6e68177e29 | 28 | py | Python | network/__init__.py | sveatlo/inpainting | 6870ee56beea7401aa97194f76487c391af9dd5d | [
"Unlicense"
]
| 1 | 2021-08-08T03:17:17.000Z | 2021-08-08T03:17:17.000Z | network/__init__.py | sveatlo/inpainting | 6870ee56beea7401aa97194f76487c391af9dd5d | [
"Unlicense"
]
| 6 | 2021-08-08T13:12:55.000Z | 2022-03-13T15:26:02.000Z | network/__init__.py | sveatlo/unmasked | 6870ee56beea7401aa97194f76487c391af9dd5d | [
"Unlicense"
]
| null | null | null | from .gan import SNPatchGAN
| 14 | 27 | 0.821429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
8b666913019cd3ac664dfb714c512a8beb73daff | 10,601 | py | Python | mssql_backend/mssql_backend.py | Reposoft/trac-mssql | da8d8ae29ef81db39ca2d6af439d88f3d6ecfebd | [
"BSD-3-Clause"
]
| 1 | 2021-01-27T00:21:47.000Z | 2021-01-27T00:21:47.000Z | mssql_backend/mssql_backend.py | Reposoft/trac-mssql | da8d8ae29ef81db39ca2d6af439d88f3d6ecfebd | [
"BSD-3-Clause"
]
| 1 | 2015-05-11T18:34:46.000Z | 2017-02-12T07:07:06.000Z | mssql_backend/mssql_backend.py | Reposoft/trac-mssql | da8d8ae29ef81db39ca2d6af439d88f3d6ecfebd | [
"BSD-3-Clause"
]
| 1 | 2021-01-27T00:21:50.000Z | 2021-01-27T00:21:50.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 MATOBA Akihiro <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
from trac.core import *
from trac.config import Option
from trac.core import Component, implements
from trac.db.api import ConnectionBase
from trac.db.api import DatabaseManager
from trac.db.api import IDatabaseConnector
from trac.db.api import _parse_db_str, get_column_names
from trac.db.api import ConnectionBase
from trac.db.util import ConnectionWrapper
from trac.env import IEnvironmentSetupParticipant, ISystemInfoProvider
from trac.env import BackupError
from trac.db import Table, Column
import re
try:
import pymssql as pymssql
has_mssql = True
except ImportError:
has_mssql = False
# force enables this plugin in trac-admin initenv
#enabled = BoolOption("components", "mssql_backend.*", "enabled")
# Mapping from "abstract" SQL types to DB-specific types
_type_map = {
'int64': 'bigint',
'text': 'nvarchar(512)',
}
# TODO: MS Access cannot be used because the column name 'value' apparently
# cannot be used via ODBC.
_column_map = {
'key': '"key"',
# 'value': '"value"'
}
re_limit = re.compile(" LIMIT (\d+)( OFFSET (\d+))?", re.IGNORECASE)
re_order_by = re.compile("ORDER BY ", re.IGNORECASE)
re_where = re.compile("WHERE ", re.IGNORECASE)
re_equal = re.compile("(\w+)\s*=\s*(['\w]+|\?)", re.IGNORECASE)
re_isnull = re.compile("(\w+) IS NULL", re.IGNORECASE)
re_select = re.compile('SELECT( DISTINCT)?( TOP)?', re.IGNORECASE)
re_coalesce_equal = re.compile("(COALESCE\([^)]+\))=([^,]+)", re.IGNORECASE)
class MSSQLConnector(Component):
implements(IDatabaseConnector, IEnvironmentSetupParticipant,
ISystemInfoProvider)
required = False
def __init__(self):
self._mssql_version = None
# ISystemInfoProvider methods
def get_system_info(self):
if self.required:
yield 'pymssql', self._mssql_version
# IDatabaseConnector methods
def get_supported_schemes(self):
yield ('mssql', 1)
def init_db(self, path, schema=None, log=None, user=None, password=None,\
host=None, port=None, params={}):
cnx = self.get_connection(path, log, user, password, host, port, params)
cursor = cnx.cursor()
if schema is None:
from trac.db_default import schema
for table in schema:
for stmt in _to_sql(table):
cursor.execute(stmt)
cnx.commit()
def get_connection(self, path, log=None, user=None, password=None,
host=None, port=None, params={}):
cnx = MSSQLConnection(path, log, user, password, host, port, params)
return cnx
# IEnvironmentSetupParticipant methods
def environment_created(self):
pass
def environment_needs_upgrade(self):
return False
def upgrade_environment(self):
pass
def get_exceptions(self):
return pymssql
class MSSQLConnection(ConnectionBase, ConnectionWrapper):
"""Connection wrapper for MSSQL."""
poolable = True
def __init__(self, path, log, user=None, password=None, host=None, port=None, params={}):
if path.startswith('/'):
path = path[1:]
if 'host' in params:
host = params['host']
cnx = pymssql.connect(database=path, user=user, password=password, host=host, port=port)
self.schema = path
conn = ConnectionWrapper.__init__(self, cnx, log)
self._is_closed = False
def cursor(self):
cursor = SQLServerCursor(self.cnx.cursor(), self.log)
cursor.cnx = self
return cursor
def rollback(self):
try:
self.cnx.rollback()
except pymssql.ProgrammingError:
self._is_closed = True
def close(self):
if not self._is_closed:
try:
self.cnx.close()
except pymssql.ProgrammingError:
pass # this error would mean it's already closed. So, ignore
self._is_closed = True
def cast(self, column, type):
if type == 'signed':
type = 'int'
elif type == 'text':
type = 'varchar(max)'
return 'CAST(%s AS %s)' % (column, type)
def concat(self, *args):
return 'concat(%s)' % ', '.join(args)
def drop_table(self, table):
        # pymssql has no "cursors" submodule (that idiom was copied from the
        # MySQL backend), so use a plain cursor from the connection instead.
        cursor = self.cnx.cursor()
        cursor.execute("DROP TABLE IF EXISTS " + self.quote(table))
def get_column_names(self, table):
rows = self.execute("""
SELECT column_name FROM information_schema.columns
WHERE table_schema=%s AND table_name=%s
""", (self.schema, table))
return [row[0] for row in rows]
def get_last_id(self, cursor, table, column='id'):
return cursor.lastrowid
def get_table_names(self):
rows = self.execute("""
SELECT table_name FROM information_schema.tables
WHERE table_schema=%s""", (self.schema,))
return [row[0] for row in rows]
def like(self):
return 'LIKE %s'
    # TODO: quick hack; verify correctness.
def like_escape(self, text):
return text
    # TODO: quick hack; verify correctness.
def prefix_match(self):
return "LIKE %s ESCAPE '/'"
def prefix_match_value(self, prefix):
return self.like_escape(prefix) + '%'
def quote(self, identifier):
return '"%s"' % identifier
def update_sequence(self, cursor, table, column='id'):
# MSSQL handles sequence updates automagically
pass
def _to_sql(table):
sql = ["CREATE TABLE %s (" % table.name]
coldefs = []
for column in table.columns:
column.name = _column_map.get(column.name, column.name)
ctype = column.type.lower()
ctype = _type_map.get(ctype, ctype)
# for SQL Server, patch for "enum" table, value is not text, use int instead.
if table.name == 'enum' and column.name == 'value':
ctype = 'int'
if (table.name, column.name) in [
('wiki', 'text'),
('report', 'query'),
('report', 'description'),
('milestone', 'description'),
('version', 'description'),
]:
ctype = 'nvarchar(MAX)'
if (table.name, column.name) in [
('ticket', 'description'),
('ticket_change', 'oldvalue'),
('ticket_change', 'newvalue'),
('ticket_custom', 'value'),
('session_attribute', 'value')
]:
ctype = 'nvarchar(4000)'
        # I'm using SQL Server 2012 Express
if column.auto_increment:
ctype = 'INT IDENTITY NOT NULL' # SQL Server Style
# ctype = 'INT UNSIGNED NOT NULL AUTO_INCREMENT' # MySQL Style
# ctype = 'SERIAL' # PGSQL Style
# ctype = "integer constraint P_%s PRIMARY KEY" % table.name # SQLite Style
else:
# if column.name in table.key or any([column.name in index.columns for index in table.indices]):
# ctype = {'ntext': 'nvarchar(255)'}.get(ctype, ctype) # SQL Server cannot use text as PK
if len(table.key) == 1 and column.name in table.key:
ctype += " constraint P_%s PRIMARY KEY" % table.name
coldefs.append(" %s %s" % (column.name, ctype))
if len(table.key) > 1:
coldefs.append(" UNIQUE (%s)" % ','.join(table.key))
sql.append(',\n'.join(coldefs) + '\n);')
yield '\n'.join(sql)
for index in table.indices:
type_ = ('INDEX', 'UNIQUE INDEX')[index.unique]
yield "CREATE %s %s_%s_idx ON %s (%s);" % (type_, table.name,
'_'.join(index.columns), table.name, ','.join(index.columns))
class SQLServerCursor(object):
def __init__(self, cursor, log=None):
self.cursor = cursor
self.log = log
def __getattr__(self, name):
return getattr(self.cursor, name)
def __iter__(self):
while True:
row = self.cursor.fetchone()
if not row:
return
yield row
def execute(self, sql, args=None):
if args:
sql = sql % (('%s',) * len(args))
# replace __column__ IS NULL -> COALESCE(__column__, '') after ORDER BY
match = re_order_by.search(sql)
if match:
end = match.end()
for match in reversed([match for match in re_isnull.finditer(sql[end:])]):
replacement = "COALESCE(%s,'')" % match.group(1)
sql = sql[:end + match.start()] + replacement + sql[end + match.end():]
# replace __column__ = %s -> CASE __column__ WHEN %s THEN '0' ELSE '1' END after ORDER BY
match = re_order_by.search(sql)
if match:
end = match.end()
for match in reversed([match for match in re_equal.finditer(sql[end:])]):
replacement = "CASE %s WHEN %s THEN '0' ELSE '1' END" % (match.group(1), match.group(2))
sql = sql[:end + match.start()] + replacement + sql[end + match.end():]
for match in reversed([match for match in re_coalesce_equal.finditer(sql[end:])]):
replacement = "CASE %s WHEN %s THEN '0' ELSE '1' END" % (match.group(1), match.group(2))
sql = sql[:end + match.start()] + replacement + sql[end + match.end():]
# trim duplicated columns after ORDER BY
match = re_order_by.search(sql)
if match:
end = match.end()
match = re.search("'([a-z]+)'", sql[end:])
if match:
column_name = match.group(1)
re_columns = re.compile("([a-z]+.)?%s,?" % column_name)
order_by = ' '.join([column for column in match.string.split(' ') if not re_columns.match(column)])
self.log.debug(order_by)
sql = sql[:end] + order_by
# transform LIMIT clause
match = re_limit.search(sql)
if match:
limit = match.group(1)
offset = match.group(3)
if not offset:
# LIMIT n (without OFFSET) -> SELECT TOP n
sql = match.string[:match.start()].replace("SELECT", "SELECT TOP %s" % limit)
else:
# LIMIT n OFFSET m -> OFFSET m ROWS FETCH NEXT n ROWS ONLY
sql = match.string[:match.start()] + " OFFSET %s ROWS FETCH NEXT %s ROWS ONLY" % (offset, limit)
# match = re_where.search(sql)
# sql = match.string[:match.end()] + 'ROW_NUMBER() > %s, ' % limit + match.string[match.end():]
# avoid error in "order by" in sub query
# TODO: decide count of lines
else:
for match in reversed([match for match in re_select.finditer(sql) if match.group(2) == None]):
sql = sql[:match.end()] + ' TOP 1000' + sql[match.end():]
try:
if self.log: # See [trac] debug_sql in trac.ini
self.log.debug(sql)
self.log.debug(args)
if args:
self.cursor.execute(sql, tuple(args))
else:
self.cursor.execute(sql, ())
except:
self.cnx.rollback()
raise
def executemany(self, sql, args):
if not args:
return
sql = sql % (('%s',) * len(args[0]))
try:
if self.log: # See [trac] debug_sql in trac.ini
self.log.debug(sql)
self.log.debug(args)
self.cursor.executemany(sql, args)
except:
self.cnx.rollback()
raise
| 31.550595 | 111 | 0.647486 | 6,888 | 0.64975 | 2,204 | 0.207905 | 0 | 0 | 0 | 0 | 3,278 | 0.309216 |
8b668fce877fc1e0332e1fd014c47e5007f994ff | 6,767 | py | Python | CertifiableBayesianInference/BayesKeras/optimizers/adam.py | Hongchenglong/colab | 9cc5c15abde536493cc3f12008e791caa1d00070 | [
"Apache-2.0"
]
| null | null | null | CertifiableBayesianInference/BayesKeras/optimizers/adam.py | Hongchenglong/colab | 9cc5c15abde536493cc3f12008e791caa1d00070 | [
"Apache-2.0"
]
| null | null | null | CertifiableBayesianInference/BayesKeras/optimizers/adam.py | Hongchenglong/colab | 9cc5c15abde536493cc3f12008e791caa1d00070 | [
"Apache-2.0"
]
| null | null | null | #Author: Matthew Wicker
# Implements the Adam optimizer for BayesKeras
import os
import math
import logging
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tqdm import tqdm
from tqdm import trange
from BayesKeras.optimizers import optimizer
from BayesKeras.optimizers import losses
from BayesKeras import analyzers
from abc import ABC, abstractmethod
# A dumb mistake on my part which needs to be factored out
def softplus(x):
return tf.math.softplus(x)
class Adam(optimizer.Optimizer):
def __init__(self):
super().__init__()
# I set default params for each sub-optimizer but none for the super class for
# pretty obvious reasons
def compile(self, keras_model, loss_fn, batch_size=64, learning_rate=0.15, decay=0.0,
epochs=10, prior_mean=-1, prior_var=-1, **kwargs):
super().compile(keras_model, loss_fn, batch_size, learning_rate, decay,
epochs, prior_mean, prior_var, **kwargs)
# Now we get into the NoisyAdam specific enrichments to the class
self.beta_1 = kwargs.get('beta_1', 0.99)
self.beta_2 = kwargs.get('beta_2', 0.9999)
self.lam = kwargs.get('lam', 0.5)
self.m = [0.0 for i in range(len(self.posterior_mean))]
self.posterior_var = [tf.zeros(i.shape) for i in self.posterior_mean]
return self
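
    # Update rule implemented in step() (a sketch of what the code below does):
    #   m_i  <- beta_1 * m_i + (1 - beta_1) * (g_i + lam * mu_i / N)
    #   v_i  <- beta_2 * v_i + (1 - beta_2) * g_i ** 2
    #   mu_i <- mu_i - alpha * (m_i / (1 - beta_1)) / (sqrt(v_i) + lam / N)
    # where g is the minibatch gradient, mu the posterior mean, N the batch
    # size, and lam acts as a prior-precision-like regularizer.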
def step(self, features, labels, lrate):
alpha = lrate
beta_1 = self.beta_1
beta_2 = self.beta_2
lam = self.lam
posti_var = self.posterior_var
posti_mean = self.posterior_mean
N = float(self.batch_size) # batch size
with tf.GradientTape(persistent=True) as tape:
# Get the probabilities
predictions = self.model(features)
# Calculate the loss
if(int(self.robust_train) == 0):
loss = self.loss_func(labels, predictions)
elif(int(self.robust_train) == 1):
logit_l, logit_u = analyzers.IBP(self, features, self.model.trainable_variables, eps=self.epsilon)
v1 = tf.one_hot(labels, depth=10)
v2 = 1 - tf.one_hot(labels, depth=10)
worst_case = tf.math.add(tf.math.multiply(v2, logit_u), tf.math.multiply(v1, logit_l))
worst_case = self.model.layers[-1].activation(worst_case)
loss = self.loss_func(labels, predictions, worst_case, self.robust_lambda)
#self.train_rob(labels, worst_case)
elif(int(self.robust_train) == 2):
features_adv = analyzers.FGSM(self, features, self.attack_loss, eps=self.epsilon, num_models=-1)
# Get the probabilities
worst_case = self.model(features_adv)
# Calculate the loss
loss = self.loss_func(labels, predictions, worst_case, self.robust_lambda)
weight_gradient = tape.gradient(loss, self.model.trainable_variables)
g = np.asarray(weight_gradient)
sq_grad = []
for i in range(len(weight_gradient)):
sq_grad.append(tf.math.multiply(weight_gradient[i],weight_gradient[i]))
self.m[i] = (beta_1*self.m[i]) + ((1-beta_1)*(g[i]+((lam*posti_mean[i])/N)))
posti_var[i] = (beta_2*posti_var[i]) + ((1-beta_2)*(sq_grad[i]))
sq_grad = np.asarray(sq_grad); self.m = np.asarray(self.m)
posti_var = np.asarray(posti_var)
for i in range(len(weight_gradient)):
m_ = self.m[i]/(1-beta_1)
s_ = np.sqrt(posti_var[i]) + lam/N
posti_mean[i] = posti_mean[i] - (alpha*(m_/s_))
self.model.set_weights(posti_mean)
self.train_loss(loss)
self.train_metric(labels, predictions)
return posti_mean, posti_var
def old_step(self, features, labels, lrate):
# OPTIMIZATION PARAMETERS:
alpha = lrate #self.alpha
beta_1 = self.beta_1
beta_2 = self.beta_2
lam = self.lam
posti_mean = self.model.get_weights()
self.model.set_weights(posti_mean)
with tf.GradientTape(persistent=True) as tape:
# Get the probabilities
predictions = self.model(features)
# Calculate the loss
if(int(self.robust_train) == 0):
loss = self.loss_func(labels, predictions)
elif(int(self.robust_train) == 1):
logit_l, logit_u = analyzers.IBP(self, features, self.model.trainable_variables, eps=self.epsilon)
v1 = tf.one_hot(labels, depth=10)
v2 = 1 - tf.one_hot(labels, depth=10)
worst_case = tf.math.add(tf.math.multiply(v2, logit_u), tf.math.multiply(v1, logit_l))
worst_case = self.model.layers[-1].activation(worst_case)
loss = self.loss_func(labels, predictions, worst_case, self.robust_lambda)
#self.train_rob(labels, worst_case)
elif(int(self.robust_train) == 2):
features_adv = analyzers.FGSM(self, features, self.attack_loss, eps=self.epsilon, num_models=-1)
# Get the probabilities
worst_case = self.model(features_adv)
# Calculate the loss
loss = self.loss_func(labels, predictions, worst_case, self.robust_lambda)
weight_gradient = tape.gradient(loss, self.model.trainable_variables)
g = np.asarray(weight_gradient)
#print(g)
sq_grad = []
for i in range(len(weight_gradient)):
sq_grad.append(tf.math.multiply(weight_gradient[i],weight_gradient[i]))
self.m[i] = (beta_1*self.m[i]) + ((1-beta_1)*(g[i]))
self.posterior_var[i] = (beta_2*self.posterior_var[i]) + ((1-beta_2)*(sq_grad[i]))
#print("sq: ", sq_grad)
sq_grad = np.asarray(sq_grad); self.m = np.asarray(self.m)
self.posterior_var = np.asarray(self.posterior_var)
for i in range(len(weight_gradient)):
m_ = self.m[i]/(1-beta_1)
s_ = np.sqrt(self.posterior_var[i])
#print(alpha*(m_/s_))
self.posterior_mean[i] = self.posterior_mean[i] - (alpha*(m_/s_))
#self.model.set_weights(self.posterior_mean)
self.train_loss(loss)
self.train_metric(labels, predictions)
return self.posterior_mean, self.posterior_var
def train(self, X_train, y_train, X_test=None, y_test=None):
super().train(X_train, y_train, X_test, y_test)
def sample(self):
return self.model.get_weights()
| 42.031056 | 114 | 0.612827 | 6,178 | 0.91296 | 0 | 0 | 0 | 0 | 0 | 0 | 726 | 0.107285 |
8b66b9f64668e1a15163413263d5b63cdc824a7c | 1,435 | py | Python | Scripts/ExplicitInstantation.py | fbudin69500/calatk | 3cee90488feab7e3ef2ade1f791106aa7f11e404 | [
"Apache-2.0"
]
| 2 | 2019-09-15T12:51:02.000Z | 2020-04-08T14:03:58.000Z | Scripts/ExplicitInstantation.py | cpatrick/calatk | 849c17919ac5084b5b067c7631bc2aa1efd650df | [
"Apache-2.0"
]
| null | null | null | Scripts/ExplicitInstantation.py | cpatrick/calatk | 849c17919ac5084b5b067c7631bc2aa1efd650df | [
"Apache-2.0"
]
| 1 | 2018-10-20T16:38:28.000Z | 2018-10-20T16:38:28.000Z | #!/usr/bin/env python
"""Create a .cxx file that performs explicit instantiation over float/double and
dimensions 1, 2, and 3. Writes the file to the current directory."""
usage = "ExplicitInstantiation.py <class_name>"
import sys
if len(sys.argv) < 2 or sys.argv[1] == '-h' or sys.argv[1] == '--help':
print(usage)
sys.exit(1)
copyright_header = """/*
*
* Copyright 2011 by the CALATK development team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
"""
explicit_file = open(sys.argv[1] + '.cxx', 'w')
explicit_file.write(copyright_header)
content = """
#include "{0}.txx"
namespace CALATK
{
template class {0}< float, 1 >;
template class {0}< float, 2 >;
template class {0}< float, 3 >;
template class {0}< double, 1 >;
template class {0}< double, 2 >;
template class {0}< double, 3 >;
} // namespace CALATK
""".replace('{0}', sys.argv[1])
explicit_file.write(content)
explicit_file.close()
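
# Example (sketch; the class name is hypothetical):
#   ./ExplicitInstantiation.py CMetric
# writes CMetric.cxx, instantiating CMetric< float/double, 1/2/3 >.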
| 26.090909 | 80 | 0.694774 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,130 | 0.787456 |
8b67d69e37e542f410bab436a641c536c8c9539f | 3,231 | py | Python | aiopogo/auth_google.py | DennyLoko/aiopogo | 55a9efe13c51261c68ab2abe8efc4ac69e04eb01 | [
"MIT"
]
| 14 | 2017-03-28T16:32:24.000Z | 2021-03-13T23:03:57.000Z | aiopogo/auth_google.py | ultrafunkamsterdam/aiopogo | 43444c994a400bc9bc8fd1ccaa6a1f79ff5df1fe | [
"MIT"
]
| 8 | 2017-03-01T07:56:09.000Z | 2017-08-15T07:37:12.000Z | aiopogo/auth_google.py | ultrafunkamsterdam/aiopogo | 43444c994a400bc9bc8fd1ccaa6a1f79ff5df1fe | [
"MIT"
]
| 14 | 2017-04-08T20:01:50.000Z | 2017-08-19T04:23:57.000Z | from concurrent.futures import ThreadPoolExecutor
from functools import partial
from time import time
try:
from gpsoauth import perform_master_login, perform_oauth
except ImportError:
def perform_master_login(*args, **kwargs):
raise ImportError('Must install gpsoauth to use Google accounts')
perform_oauth = perform_master_login
from .auth import Auth
from .exceptions import AuthException, InvalidCredentialsException
class AuthGoogle(Auth):
GOOGLE_LOGIN_ANDROID_ID = '9774d56d682e549c'
GOOGLE_LOGIN_SERVICE = 'audience:server:client_id:848232511240-7so421jotr2609rmqakceuu1luuq0ptb.apps.googleusercontent.com'
GOOGLE_LOGIN_APP = 'com.nianticlabs.pokemongo'
GOOGLE_LOGIN_CLIENT_SIG = '321187995bc7cdc2b5fc91b11a96e2baa8602c62'
def __init__(self, proxy=None, refresh_token=None):
Auth.__init__(self)
self.provider = 'google'
self._refresh_token = refresh_token
self._proxy = proxy
async def user_login(self, username, password):
self.log.info('Google User Login for: %s', username)
try:
assert (isinstance(username, str)
and isinstance(password, str))
except AssertionError:
raise InvalidCredentialsException(
"Username/password not correctly specified")
login = partial(
perform_master_login,
username,
password,
self.GOOGLE_LOGIN_ANDROID_ID,
proxy=self._proxy)
with ThreadPoolExecutor(max_workers=1) as executor:
user_login = await self.loop.run_in_executor(executor, login)
try:
self._refresh_token = user_login['Token']
except KeyError:
raise AuthException("Invalid Google Username/password")
await self.get_access_token()
async def get_access_token(self, force_refresh=False):
if not force_refresh and self.check_access_token():
self.log.debug('Using cached Google access token')
return self._access_token
self._access_token = None
self.authenticated = False
self.log.info('Requesting Google access token...')
oauth = partial(perform_oauth, None, self._refresh_token,
self.GOOGLE_LOGIN_ANDROID_ID, self.GOOGLE_LOGIN_SERVICE,
self.GOOGLE_LOGIN_APP, self.GOOGLE_LOGIN_CLIENT_SIG,
proxy=self._proxy)
with ThreadPoolExecutor(max_workers=1) as executor:
token_data = await self.loop.run_in_executor(executor, oauth)
try:
self._access_token = token_data['Auth']
except KeyError:
self._access_token = None
self.authenticated = False
raise AuthException("Could not receive a Google Access Token")
try:
self._access_token_expiry = float(token_data['Expiry'])
except KeyError:
self._access_token_expiry = time() + 7200.0
self.authenticated = True
self.log.info('Google Access Token successfully received.')
self.log.debug('Google Access Token: %s...',
self._access_token[:25])
return self._access_token
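
# Usage sketch (assumes a running event loop and valid Google credentials;
# the account values below are hypothetical):
#
#   auth = AuthGoogle()
#   await auth.user_login('[email protected]', 'app-password')
#   token = await auth.get_access_token()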
| 36.715909 | 127 | 0.662953 | 2,786 | 0.862272 | 0 | 0 | 0 | 0 | 2,259 | 0.699164 | 548 | 0.169607 |
8b6885de235e02a3261fb9ade6f0f1e6618cd36f | 86 | py | Python | samples/src/main/resources/datasets/python/88.py | sritchie/kotlingrad | 8165ed1cd77220a5347c58cded4c6f2bcf22ee30 | [
"Apache-2.0"
]
| 11 | 2020-12-19T01:19:44.000Z | 2021-12-25T20:43:33.000Z | src/main/resources/datasets/python/88.py | breandan/katholic | 081c39f3acc73ff41f5865563debe78a36e1038f | [
"Apache-2.0"
]
| null | null | null | src/main/resources/datasets/python/88.py | breandan/katholic | 081c39f3acc73ff41f5865563debe78a36e1038f | [
"Apache-2.0"
]
| 2 | 2021-01-25T07:59:20.000Z | 2021-08-07T07:13:49.000Z | def test25(a, b):
(a) + (b.x)
(None) + (a[1])
def test0():
return 1, 2, 3
| 14.333333 | 19 | 0.430233 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
8b68b894928fc1a47949be32739e5721fad32eb5 | 518 | py | Python | voluseg/_tools/evenly_parallelize.py | jingxlim/voluseg | 41429a73a481fbffc3a15457be262ec021304b51 | [
"MIT"
]
| 10 | 2019-11-05T18:49:50.000Z | 2022-03-07T04:15:53.000Z | voluseg/_tools/evenly_parallelize.py | jingxlim/voluseg | 41429a73a481fbffc3a15457be262ec021304b51 | [
"MIT"
]
| 5 | 2021-02-09T20:32:38.000Z | 2021-03-22T16:53:40.000Z | voluseg/_tools/evenly_parallelize.py | jingxlim/voluseg | 41429a73a481fbffc3a15457be262ec021304b51 | [
"MIT"
]
| 3 | 2019-12-09T08:30:18.000Z | 2021-03-22T01:58:44.000Z | def evenly_parallelize(input_list):
'''return evenly partitioned spark resilient distributed dataset (RDD)'''
import numpy as np
from pyspark.sql.session import SparkSession
spark = SparkSession.builder.getOrCreate()
sc = spark.sparkContext
n_input = len(input_list)
n_parts = sc.parallelize(input_list).getNumPartitions()
partitions = np.floor(np.linspace(0, n_parts, n_input, endpoint=False)).astype(int)
return sc.parallelize(zip(partitions, input_list)).partitionBy(n_parts)
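
# Example (sketch): with n_input == 5 items and n_parts == 2 default
# partitions, np.linspace(0, 2, 5, endpoint=False) -> [0, 0.4, 0.8, 1.2, 1.6],
# so the partition keys become [0, 0, 0, 1, 1] -- an even split.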
| 37 | 87 | 0.747104 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 73 | 0.140927 |
8b6908539193ed05f7b55115e992b2c27664607d | 3,153 | py | Python | deepplats/models/utils.py | GuillaumeDMMarion/deep-plats | d1f58d9fe07a7e3e7560fd4b425234fd5512da1a | [
"MIT"
]
| null | null | null | deepplats/models/utils.py | GuillaumeDMMarion/deep-plats | d1f58d9fe07a7e3e7560fd4b425234fd5512da1a | [
"MIT"
]
| null | null | null | deepplats/models/utils.py | GuillaumeDMMarion/deep-plats | d1f58d9fe07a7e3e7560fd4b425234fd5512da1a | [
"MIT"
]
| null | null | null | """Model helper module.
"""
from __future__ import annotations
from typing import Union
import numpy as np
import torch
class Scaler:
"""
Standardize features by removing the mean and scaling to unit variance.
Accepts both torch.Tensor and numpy.ndarray.
"""
def __init__(self, astype="float32"):
self.astype = astype
self.fitted = False
self.mean = None
self.std = None
@staticmethod
def _coerce(
X: Union[np.ndarray, torch.Tensor], astype: str
) -> Union[np.ndarray, torch.Tensor]:
        if isinstance(X, np.ndarray):
            return X.astype(astype).copy()
        if isinstance(X, torch.Tensor):
            return X.type(getattr(torch, astype)).clone()
        # Fail loudly instead of implicitly returning None for other types.
        raise TypeError(f"Expected np.ndarray or torch.Tensor, got {type(X)}")
def fit(self, X: Union[np.ndarray, torch.Tensor]) -> Scaler:
"""Extract mean and std from training."""
mean_kwargs = std_kwargs = {}
if isinstance(X, torch.Tensor):
mean_kwargs = dict(keepdim=True)
std_kwargs = dict(unbiased=False, keepdim=True)
self.mean = float(X.mean(0, **mean_kwargs))
self.std = float(X.std(0, **std_kwargs) + 1e-7)
self.fitted = True
return self
def transform(
self, X: Union[np.ndarray, torch.Tensor]
) -> Union[np.ndarray, torch.Tensor]:
"""Transform array."""
X = self._coerce(X, self.astype)
X -= self.mean
X /= self.std
return X
def inverse_transform(
self, X: Union[np.ndarray, torch.Tensor]
) -> Union[np.ndarray, torch.Tensor]:
"""Transform array."""
X = self._coerce(X, self.astype)
X *= self.std
X += self.mean
return X
def fit_transform(
self, X: Union[np.ndarray, torch.Tensor]
) -> Union[np.ndarray, torch.Tensor]:
"""Fit, then transform array."""
self.fit(X)
return self.transform(X)
class TimeScaler(Scaler):
"""Scaler specific for monotonically increasing timesteps."""
def __init__(self, astype="float32"):
self.step = None
super().__init__(astype=astype)
@staticmethod
def _extract_steps(
X: Union[np.ndarray, torch.Tensor]
) -> Union[np.ndarray, torch.Tensor]:
X_flat = X.flatten()
steps = np.diff(X_flat) if isinstance(X, np.ndarray) else X_flat.diff()
return steps
def fit(self, X: Union[np.ndarray, torch.Tensor]) -> Scaler:
untransformed_steps = self._extract_steps(X)
assert (
np.unique(untransformed_steps).size == 1
), "Time should be monotonically increasing."
fit_res = super().fit(X)
transform = super().transform(X)
self.step = float(self._extract_steps(transform)[0])
return fit_res
class FlattenLSTM(torch.nn.Module):
"""LSTM flattener"""
def __init__(self, last_step: bool = True):
super().__init__()
self.last_step = last_step
def forward(self, X: torch.Tensor) -> torch.Tensor:
"""Default forward method."""
out, (final_out, _) = X
if self.last_step:
return final_out[0]
return out.flatten(1)
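

if __name__ == "__main__":
    # Minimal round-trip check (sketch). Note that fit() casts the mean and
    # std to float, so this assumes single-feature (1-D) input.
    data = np.array([1.0, 2.0, 3.0, 4.0])
    scaler = Scaler()
    scaled = scaler.fit_transform(data)
    restored = scaler.inverse_transform(scaled)
    assert np.allclose(restored, data, atol=1e-4)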
| 29.745283 | 79 | 0.598795 | 3,024 | 0.959087 | 0 | 0 | 561 | 0.177926 | 0 | 0 | 450 | 0.142721 |
8b69509f22f3cb70d7f8b98551364109fc2064fa | 1,491 | py | Python | test/utils/test_utils.py | Chick-star/sagemaker-xgboost-container | e06e278b3a34515f79fa73ab770b574b9aafe5f0 | [
"Apache-2.0"
]
| 1 | 2021-07-10T15:08:18.000Z | 2021-07-10T15:08:18.000Z | test/utils/test_utils.py | Chick-star/sagemaker-xgboost-container | e06e278b3a34515f79fa73ab770b574b9aafe5f0 | [
"Apache-2.0"
]
| null | null | null | test/utils/test_utils.py | Chick-star/sagemaker-xgboost-container | e06e278b3a34515f79fa73ab770b574b9aafe5f0 | [
"Apache-2.0"
]
| 1 | 2020-02-07T22:41:34.000Z | 2020-02-07T22:41:34.000Z | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import socket
from contextlib import closing
import test.utils.local_mode as localmode
def files_exist(opt_ml, files):
for f in files:
assert localmode.file_exists(opt_ml, f), 'file {} was not created'.format(f)
def predict_and_assert_response_length(data, content_type):
predict_response = localmode.request(data, content_type=content_type)
assert len(predict_response) == len(data)
# From https://stackoverflow.com/a/45690594
def find_two_open_ports():
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s1:
s1.bind(('', 0))
s1.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s2:
s2.bind(('', 0))
s2.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s1.getsockname()[1], s2.getsockname()[1]
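if __name__ == '__main__':
    # Added illustration (not part of the original helpers): the returned
    # ports are free at call time but not reserved, so a race is possible
    # before a server re-binds them.
    first, second = find_two_open_ports()
    print('two open ports:', first, second)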
| 35.5 | 84 | 0.7277 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 625 | 0.419182 |
8b6aace89e3d825b331240e13aabc132d611171f | 2,584 | py | Python | setup_extension.py | kuwayamamasayuki/FeedValidator-extension-for-GTFS-JP | af01375d0cf99c671a8a49f8f3a7aac2083424bc | ["Apache-2.0"] | 1 | 2020-04-03T09:18:53.000Z | 2020-04-03T09:18:53.000Z | setup_extension.py | kuwayamamasayuki/FeedValidator-extension-for-GTFS-JP | af01375d0cf99c671a8a49f8f3a7aac2083424bc | ["Apache-2.0"] | null | null | null | setup_extension.py | kuwayamamasayuki/FeedValidator-extension-for-GTFS-JP | af01375d0cf99c671a8a49f8f3a7aac2083424bc | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python2.5
# Copyright (C) 2019 KUWAYAMA, Masayuki
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (C) 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import agency
import agency_jp
import stop
import route
import route_jp
import trip
import office_jp
import fareattribute
import farerule
import shape
import feedinfo
import translation
import gtfsfactory
import schedule
def GetGtfsFactory(factory = None):
if not factory:
factory = gtfsfactory.GetGtfsFactory()
# Agency class extension
factory.UpdateClass('Agency', agency.Agency)
# Agency_jp class extension
factory.UpdateClass('Agency_jp', agency_jp.Agency_jp)
# Stop class extension
factory.UpdateClass('Stop', stop.Stop)
# Route class extension
factory.UpdateClass('Route', route.Route)
# Route_jp class extension
factory.UpdateClass('Route_jp', route_jp.Route_jp)
# Trip class extension
factory.UpdateClass('Trip', trip.Trip)
# Office_jp class extension
factory.UpdateClass('Office_jp', office_jp.Office_jp)
# FareAttribute class extension
factory.UpdateClass('FareAttribute', fareattribute.FareAttribute)
    # FareRule class extension
factory.UpdateClass('FareRule', farerule.FareRule)
# Shape class extension
factory.UpdateClass('Shape', shape.Shape)
# FeedInfo class extension
factory.UpdateClass('FeedInfo', feedinfo.FeedInfo)
# Translation class extension
factory.UpdateClass('Translation', translation.Translation)
# Schedule class extension
factory.UpdateClass('Schedule', schedule.Schedule)
return factory
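if __name__ == '__main__':
    # Added illustration: build a factory with all GTFS-JP extensions
    # registered. The attribute lookup below assumes transitfeed-style
    # gtfsfactory semantics, where classes registered via UpdateClass are
    # reachable as factory attributes.
    gtfs_factory = GetGtfsFactory()
    print(gtfs_factory.Agency)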
| 28.711111 | 74 | 0.768576 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,618 | 0.626161 |
8b6bbd5a925b35697b012e2714a2cfeb198264c6 | 651 | py | Python | api/team_directory/questions/migrations/0003_auto_20200930_0947.py | Hipo/team-directory | dfc999a6b464e88c020cfebe3b569b960b5d7e3d | ["MIT"] | null | null | null | api/team_directory/questions/migrations/0003_auto_20200930_0947.py | Hipo/team-directory | dfc999a6b464e88c020cfebe3b569b960b5d7e3d | ["MIT"] | 2 | 2020-06-05T23:54:21.000Z | 2020-09-30T12:50:16.000Z | api/team_directory/questions/migrations/0003_auto_20200930_0947.py | Hipo/team-directory | dfc999a6b464e88c020cfebe3b569b960b5d7e3d | ["MIT"] | null | null | null |
# Generated by Django 2.2.5 on 2020-09-30 09:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('questions', '0002_create_questions'),
]
operations = [
migrations.RemoveField(
model_name='question',
name='type',
),
migrations.AlterField(
model_name='question',
name='category',
field=models.CharField(choices=[('childhood', 'Childhood'), ('what_if', 'What if...'), ('lifestyle', 'Lifestyle'), ('work_experience', 'Work Experience'), ('hobbies', 'Hobbies')], max_length=255),
),
]
| 28.304348 | 208 | 0.58679 | 558 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 234 | 0.359447 |
8b6d14070d60a3432471d3e5b7787427ad3b6a3d | 565 | py | Python | CursoemVideo/Desafio076.py | davihonorato/Curso-python | 47e6b4b2f5b37ef520b8b31d37dba0b5d259a0b0 | ["MIT"] | null | null | null | CursoemVideo/Desafio076.py | davihonorato/Curso-python | 47e6b4b2f5b37ef520b8b31d37dba0b5d259a0b0 | ["MIT"] | null | null | null | CursoemVideo/Desafio076.py | davihonorato/Curso-python | 47e6b4b2f5b37ef520b8b31d37dba0b5d259a0b0 | ["MIT"] | null | null | null |
# Python exercise 076: create a program holding a single tuple with product names and their respective prices, in sequence.
# At the end, display a price listing, laying the data out in tabular form.
produtos = ('LÁPIS', 1.75,
'BORRACHA', 2,
'CADERNO', 20,
'CANETAS', 7,
'MOCHILA', 120)
print('-'*40)
print(f'{"PRODUTOS":^40}')
print('-'*40)
for c in range(0, len(produtos)):
if c % 2 == 0:
print(f'{produtos[c]:.<30}', end='R$')
else:
print(f'{produtos[c]:>7.2f}')
print('-'*40)
| 31.388889 | 129 | 0.580531 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 335 | 0.58669 |
8b6db37566c6d60a2bd9e55330800dc0a7ad705e | 8,558 | py | Python | tests/test_simba.py | SIMBAChain/libsimba.py-platform | a815105a5ed84564c7eafbe01281473cebfb44e5 | ["MIT"] | null | null | null | tests/test_simba.py | SIMBAChain/libsimba.py-platform | a815105a5ed84564c7eafbe01281473cebfb44e5 | ["MIT"] | 2 | 2022-02-25T05:03:13.000Z | 2022-03-09T13:56:56.000Z | tests/test_simba.py | SIMBAChain/libsimba.py-platform | a815105a5ed84564c7eafbe01281473cebfb44e5 | ["MIT"] | null | null | null |
import unittest
from unittest.mock import patch
from libsimba.simba import Simba
class TestSimba(unittest.TestCase):
def setUp(self):
self.simba = Simba()
patcher_send = patch("libsimba.simba_request.SimbaRequest.send")
patcher_init = patch("libsimba.simba_request.SimbaRequest.__init__")
self.addCleanup(patcher_send.stop)
self.addCleanup(patcher_init.stop)
self.mock_send = patcher_send.start()
self.mock_init = patcher_init.start()
self.mock_init.return_value = None
def test_submit_transaction_by_address(self):
resp = self.simba.submit_transaction_by_address(
"app_id",
"contract",
"identifier",
"method",
{"key": "value"},
)
self.mock_send.assert_called_once_with(headers={}, json_payload={"key": "value"})
self.mock_init.assert_called_once_with(
'/v2/apps/app_id/contract/contract/address/identifier/method/', {}, method='POST')
def test_submit_transaction_by_address_with_params(self):
resp = self.simba.submit_transaction_by_address(
"app_id",
"contract",
"identifier",
"method",
{"key": "value"},
query_args={"bob": "boby"},
sender_address="0x1773",
)
self.mock_send.assert_called_once_with(
headers={'txn-sender': '0x1773'}, json_payload={"key": "value"})
self.mock_init.assert_called_once_with(
'/v2/apps/app_id/contract/contract/address/identifier/method/', {"bob": "boby"}, method='POST')
def test_submit_transaction_by_asset(self):
resp = self.simba.submit_transaction_by_asset(
"app_id",
"contract",
"identifier",
"method",
{"key": "value"},
)
self.mock_send.assert_called_once_with(headers={}, json_payload={"key": "value"})
self.mock_init.assert_called_once_with(
'/v2/apps/app_id/contract/contract/asset/identifier/method/', {}, method='POST')
def test_submit_transaction_by_asset_with_params(self):
resp = self.simba.submit_transaction_by_asset(
"app_id",
"contract",
"identifier",
"method",
{"key": "value"},
query_args={"bob": "boby"},
sender_address="0x1773",
)
self.mock_send.assert_called_once_with(
headers={'txn-sender': '0x1773'}, json_payload={"key": "value"})
self.mock_init.assert_called_once_with(
'/v2/apps/app_id/contract/contract/asset/identifier/method/', {"bob": "boby"}, method='POST')
def test_submit_contract_method(self):
resp = self.simba.submit_contract_method(
"app_id",
"contract",
"method",
{"key": "value"},
)
self.mock_send.assert_called_once_with(headers={}, json_payload={"key": "value"})
self.mock_init.assert_called_once_with(
'/v2/apps/app_id/contract/contract/method/', {}, method='POST')
def test_submit_contract_method_with_params(self):
resp = self.simba.submit_contract_method(
"app_id",
"contract",
"method",
{"key": "value"},
query_args={"bob": "boby"},
sender_address="0x1773",
)
self.mock_send.assert_called_once_with(
headers={'txn-sender': '0x1773'}, json_payload={"key": "value"})
self.mock_init.assert_called_once_with(
'/v2/apps/app_id/contract/contract/method/', {"bob": "boby"}, method='POST')
def test_submit_transaction_by_address_async(self):
resp = self.simba.submit_transaction_by_address_async(
"app_id",
"contract",
"identifier",
"method",
{"key": "value"},
)
self.mock_send.assert_called_once_with(headers={}, json_payload={"key": "value"})
self.mock_init.assert_called_once_with(
'/v2/apps/app_id/async/contract/contract/address/identifier/method/', {}, method='POST')
def test_submit_transaction_by_address_async_with_params(self):
resp = self.simba.submit_transaction_by_address_async(
"app_id",
"contract",
"identifier",
"method",
{"key": "value"},
query_args={"bob": "boby"},
sender_address="0x1773",
)
self.mock_send.assert_called_once_with(
headers={'txn-sender': '0x1773'}, json_payload={"key": "value"})
self.mock_init.assert_called_once_with(
'/v2/apps/app_id/async/contract/contract/address/identifier/method/', {"bob": "boby"}, method='POST')
def test_submit_transaction_by_asset_async(self):
resp = self.simba.submit_transaction_by_asset_async(
"app_id",
"contract",
"identifier",
"method",
{"key": "value"},
)
self.mock_send.assert_called_once_with(headers={}, json_payload={"key": "value"})
self.mock_init.assert_called_once_with(
'/v2/apps/app_id/async/contract/contract/asset/identifier/method/', {}, method='POST')
def test_submit_transaction_by_asset_async_with_params(self):
resp = self.simba.submit_transaction_by_asset_async(
"app_id",
"contract",
"identifier",
"method",
{"key": "value"},
query_args={"bob": "boby"},
sender_address="0x1773",
)
self.mock_send.assert_called_once_with(
headers={'txn-sender': '0x1773'}, json_payload={"key": "value"})
self.mock_init.assert_called_once_with(
'/v2/apps/app_id/async/contract/contract/asset/identifier/method/', {"bob": "boby"}, method='POST')
def test_submit_contract_method_async(self):
resp = self.simba.submit_contract_method_async(
"app_id",
"contract",
"method",
{"key": "value"},
)
self.mock_send.assert_called_once_with(headers={}, json_payload={"key": "value"})
self.mock_init.assert_called_once_with(
'/v2/apps/app_id/async/contract/contract/method/', {}, method='POST')
def test_submit_contract_method_async_with_params(self):
resp = self.simba.submit_contract_method_async(
"app_id",
"contract",
"method",
{"key": "value"},
query_args={"bob": "boby"},
sender_address="0x1773",
)
self.mock_send.assert_called_once_with(
headers={'txn-sender': '0x1773'}, json_payload={"key": "value"})
self.mock_init.assert_called_once_with(
'/v2/apps/app_id/async/contract/contract/method/', {"bob": "boby"}, method='POST')
def test_create_contract_instance(self):
resp = self.simba.create_contract_instance(
"app_id",
"contract",
)
self.mock_send.assert_called_once_with(headers={})
self.mock_init.assert_called_once_with(
'/v2/apps/app_id/new/contract/', {}, method='POST')
def test_create_contract_instance_with_params(self):
resp = self.simba.create_contract_instance(
"app_id",
"contract",
query_args={"bob": "boby"},
sender_address="0x1773",
)
self.mock_send.assert_called_once_with(
headers={'txn-sender': '0x1773'})
self.mock_init.assert_called_once_with(
'/v2/apps/app_id/new/contract/', {"bob": "boby"}, method='POST')
def test_submit_signed_transaction(self):
resp = self.simba.submit_signed_transaction(
"app_id",
"tnx-id",
{"txn": "data"},
)
self.mock_send.assert_called_once_with(json_payload={'transaction': {'txn': 'data'}})
self.mock_init.assert_called_once_with(
'/v2/apps/app_id/transactions/tnx-id/', {}, method='POST')
def test_submit_signed_transaction_with_params(self):
resp = self.simba.submit_signed_transaction(
"app_id",
"tnx-id",
{"txn": "data"},
query_args={"bob": "boby"},
)
self.mock_send.assert_called_once_with(json_payload={'transaction': {'txn': 'data'}})
self.mock_init.assert_called_once_with(
'/v2/apps/app_id/transactions/tnx-id/', {'bob': 'boby'}, method='POST')
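if __name__ == '__main__':
    # Added convenience entry point so the suite can also be run directly
    # with `python test_simba.py` instead of only through a test runner.
    unittest.main()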
| 38.9 | 113 | 0.592545 | 8,470 | 0.989717 | 0 | 0 | 0 | 0 | 0 | 0 | 2,224 | 0.259874 |
8b6dc47fa5a53a344b6d3a7e96adce1b89de4411 | 521 | py | Python | projects/golem_integration/tests/actions/wait_for_element_enabled.py | kangchenwei/keyautotest2 | f980d46cabfc128b2099af3d33968f236923063f | ["MIT"] | null | null | null | projects/golem_integration/tests/actions/wait_for_element_enabled.py | kangchenwei/keyautotest2 | f980d46cabfc128b2099af3d33968f236923063f | ["MIT"] | null | null | null | projects/golem_integration/tests/actions/wait_for_element_enabled.py | kangchenwei/keyautotest2 | f980d46cabfc128b2099af3d33968f236923063f | ["MIT"] | null | null | null |
from golem import actions
description = 'Verify wait_for_element_enabled action'
def test(data):
actions.navigate(data.env.url+'dynamic-elements/?delay=3')
actions.wait_for_element_enabled('#button-three', 10)
actions.verify_element_enabled('#button-three')
actions.navigate(data.env.url + 'dynamic-elements/?delay=5')
try:
actions.wait_for_element_enabled('#button-three', 3)
except Exception as e:
assert "Timeout waiting for element #button-three to be enabled" in e.args[0]
| 34.733333 | 85 | 0.729367 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 196 | 0.3762 |
8b6f0fc1892ec8aa8153dba6ca257fd87d9c6c75 | 4,263 | py | Python | Sketches/THF/3D/playground/SimpleCube.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | ["Apache-2.0"] | 12 | 2015-10-20T10:22:01.000Z | 2021-07-19T10:09:44.000Z | Sketches/THF/3D/playground/SimpleCube.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | ["Apache-2.0"] | 2 | 2015-10-20T10:22:55.000Z | 2017-02-13T11:05:25.000Z | Sketches/THF/3D/playground/SimpleCube.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | ["Apache-2.0"] | 6 | 2015-03-09T12:51:59.000Z | 2020-03-01T13:06:21.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
=====================
Simple Cube component
=====================
A simple multi-coloured cube rendered as six translucent OpenGL quads.
The demo at the bottom of the file creates three cubes and streams
relative rotations to the centre one.
"""
import Axon
import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
from Display3D import Display3D
from Util3D import *
from Object3D import *
class SimpleCube(Object3D):
def __init__(self, **argd):
super(SimpleCube, self).__init__(**argd)
self.grabbed = False
def setup(self):
self.addListenEvents( [pygame.MOUSEMOTION, pygame.MOUSEBUTTONDOWN, pygame.MOUSEBUTTONUP ])
def draw(self):
# draw faces
glBegin(GL_QUADS)
glColor4f(1.0,0.75,0.75,0.5)
glVertex3f(1.0,1.0,1.0)
glVertex3f(-1.0,1.0,1.0)
glVertex3f(-1.0,-1.0,1.0)
glVertex3f(1.0,-1.0,1.0)
glColor4f(0.75,1.0,0.75, 0.5)
glVertex3f(1.0,1.0,-1.0)
glVertex3f(1.0,-1.0,-1.0)
glVertex3f(-1.0,-1.0,-1.0)
glVertex3f(-1.0,1.0,-1.0)
glColor4f(0.75,0.75,1.0, 0.5)
glVertex3f(1.0,1.0,1.0)
glVertex3f(1.0,-1.0,1.0)
glVertex3f(1.0,-1.0,-1.0)
glVertex3f(1.0,1.0,-1.0)
glColor4f(1.0,0.75,1.0, 0.5)
glVertex3f(-1.0,1.0,1.0)
glVertex3f(-1.0,-1.0,1.0)
glVertex3f(-1.0,-1.0,-1.0)
glVertex3f(-1.0,1.0,-1.0)
glColor4f(0.75,1.0,1.0, 0.5)
glVertex3f(1.0,1.0,1.0)
glVertex3f(-1.0,1.0,1.0)
glVertex3f(-1.0,1.0,-1.0)
glVertex3f(1.0,1.0,-1.0)
glColor4f(1.0,1.0,0.75, 0.5)
glVertex3f(1.0,-1.0,1.0)
glVertex3f(-1.0,-1.0,1.0)
glVertex3f(-1.0,-1.0,-1.0)
glVertex3f(1.0,-1.0,-1.0)
glEnd()
def handleEvents(self):
pass
#while self.dataReady("inbox"):
#event = self.recv("inbox")
#if event.type == pygame.MOUSEBUTTONDOWN and self.ogl_name in event.hitobjects:
#if event.button in [1,3]:
#self.grabbed = event.button
#if event.button == 4:
#self.pos.z -= 1
#if event.button == 5:
#self.pos.z += 1
#if event.type == pygame.MOUSEBUTTONUP:
#if event.button in [1,3]:
#self.grabbed = 0
#if event.type == pygame.MOUSEMOTION:
#if self.grabbed == 1:
#self.rot.y += float(event.rel[0])
#self.rot.x += float(event.rel[1])
#self.rot %= 360
#if self.grabbed == 3:
#self.pos.x += float(event.rel[0])/10.0
#self.pos.y -= float(event.rel[1])/10.0
if __name__=='__main__':
class Bunch: pass
class CubeRotator(Axon.Component.component):
def main(self):
while 1:
yield 1
self.send( (0.1, 0.1, 0.1), "outbox")
from Kamaelia.Util.Graphline import Graphline
CUBEC = SimpleCube(pos=Vector(0, 0,-12), name="Center cube").activate()
CUBER = SimpleCube(pos=Vector(4,0,-22), name="Right cube").activate()
CUBEB = SimpleCube(pos=Vector(0,-4,-18), name="Bottom cube").activate()
ROTATOR = CubeRotator().activate()
ROTATOR.link((ROTATOR, "outbox"), (CUBEC, "rel_rotation"))
Axon.Scheduler.scheduler.run.runThreads()
| 31.577778 | 98 | 0.546798 | 2,572 | 0.603331 | 114 | 0.026742 | 0 | 0 | 0 | 0 | 1,646 | 0.386113 |
8b71d0b65eecf04d767d50cdc3d7516cf1940fbe | 236 | py | Python | routers.py | gabrielangelo/revelo-wallet | 3e91117b673e5aaf50773aa180af4117235965c9 | ["BSD-3-Clause"] | null | null | null | routers.py | gabrielangelo/revelo-wallet | 3e91117b673e5aaf50773aa180af4117235965c9 | ["BSD-3-Clause"] | 8 | 2020-02-11T23:50:12.000Z | 2022-03-14T22:51:54.000Z | routers.py | gabrielangelo/revelo-wallet | 3e91117b673e5aaf50773aa180af4117235965c9 | ["BSD-3-Clause"] | null | null | null |
from rest_framework.routers import SimpleRouter
from transactions.api.views import TransactionsViewSet
router_v1 = SimpleRouter(trailing_slash=False)
router_v1.register(r'transactions', TransactionsViewSet, base_name='transactions')
| 33.714286 | 82 | 0.855932 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.122881 |
8b72e1cc46246e65f5c4487e4423aa24c3c70e6e | 8,480 | py | Python | plugins/modules/waf_domain.py | schrej/ansible-collection-cloud | 1fa1d18aaa06178616af17d8240e8fc5d13a370c | ["Apache-2.0"] | 16 | 2020-09-22T14:45:52.000Z | 2022-02-11T07:56:38.000Z | plugins/modules/waf_domain.py | schrej/ansible-collection-cloud | 1fa1d18aaa06178616af17d8240e8fc5d13a370c | ["Apache-2.0"] | 153 | 2020-08-20T14:00:55.000Z | 2022-03-30T13:48:51.000Z | plugins/modules/waf_domain.py | schrej/ansible-collection-cloud | 1fa1d18aaa06178616af17d8240e8fc5d13a370c | ["Apache-2.0"] | 11 | 2020-09-01T12:21:09.000Z | 2021-12-23T09:48:34.000Z |
#!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DOCUMENTATION = '''
---
module: waf_domain
short_description: Add/Modify/Delete WAF domain
extends_documentation_fragment: opentelekomcloud.cloud.otc
version_added: "0.0.3"
author: "Anton Sidelnikov (@anton-sidelnikov)"
description:
- Add/Modify/Delete WAF domain from the OTC.
options:
name:
description: Specifies the domain name.
required: true
type: str
certificate:
description: Specifies the certificate.
type: str
server:
description: Specifies the origin server information.
Each element contains client_protocol (HTTP or HTTPS),
server_protocol (HTTP or HTTPS),
address (IP address or domain name),
port (from 0 to 65535)
type: list
elements: dict
proxy:
description: Specifies whether a proxy is configured.
type: bool
sip_header_name:
description: Specifies the type of the source IP header.
choices: [default, cloudflare, akamai, custom]
type: str
sip_header_list:
description: Specifies the HTTP request header
for identifying the real source IP address.
type: list
elements: str
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
type: str
requirements: ["openstacksdk", "otcextensions"]
'''
RETURN = '''
waf_domain:
description: List of dictionaries describing domains matching query.
type: complex
returned: On Success.
contains:
id:
description: Specifies the instance ID.
type: str
hostname:
description: Specifies the domain name.
type: str
cname:
description: Specifies the CNAME value.
type: str
sample: "efec1196267b41c399f2980ea4048517.waf.cloud.com."
policy_id:
description: Specifies the policy ID.
type: str
protect_status:
description: Specifies the WAF mode.
type: int
access_status:
description: Specifies whether a domain name is connected to WAF.
type: int
protocol:
description: Specifies the protocol type.
type: str
certificate_id:
description: Specifies the certificate ID.
type: str
server:
description: Specifies the origin server information.
type: dict
proxy:
description: Specifies whether a proxy is configured.
type: bool
timestamp:
description: Specifies the time when a domain name is created.
type: str
'''
EXAMPLES = '''
# Create Domain.
- waf_domain:
name: test.domain.name
server:
- client_protocol: https
server_protocol: https
address: 4.3.2.1
port: 8080
proxy: False
state: present
# Modify Domain.
- waf_domain:
name: "{{ domain_name }}"
certificate: "{{ cert_name }}"
# Delete Domain.
- waf_domain:
name: "{{ domain_id }}"
state: absent
'''
from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule
class WafDomainModule(OTCModule):
argument_spec = dict(
name=dict(required=True, type='str'),
certificate=dict(required=False),
server=dict(required=False, type='list', elements='dict'),
proxy=dict(required=False, type='bool'),
sip_header_name=dict(required=False, choices=['default', 'cloudflare', 'akamai', 'custom']),
sip_header_list=dict(required=False, type='list', elements='str'),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = dict(
supports_check_mode=True
)
otce_min_version = '0.9.0'
def _check_server_client_protocol(self, server: list):
for srv in server:
if srv['client_protocol'] == 'HTTPS':
return True
return False
def _compare_servers_list(self, old, new):
        # A length change (server added or removed) counts as a difference;
        # zip() alone would silently ignore trailing extra entries.
        if len(old) != len(new):
            return True
        return any(x != y for x, y in zip(old, new))
def run(self):
name_filter = self.params['name']
domain = None
changed = False
domain = self.conn.waf.find_domain(name_or_id=name_filter, ignore_missing=True)
if domain:
if not domain.server:
domain = self.conn.waf.get_domain(domain.id)
if self.params['state'] == 'absent':
changed = False
if domain:
if self.ansible.check_mode:
self.exit_json(changed=True)
self.conn.waf.delete_domain(domain)
changed = True
elif self.params['state'] == 'present':
query = {}
certificate_filter = self.params['certificate']
server_filter = self.params['server']
proxy_filter = self.params['proxy']
sip_header_name_filter = self.params['sip_header_name']
sip_header_list_filter = self.params['sip_header_list']
if name_filter:
query['name'] = name_filter
if certificate_filter:
try:
res = self.conn.waf.find_certificate(name_or_id=certificate_filter)
query['certificate_id'] = res.id
except self.sdk.exceptions.ResourceNotFound:
self.fail_json(msg='certificate not found.')
if server_filter:
for srv in server_filter:
srv['client_protocol'] = srv['client_protocol'].upper()
srv['server_protocol'] = srv['server_protocol'].upper()
if server_filter and not domain:
if self._check_server_client_protocol(server_filter):
if not certificate_filter:
                    self.fail_json(msg='certificate should be specified'
                                       ' when client_protocol is equal to HTTPS.')
query['server'] = server_filter
if proxy_filter and not domain:
query['proxy'] = proxy_filter
if not sip_header_name_filter and not sip_header_list_filter:
                    self.fail_json(msg='sip_header_name and sip_header_list'
                                       ' should be specified when proxy is set to true.')
else:
query['sip_header_name'] = sip_header_name_filter
query['sip_header_list'] = sip_header_list_filter
if domain:
mquery = {}
if certificate_filter:
if domain.certificate_id != query['certificate_id']:
mquery['certificate_id'] = query['certificate_id']
if proxy_filter:
if domain.proxy != proxy_filter:
mquery['proxy'] = proxy_filter
if sip_header_name_filter:
if domain.sip_header_name != sip_header_name_filter:
mquery['sip_header_name'] = sip_header_name_filter
if sip_header_list_filter:
if domain.sip_header_list != sip_header_list_filter:
mquery['sip_header_list'] = sip_header_list_filter
if server_filter:
if self._compare_servers_list(old=domain.server, new=server_filter):
mquery['server'] = server_filter
if self.ansible.check_mode:
self.exit_json(changed=True)
domain = self.conn.waf.update_domain(domain, **mquery)
self.exit(
changed=True,
waf_domain=domain.to_dict()
)
if self.ansible.check_mode:
self.exit_json(changed=True)
domain = self.conn.waf.create_domain(**query)
self.exit(
changed=True,
waf_domain=domain.to_dict()
)
self.exit(changed=changed)
def main():
module = WafDomainModule()
module()
if __name__ == '__main__':
main()
| 33.254902 | 100 | 0.610024 | 4,891 | 0.576769 | 0 | 0 | 0 | 0 | 0 | 0 | 4,001 | 0.471816 |
8b73af8b167c0c808ac06e682936f0020d7644ea | 2,104 | py | Python | python/raft/NodeState.py | chenzhaoplus/vraft | 73fe880289061cfbb62aa33b8e5c7d012543bb9d | ["Apache-2.0"] | 23 | 2020-05-17T04:22:17.000Z | 2022-02-22T02:09:34.000Z | python/raft/NodeState.py | chenzhaoplus/vraft | 73fe880289061cfbb62aa33b8e5c7d012543bb9d | ["Apache-2.0"] | 1 | 2020-10-22T11:47:54.000Z | 2020-10-22T11:47:54.000Z | python/raft/NodeState.py | chenzhaoplus/vraft | 73fe880289061cfbb62aa33b8e5c7d012543bb9d | ["Apache-2.0"] | 11 | 2020-07-11T07:12:19.000Z | 2022-03-23T08:24:15.000Z |
import collections
from cluster import Cluster
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s: %(message)s', datefmt='%H:%M:%S', level=logging.INFO)
VoteResult = collections.namedtuple('VoteResult', ['term', 'vote_granted', 'id'])
class NodeState:
def __init__(self, node=None):
self.cluster = Cluster()
self.node = node
self.id = node.id
self.current_term = 0
self.vote_for = None # node.id of the voted candidate
# input: candidate (id, current_term, lastLogIndex, lastLogTerm)
# output: vote_granted (true/false), term (current_term, for candidate to update itself)
# rule:
# 1. return false if candidate.term < current_term
# 2. return true if (voteFor is None or voteFor==candidate.id) and candidate's log is newer than receiver's
def vote(self, vote_request):
term = vote_request['term']
candidate_id = vote_request['candidate_id']
        if term > self.current_term:
            logging.info(f'{self} approves vote request since term: {term} > {self.current_term}')
            self.vote_for = candidate_id
            self.current_term = term
            return VoteResult(term=self.current_term, vote_granted=True, id=self.id)
        if term < self.current_term:
            logging.info(f'{self} rejects vote request since term: {term} < {self.current_term}')
            return VoteResult(term=self.current_term, vote_granted=False, id=self.id)
        # vote_request.term == self.current_term
        if self.vote_for is None or self.vote_for == candidate_id:
            # TODO check if the candidate's log is newer than receiver's
            self.vote_for = candidate_id
            return VoteResult(term=self.current_term, vote_granted=True, id=self.id)
        logging.info(f'{self} rejects vote request since vote_for: {self.vote_for} != {candidate_id}')
        return VoteResult(term=self.current_term, vote_granted=False, id=self.id)
# another thread might change the state into Follower when got heartbeat
# only candidate could return True
# it returns False for both Leader and Follower
def win(self):
return False
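if __name__ == '__main__':
    # Minimal illustration of the voting rules above (added; not part of the
    # original module). The real `Node` lives elsewhere, so a stub exposing
    # just an `id` attribute is enough here.
    class _StubNode:
        id = 1
    state = NodeState(node=_StubNode())
    print(state.vote({'term': 1, 'candidate_id': 2}))  # granted: higher term
    print(state.vote({'term': 1, 'candidate_id': 3}))  # rejected: already voted for 2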
| 42.08 | 113 | 0.66635 | 1,843 | 0.875951 | 0 | 0 | 0 | 0 | 0 | 0 | 936 | 0.444867 |
8b73f4c7986a2f8c2bd94b366f876d38ceb6a037 | 303 | py | Python | 06source_code/service-center/language-service/bin/language_service.py | dumengnan/unicorn | 165330ff8e01bc18e3eca2d8ecf23b5d955f155b | [
"Apache-2.0"
]
| null | null | null | 06source_code/service-center/language-service/bin/language_service.py | dumengnan/unicorn | 165330ff8e01bc18e3eca2d8ecf23b5d955f155b | [
"Apache-2.0"
]
| 8 | 2020-01-28T22:31:03.000Z | 2022-03-02T03:37:47.000Z | 06source_code/service-center/language-service/bin/language_service.py | dumengnan/unicorn | 165330ff8e01bc18e3eca2d8ecf23b5d955f155b | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python
# encoding: utf-8
import _load_lib
import sys
import logging
import os
from unicorn.language.app\
    import main as language_main
if __name__ == '__main__':
try:
        language_main()
except Exception as ex:
logging.exception("main except")
os._exit(1)
| 16.833333 | 40 | 0.673267 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 61 | 0.20132 |
8b75a7eaacdb476c970a5cf2013b558edc778b20 | 10,303 | py | Python | incremental_evaluation_run.py | comrob/ensgendel | 4958d588a30a5bc60c6e7af5abb2b830b1265a25 | ["BSD-3-Clause"] | null | null | null | incremental_evaluation_run.py | comrob/ensgendel | 4958d588a30a5bc60c6e7af5abb2b830b1265a25 | ["BSD-3-Clause"] | null | null | null | incremental_evaluation_run.py | comrob/ensgendel | 4958d588a30a5bc60c6e7af5abb2b830b1265a25 | ["BSD-3-Clause"] | null | null | null |
import incremental_evaluation.utils as IE
import incremental_evaluation.scenario_sets as SS
import incremental_evaluation.visualisation_helper as VH
import models.basic_predictor_interfaces
import models.ensgendel_interface
import incremental_evaluation.data_file_helper as DFH
import os
import argparse
SS_MNIST012 = "mnist012"
SS_MNIST197 = "mnist197"
SS_MNIST_CN5 = "mnist_cn5"
SS_GAUSS3 = "gauss_3"
RESULTS = os.path.join("results", "incremental_evaluation_run")
def datafile_path(_experiment_name, _scenario_set_name, _trial_tag):
return os.path.join(RESULTS, "{}_{}_{}".format(_experiment_name, _scenario_set_name, _trial_tag))
def stat_cell_format(stats, iteration):
return "{:.2f}({:.2f})".format(stats["mean"][iteration], stats["std"][iteration])
parser = argparse.ArgumentParser(description="EnsGenDel algorithm & Incremental evaluation framework.\n"
"The continual learning algorithms are evaluated in predefined scenarios."
"For example: [{0:[9,7]}, {0:[8], 1:[7]}] is a scenario of two tasks."
"In the first task {0: [9, 7]} the predictor gets training instances of "
"nines and sevens images labeled as 0. In the second task {0:[8], 1:[7]} "
"the predictor gets training instances of eights labeled as 0 and "
"sevens labeled as 1. Note that the sevens changed the label. After the "
"second task the predictor should classify nines and eights as 0 and "
"sevens as 1.\n"
"The scenario is encoded into bracket-less notation in filenames, e.g., "
"[{0:[9,7]}, {0:[8], 1:[7]}] -> T0x97T0x8a1x7 (any resemblance with "
"hexadecimals is purely coincidental).")
parser.add_argument('experiment_name', help="Experiment name which will be in file prefix.")
parser.add_argument('scenario_name', help="Select the scenario. One of the following: " + str([
SS_MNIST012, SS_MNIST197, SS_MNIST_CN5, SS_GAUSS3]) + "The scenario name is appended after experiment_name.")
parser.add_argument('modes',
help="Series of numbers activating five modes of this application:"
"1:scenario preview; 2:predictor training; 3:debug evaluation; "
"4:generate csv table with evaluation stats; 5:generate accuracy plots"
";e.g., '24' trains the predictors and then generates csv table with results.")
parser.add_argument('--trials', type=int, default=1, help="Number of independent runs. The trial number is appended "
"in the postfix of the file.")
parser.add_argument('--trials_from', type=int, default=0, help="Index of the first trial.")
parser.add_argument('--scout_number', type=int, default=-1, help="Crop the training set to this many samples, "
                                                                 "speeding up training at the cost of accuracy.")
parser.add_argument("--debug", default=False, type=bool, help="Runs only light weight models. True/False")
if __name__ == '__main__':
args = parser.parse_args()
# Experiment setup
trial_tags = [i for i in range(args.trials_from, args.trials_from + args.trials)]
experiment_name = args.experiment_name
scout_subset = args.scout_number if args.scout_number > 0 else None
scenario_set_name = args.scenario_name
mode = list(map(int, args.modes))
# mode += [1] # show scenario data
# mode += [2] # run predictor learning on scenarios
# mode += [3] # evaluate predictors scenarios
# mode += [4] # write accuracy statistics into table
# mode += [5] # write accuracy statistics into table
# list of predictor classes that implement the incremental_evaluation.interfaces.Predictor
if args.debug:
predictor_builders = [
models.basic_predictor_interfaces.SGD,
models.basic_predictor_interfaces.Perceptron,
]
else:
predictor_builders = [
models.ensgendel_interface.Ensgendel,
models.ensgendel_interface.Ensgen,
models.ensgendel_interface.Ens,
models.basic_predictor_interfaces.Perceptron,
]
# scenario sets implementing the incremental_evaluation.interfaces.ScenarioSet
if scenario_set_name == SS_MNIST012:
scenario_set = SS.MnistMinimalScenarios(digits_tripplet=(0, 1, 2), debug_set=False, scout_subset=scout_subset)
visualiser = VH.mnist_visualiser
elif scenario_set_name == SS_MNIST197:
scenario_set = SS.MnistMinimalScenarios(digits_tripplet=(1, 9, 7), debug_set=False, scout_subset=scout_subset)
visualiser = VH.mnist_visualiser
elif scenario_set_name == SS_MNIST_CN5:
scenario_set = SS.MnistConvergentFiveScenarios(scout_subset=scout_subset)
visualiser = VH.mnist_visualiser
elif scenario_set_name == SS_GAUSS3:
scenario_set = SS.Gauss3DMinimalScenarios(train_size=scout_subset)
visualiser = VH.gauss3d_visualiser
else:
raise NotImplementedError(scenario_set_name)
# setting up basic directories
if not os.path.exists("results"):
os.mkdir("results")
if not os.path.exists(RESULTS):
os.mkdir(RESULTS)
# Pre-flight check of the scenario
if 1 in mode:
scenarios = scenario_set.get_scenarios()
train_sam, train_sub = scenario_set.get_training_set()
test_sam, test_sub = scenario_set.get_test_set()
for scenario in scenarios:
folder_name = "preview_{}".format(VH.scenario_into_filename(str(scenario)))
folder_path = os.path.join(RESULTS, folder_name)
if not os.path.exists(folder_path):
os.mkdir(folder_path)
VH.show_scenario(scenario, test_sam, test_sub, visualiser, save_into=folder_path)
# Cycle of experiment runs
for trial_tag in trial_tags:
experiment_path = datafile_path(experiment_name, scenario_set_name, trial_tag)
if not os.path.exists(experiment_path):
os.mkdir(experiment_path)
if 2 in mode:
DFH.run_and_save(predictor_builders, scenario_set, experiment_path)
if 3 in mode:
evals = DFH.datafile_evaluation(experiment_path, {
DFH.TOTAL_ACCURACY: IE.evaluate_task_total_accuracy,
DFH.LOCAL_ACCURACY: IE.evaluate_task_accuracy,
DFH.SUBCLASS_ACCURACY: IE.evaluate_subclass_accuracy,
})
print(evals)
# Stats evaluation
files = [datafile_path(experiment_name, scenario_set_name, trial_tag) for trial_tag in trial_tags]
portfolio = dict([(str(clazz), files) for clazz in predictor_builders])
if 4 in mode:
eval_stats_total = DFH.extract_stats_for_portfolio(portfolio, over_testing_set=True,
task_accuracy_type=DFH.TOTAL_ACCURACY)
table = VH.stats_into_text_table(eval_stats_total, stat_cell_format, cell_join=';', row_join='\n')
print(table)
table_path = os.path.join(RESULTS, "{}_{}_total_accuracy.csv".format(experiment_name, scenario_set_name))
with open(table_path, "w") as fil:
fil.write(table)
print("Saved stats of total accuracy into {}".format(table_path))
if 5 in mode:
figure_styles = [
[("color", "r"), ("marker", "o")],
[("color", "g"), ("marker", "^")],
[("color", "b"), ("marker", "x")],
[("color", "c"), ("marker", "s")],
[("color", "m"), ("marker", "d")],
[("color", "y"), ("marker", "+")],
[("color", "k"), ("marker", "*")],
]
classifier_style = dict(
[(str(clazz), dict([("label", clazz.__name__)] + figure_styles[i % len(figure_styles)]))
for i, clazz in enumerate(predictor_builders)]
)
eval_stats_total = DFH.extract_stats_for_portfolio(portfolio, over_testing_set=True,
task_accuracy_type=DFH.TOTAL_ACCURACY)
scenarios = list(eval_stats_total[list(eval_stats_total.keys())[0]].keys())
print(scenarios)
for i, scenario in enumerate(scenarios):
# picking subclass for tracking
scenario_obj = eval(scenario)
tracked_label = list(scenario_obj[0].keys())[0]
tracked_subclass = scenario_obj[0][tracked_label][-1]
# tracking the selected subclass label assignment
            def tracked_evaluation(_scen, _pred, _subs):  # closure tracking the selected subclass
return IE.evaluate_selected_subclass_accuracy(_scen, _pred, _subs, tracked_subclass, tracked_label)
eval_stats_tracked = DFH.extract_stats_for_portfolio(
portfolio, over_testing_set=True, task_accuracy_type=None, evaluator=tracked_evaluation)
# titles and names
_scenario_str = scenario
if type(scenario) is bytes:
_scenario_str = scenario.decode('ASCII') # sometimes hdf5 returns bytes instead of strings
test_task = str(IE.get_perfect_task_map(scenario_obj, len(scenario_obj) - 1))
tracked_task = "{{{}: [{}]}}".format(tracked_label, tracked_subclass)
title = "Scenario: {}\ntest task {}(full), tracked assignment {}(dashed)".format(
_scenario_str, test_task, tracked_task)
            # visualisation
fig_path = os.path.join(RESULTS, "{}_{}_{}_accuracy.pdf".format(experiment_name, scenario_set_name,
VH.scenario_into_filename(_scenario_str)))
VH.show_metric_evol(eval_stats_total, scenario, classifier_style,
fig_path=fig_path, tracked_eval_stats=eval_stats_tracked, title=title)
print("fig of scenario {} saved into {}".format(scenario, fig_path))
| 54.803191 | 119 | 0.625837 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,752 | 0.267107 |
8b76c38f1e29d8bf142d3e3373941067e32aadc6 | 15,792 | py | Python | core/models.py | admariner/madewithwagtail | a43b3263c0f151ece4994fccd561b0575db4979f | [
"MIT"
]
| null | null | null | core/models.py | admariner/madewithwagtail | a43b3263c0f151ece4994fccd561b0575db4979f | [
"MIT"
]
| null | null | null | core/models.py | admariner/madewithwagtail | a43b3263c0f151ece4994fccd561b0575db4979f | [
"MIT"
]
| null | null | null | import os
import re
from bs4 import BeautifulSoup
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.db import models
from django.db.models import Case, Count, Q, Value, When
from django.utils.encoding import python_2_unicode_compatible
from django.utils.html import mark_safe
from modelcluster.fields import ParentalKey
from modelcluster.tags import ClusterTaggableManager
from taggit.models import Tag, TaggedItemBase
from core import panels
from core.forms import SubmitFormBuilder
from core.utilities import has_recaptcha, validate_only_one_instance
from wagtail.wagtailcore.fields import RichTextField
from wagtail.wagtailcore.models import Page
from wagtail.wagtailforms.models import AbstractEmailForm, AbstractFormField
from wagtail.wagtailsearch import index
from wagtailcaptcha.models import WagtailCaptchaEmailForm
class IndexPage(models.Model):
"""
Abstract Index Page class. Declare a couple of abstract methods that should be implemented by
any class implementing this 'interface'.
"""
def clean(self):
validate_only_one_instance(self)
def children(self):
raise NotImplementedError("Class %s doesn't implement aMethod()" % (self.__class__.__name__))
def get_context(self, request, *args, **kwargs):
raise NotImplementedError("Class %s doesn't implement aMethod()" % (self.__class__.__name__))
class Meta:
abstract = True
class HomePage(Page, IndexPage):
"""
HomePage class, inheriting from wagtailcore.Page straight away
"""
subpage_types = [
'core.CompanyIndex',
'core.SubmitFormPage',
]
feed_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
search_fields = []
body = RichTextField(blank=True, features=['bold', 'italic', 'ol', 'ul', 'link', 'cleanhtml'])
@property
def og_image(self):
# Returns image and image type of feed_image, if exists
image = {'image': None, 'type': None}
if self.feed_image:
image['image'] = self.feed_image
name, extension = os.path.splitext(image['image'].file.url)
image['type'] = extension[1:]
return image
def children(self):
return self.get_children().live()
def get_context(self, request, *args, **kwargs):
# Get pages
pages = WagtailSitePage.objects\
.live()\
.descendant_of(self)\
.order_by('-is_featured', '-latest_revision_created_at')
# Filter by tag
tag = request.GET.get('tag')
if tag:
pages = pages.filter(tags__slug__iexact=tag)
# Pagination
page = request.GET.get('page')
paginator = Paginator(pages, 12) # Show 12 pages per page
try:
pages = paginator.page(page)
except PageNotAnInteger:
pages = paginator.page(1)
except EmptyPage:
pages = paginator.page(paginator.num_pages)
# Update template context
context = super(HomePage, self).get_context(request, *args, **kwargs)
context['pages'] = pages
context['tag'] = tag
# Only tags used by live pages
context['tags'] = Tag.objects.filter(
core_pagetag_items__isnull=False,
core_pagetag_items__content_object__live=True
).annotate(count=Count('core_pagetag_items')).distinct().order_by('-count', 'name')
return context
class Meta:
verbose_name = "Home Page"
content_panels = panels.HOME_PAGE_CONTENT_PANELS
promote_panels = panels.WAGTAIL_PAGE_PROMOTE_PANELS
class CompanyIndex(Page, IndexPage):
"""
HomePage class, inheriting from wagtailcore.Page straight away
"""
parent_types = ['core.HomePage']
subpage_types = ['core.WagtailCompanyPage']
search_fields = []
body = RichTextField(null=True, blank=True, features=['bold', 'italic', 'ol', 'ul', 'link', 'cleanhtml'])
show_map = models.BooleanField(default=False, help_text='Show map of companies around the world.')
def children(self):
return self.get_children().live()
def get_context(self, request, *args, **kwargs):
# Get pages.
# Note: `numchild` includes draft/unpublished pages but does not create additional queries.
pages = WagtailCompanyPage.objects\
.live()\
.descendant_of(self)\
.distinct()\
.order_by('-numchild', '-latest_revision_created_at')
# Filter by tag
tag = request.GET.get('tag')
if tag:
pages = pages.filter(tags__name__iexact=tag)
# Pagination
page = request.GET.get('page')
paginator = Paginator(pages, 12)
try:
pages = paginator.page(page)
except PageNotAnInteger:
pages = paginator.page(1)
except EmptyPage:
pages = paginator.page(paginator.num_pages)
# Update template context
context = super(CompanyIndex, self).get_context(request, *args, **kwargs)
context['pages'] = pages
context['tag'] = tag
return context
class Meta:
verbose_name = "Companies Index Page"
content_panels = panels.WAGTAIL_COMPANY_INDEX_PAGE_CONTENT_PANELS
class PageTag(TaggedItemBase):
content_object = ParentalKey('core.WagtailPage', related_name='tagged_items')
# Main core Page model. All main content pages inherit from this class.
class WagtailPage(Page):
"""
Our main custom Page class. All content pages should inherit from this one.
"""
parent_types = ['core.HomePage']
subpage_types = ['core.WagtailPage']
is_creatable = False
feed_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
body = RichTextField(blank=True, features=['bold', 'italic', 'ol', 'ul', 'link', 'cleanhtml'])
tags = ClusterTaggableManager(through=PageTag, blank=True)
search_fields = []
@property
def parent(self):
try:
return self.get_ancestors().reverse()[0]
except IndexError:
return None
@property
def child(self):
for related_object in self._meta.get_all_related_objects():
if not issubclass(related_object.model, self.__class__):
continue
try:
return getattr(self, related_object.get_accessor_name())
except ObjectDoesNotExist:
pass
@property
def body_text(self):
return BeautifulSoup(self.body, "html5lib").get_text()
@property
def body_excerpt(self):
"""
        Return body text, inserting a space after sentence-ending
        punctuation (. ? !) that is directly followed by a letter
"""
return re.sub(r'([\.?!])([a-zA-Z])', r'\1 \2', self.body_text)
@property
def og_image(self):
# Returns image and image type of feed_image or image as fallback, if exists
image = {'image': None, 'type': None}
if self.feed_image:
image['image'] = self.feed_image
name, extension = os.path.splitext(image['image'].file.url)
image['type'] = extension[1:]
return image
class Meta:
verbose_name = "Content Page"
content_panels = panels.WAGTAIL_PAGE_CONTENT_PANELS
promote_panels = panels.WAGTAIL_PAGE_PROMOTE_PANELS
class WagtailCompanyPage(WagtailPage):
"""
Company page listing a bunch of site pages
"""
parent_types = ['core.HomePage']
subpage_types = ['core.WagtailSitePage']
SITES_ORDERING_ALPHABETICAL = 'alphabetical'
SITES_ORDERING_CREATED = 'created'
SITES_ORDERING_PATH = 'path'
SITES_ORDERING = {
SITES_ORDERING_PATH: {
'name': 'Path (i.e. manual)',
'ordering': ['-path'],
},
SITES_ORDERING_ALPHABETICAL: {
'name': 'Alphabetical',
'ordering': ['title'],
},
SITES_ORDERING_CREATED: {
'name': 'Created',
'ordering': ['-first_published_at'],
},
}
SITES_ORDERING_CHOICES = [
(key, opts['name'])
for key, opts in sorted(SITES_ORDERING.items(), key=lambda k: k[1]['name'])
]
company_url = models.URLField(
blank=True,
null=True,
help_text='The URL of your site, something like "https://www.springload.co.nz"',
)
github_url = models.URLField(null=True, blank=True)
twitter_url = models.URLField(null=True, blank=True)
location = models.CharField(max_length=128, blank=True, null=True)
show_map = models.BooleanField(default=True, help_text='Show company in the map of companies around the world.')
coords = models.CharField(max_length=255, blank=True, null=True)
logo = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
sites_ordering = models.CharField(
max_length=20,
blank=False,
choices=SITES_ORDERING_CHOICES,
default=SITES_ORDERING_CREATED,
help_text='The order the sites will be listed on the page',
)
search_fields = Page.search_fields + [
index.SearchField('company_url', boost=1),
index.SearchField('body_text', boost=1)
]
@property
def lat(self):
if self.coords:
return self.coords.split(",")[0].strip()
else:
return None
@property
def lon(self):
if self.coords:
return self.coords.split(",")[1].strip()
else:
return None
@property
def twitter_handler(self):
if self.twitter_url:
return "@%s" % self.twitter_url.strip('/ ').split("/")[-1]
else:
return None
@property
def github_user(self):
if self.github_url:
return self.github_url.strip('/ ').split("/")[-1]
else:
return None
@property
def children_count(self):
return self.children().count()
@property
def og_image(self):
# Returns image and image type of logo or feed_image as fallback, if exists
image = {'image': None, 'type': None}
if self.logo:
image['image'] = self.logo
elif self.feed_image:
image['image'] = self.feed_image
name, extension = os.path.splitext(image['image'].file.url)
image['type'] = extension[1:]
return image
def children(self):
user_ordering = self.SITES_ORDERING[self.sites_ordering]['ordering']
pages = WagtailSitePage.objects.live().filter(Q(path__startswith=self.path) | Q(in_cooperation_with=self))
# When ordering by `path`, the collaborations would either all be listed first or last
# depending on whether the collaborator(s) page(s) was created before or after this page.
# Adding an overwrite here so collaborations always appear last.
if self.sites_ordering == self.SITES_ORDERING_PATH:
            pages = pages.annotate(
                is_own=Case(
                    When(path__startswith=self.path, then=Value(True)),
                    default=Value(False),
                    output_field=models.BooleanField(),
                )
            ).order_by('-is_own', *user_ordering)
# When ordering alphabetically or by creation date,
# own sites and collaboration sites will be sorted together.
else:
pages = pages.order_by(*user_ordering)
return pages
def get_context(self, request, *args, **kwargs):
# Get pages
pages = self.children()
# Pagination
page = request.GET.get('page')
paginator = Paginator(pages, 12) # Show 12 pages per page
try:
pages = paginator.page(page)
except PageNotAnInteger:
pages = paginator.page(1)
except EmptyPage:
pages = paginator.page(paginator.num_pages)
# Update template context
context = super(WagtailCompanyPage, self).get_context(request, *args, **kwargs)
context['pages'] = pages
return context
@property
def sites_count(self):
# Note: It uses `self.numchild` which counts draft/unpublished pages but does not create additional queries.
return self.get_children_count()
class Meta:
verbose_name = "Company Page"
content_panels = panels.WAGTAIL_COMPANY_PAGE_CONTENT_PANELS
settings_panels = panels.WAGTAIL_COMPANY_PAGE_SETTINGS_PANELS
@python_2_unicode_compatible
class WagtailSitePage(WagtailPage):
"""
Site page
"""
parent_types = ['core.WagtailCompanyPage']
subpage_types = []
is_featured = models.BooleanField(
"Featured",
default=False,
blank=False,
help_text='If enabled, this site will appear on top of the sites list of the homepage.'
)
site_screenshot = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
help_text=mark_safe(
'Use a <b>ratio</b> of <i>16:13.28</i> '
'and a <b>size</b> of at least <i>1200x996 pixels</i> '
'for an optimal display.'
),
)
site_url = models.URLField(
blank=True,
null=True,
help_text='The URL of your site, something like "https://www.springload.co.nz"',
)
in_cooperation_with = models.ForeignKey(
'core.WagtailCompanyPage',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
)
search_fields = Page.search_fields + [
index.SearchField('site_url'),
index.SearchField('body_text')
]
@property
def og_image(self):
# Returns image and image type of feed_image, if exists
image = {'image': None, 'type': None}
if self.feed_image:
image['image'] = self.feed_image
elif self.site_screenshot:
image['image'] = self.site_screenshot
name, extension = os.path.splitext(image['image'].file.url)
image['type'] = extension[1:]
return image
def __str__(self):
if self.site_url:
return '%s - %s' % (self.title, self.site_url)
return self.title
class Meta:
verbose_name = "Site Page"
content_panels = panels.WAGTAIL_SITE_PAGE_CONTENT_PANELS
promote_panels = panels.WAGTAIL_SITE_PAGE_PROMOTE_PANELS
class SubmitFormField(AbstractFormField):
page = ParentalKey('SubmitFormPage', related_name='form_fields')
class SubmitFormPage(WagtailCaptchaEmailForm if has_recaptcha() else AbstractEmailForm):
"""
Form page, inherits from WagtailCaptchaEmailForm if available, otherwise fallback to AbstractEmailForm
"""
def __init__(self, *args, **kwargs):
super(SubmitFormPage, self).__init__(*args, **kwargs)
# WagtailCaptcha does not respect cls.form_builder and overwrite with its own.
# See https://github.com/springload/wagtail-django-recaptcha/issues/7 for more info.
self.form_builder = SubmitFormBuilder
parent_types = ['core.HomePage']
subpage_types = []
search_fields = []
body = RichTextField(blank=True, help_text='Edit the content you want to see before the form.')
thank_you_text = RichTextField(blank=True, help_text='Set the message users will see after submitting the form.')
class Meta:
verbose_name = "Form Page"
content_panels = panels.SUBMIT_FORM_PAGE_CONTENT_PANELS
| 31.967611 | 117 | 0.634435 | 14,748 | 0.933891 | 0 | 0 | 4,804 | 0.304205 | 0 | 0 | 3,876 | 0.245441 |
8b772e552dd2f4d89f3edbd1233977b33bf49895 | 542 | py | Python | solutions/593_valid_square.py | YiqunPeng/leetcode_pro | 7e6376984f9baec49a5e827d98330fe3d1b656f0 | ["MIT"] | null | null | null | solutions/593_valid_square.py | YiqunPeng/leetcode_pro | 7e6376984f9baec49a5e827d98330fe3d1b656f0 | ["MIT"] | null | null | null | solutions/593_valid_square.py | YiqunPeng/leetcode_pro | 7e6376984f9baec49a5e827d98330fe3d1b656f0 | ["MIT"] | null | null | null |
from typing import List
class Solution:
    def validSquare(self, p1: List[int], p2: List[int], p3: List[int], p4: List[int]) -> bool:
        """Compare the six pairwise squared distances.
        A valid square gives four equal positive sides plus two equal
        diagonals whose squared length is twice the side's.
        Running time: O(1)
        """
        v = [p1, p2, p3, p4]
        d = sorted(
            (p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2
            for i, p in enumerate(v)
            for q in v[i + 1:]
        )
        # Non-degenerate, four equal sides, two equal diagonals, and the
        # square relation diagonal^2 == 2 * side^2 (integer-exact here).
        return d[0] > 0 and d[0] == d[3] and d[4] == d[5] and d[4] == 2 * d[0]
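if __name__ == "__main__":
    # Quick sanity checks (added illustration, not part of the original file):
    sol = Solution()
    print(sol.validSquare([0, 0], [1, 1], [1, 0], [0, 1]))   # True
    print(sol.validSquare([0, 0], [4, 0], [1, 2], [1, -2]))  # False: a kite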
| 36.133333 | 102 | 0.389299 | 533 | 0.983395 | 0 | 0 | 0 | 0 | 0 | 0 | 48 | 0.088561 |
8b77b588bbd23056762b56f743c2a98bf3afca31 | 868 | py | Python | calvin/runtime/south/plugins/storage/twistedimpl/securedht/tests/cert_script.py | josrolgil/exjobbCalvin | 976459eaa50246586360c049b9880d753623d574 | ["Apache-2.0"] | 1 | 2016-05-10T22:36:31.000Z | 2016-05-10T22:36:31.000Z | calvin/runtime/south/plugins/storage/twistedimpl/securedht/tests/cert_script.py | josrolgil/exjobbCalvin | 976459eaa50246586360c049b9880d753623d574 | ["Apache-2.0"] | null | null | null | calvin/runtime/south/plugins/storage/twistedimpl/securedht/tests/cert_script.py | josrolgil/exjobbCalvin | 976459eaa50246586360c049b9880d753623d574 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python
from calvin.utilities import certificate
import os
print "Trying to create a new domain configuration."
testconfig = certificate.Config(domain="test")
# testconfig2 = certificate.Config(domain="evil")
print "Reading configuration successfull."
print "Creating new domain."
certificate.new_domain(testconfig)
# certificate.new_domain(testconfig2)
print "Created new domain."
for i in range(1, 5):
for j in range(0, 6):
name = "node{}:{}".format(i, j)
certreq = certificate.new_runtime(testconfig, name)
certificate.sign_req(testconfig, os.path.basename(certreq), name)
certreq = certificate.new_runtime(testconfig, "evil")
certificate.sign_req(testconfig, os.path.basename(certreq), "evil")
# certreq = certificate.new_runtime(testconfig, "evil2")
# certificate.sign_req(testconfig2, os.path.basename(certreq), "evil2")
| 36.166667 | 73 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 384 | 0.442396 |
8b78243e120efed83eabeee3ef9fab1fbb90cb9c | 2,526 | py | Python | users/migrations/0001_initial.py | bhaveshpraveen/VIT-Pugaar | 0a33b264939287071ddaffef4ab1f2ef9a38de87 | ["MIT"] | null | null | null | users/migrations/0001_initial.py | bhaveshpraveen/VIT-Pugaar | 0a33b264939287071ddaffef4ab1f2ef9a38de87 | ["MIT"] | 6 | 2017-11-11T08:43:55.000Z | 2021-06-10T19:38:24.000Z | users/migrations/0001_initial.py | bhaveshpraveen/VIT-Pugaar | 0a33b264939287071ddaffef4ab1f2ef9a38de87 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-10-23 05:30
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('hostel', '0001_initial'),
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('registration_number', models.CharField(max_length=15, primary_key=True, serialize=False, unique=True)),
('email', models.EmailField(max_length=255, verbose_name='email address')),
('first_name', models.CharField(max_length=50)),
('middle_name', models.CharField(blank=True, max_length=50)),
('last_name', models.CharField(max_length=50)),
('phone_number', models.CharField(max_length=15, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.", regex='^\\+?1?\\d{10}$')])),
('is_active', models.BooleanField(default=True)),
('admin', models.BooleanField(default=False)),
('staff', models.BooleanField(default=False)),
('room_no', models.IntegerField(blank=True, null=True)),
('block', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='users', to='hostel.Block')),
('floor', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='users', to='hostel.Floor')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| 56.133333 | 266 | 0.641726 | 2,305 | 0.91251 | 0 | 0 | 0 | 0 | 0 | 0 | 729 | 0.288599 |
8b789e81aabae4e0b2a7953dacad2a13826e5a3e | 93 | py | Python | runoob100/032.py | GenweiWu/PythonDemo | 957bacb6fc0eb0bc37c4af7a64220d8aa58189ba | [
"MIT"
]
| null | null | null | runoob100/032.py | GenweiWu/PythonDemo | 957bacb6fc0eb0bc37c4af7a64220d8aa58189ba | [
"MIT"
]
| null | null | null | runoob100/032.py | GenweiWu/PythonDemo | 957bacb6fc0eb0bc37c4af7a64220d8aa58189ba | [
"MIT"
]
| null | null | null | # _*_ coding:utf-8 _*_
# Output the list values in reverse order.
arr=["aaa",True,100,"ccc"]
print arr
print arr[::-1] | 11.625 | 26 | 0.634409 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 72 | 0.605042 |
8b790365631a765420b493cba01b292fac4bc258 | 475 | py | Python | ArrangingCoins.py | Jcarlos0828/LeetCode-PracticeResults | 73566a131629038caf2555eaf4999379227ec369 | [
"MIT"
]
| 1 | 2019-06-26T22:44:16.000Z | 2019-06-26T22:44:16.000Z | ArrangingCoins.py | Jcarlos0828/LeetCode-PracticeResults | 73566a131629038caf2555eaf4999379227ec369 | [
"MIT"
]
| null | null | null | ArrangingCoins.py | Jcarlos0828/LeetCode-PracticeResults | 73566a131629038caf2555eaf4999379227ec369 | [
"MIT"
]
| null | null | null | '''
EASY 441. Arranging Coins
You have a total of n coins that you want to form in
a staircase shape, where every k-th row must have exactly k coins.
'''
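# Closed-form alternative (illustrative, not part of the original submission):
# the answer is the largest k with k*(k+1)/2 <= n, i.e. k = floor((sqrt(8n+1) - 1) / 2).
import math

def arrange_coins_closed_form(n: int) -> int:
    # math.isqrt gives the exact integer floor of the square root
    return (math.isqrt(8 * n + 1) - 1) // 2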
class Solution:
def arrangeCoins(self, n: int) -> int:
rows = [0]
count = 1
def recur(n, count):
if n - count >= 0:
rows[0] += 1
count += 1
recur(n-count+1, count)
return
recur(n, count)
return rows[0] | 26.388889 | 66 | 0.511579 | 319 | 0.671579 | 0 | 0 | 0 | 0 | 0 | 0 | 155 | 0.326316 |
8b7cf31b94df4dc51935676b554357efa86d4611 | 1,167 | py | Python | stable_projects/predict_phenotypes/Nguyen2020_RNNAD/cbig/Nguyen2020/test_rnn.py | marielacour81/CBIG | 511af756c6ddabbd3a9681ce3514b79ef5aaaf3f | [
"MIT"
]
| 6 | 2020-03-03T22:23:07.000Z | 2021-11-27T06:11:02.000Z | stable_projects/predict_phenotypes/Nguyen2020_RNNAD/cbig/Nguyen2020/test_rnn.py | marielacour81/CBIG | 511af756c6ddabbd3a9681ce3514b79ef5aaaf3f | [
"MIT"
]
| null | null | null | stable_projects/predict_phenotypes/Nguyen2020_RNNAD/cbig/Nguyen2020/test_rnn.py | marielacour81/CBIG | 511af756c6ddabbd3a9681ce3514b79ef5aaaf3f | [
"MIT"
]
| 2 | 2020-05-27T20:24:03.000Z | 2021-04-14T07:51:44.000Z | # Written by Minh Nguyen and CBIG under MIT license:
# https://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md
import unittest
import torch
import cbig.Nguyen2020.rnn as rnn
class RnnCellTest(unittest.TestCase):
""" Unit tests for recurrent cells """
def setUp(self):
torch.manual_seed(0)
self.in_features = 10
self.hidden_size = 20
self.batch_size = 3
self.length = 15
def test_MinimalRNNCell(self):
cell = rnn.MinimalRNNCell(self.in_features, self.hidden_size)
seq = torch.randn(self.length, self.batch_size, self.in_features)
h_t = torch.randn(self.batch_size, self.hidden_size)
for i in range(self.length):
h_t = cell(seq[i], h_t)
self.assertAlmostEqual(h_t.sum().item(), -3.026607, 6)
def test_LssCell(self):
cell = rnn.LssCell(self.in_features, self.hidden_size)
seq = torch.randn(self.length, self.batch_size, self.in_features)
h_t = torch.randn(self.batch_size, self.hidden_size)
for i in range(self.length):
h_t = cell(seq[i], h_t)
self.assertAlmostEqual(h_t.sum().item(), 60.245380, 6)
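
# Entry point (assumption: the tests are meant to be runnable directly; the
# original module relied on an external test runner instead).
if __name__ == '__main__':
    unittest.main()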
| 33.342857 | 73 | 0.658098 | 986 | 0.844901 | 0 | 0 | 0 | 0 | 0 | 0 | 151 | 0.129392 |
8b7e0f2a1f8d7363c4a7045709aa260449c86b2e | 4,816 | py | Python | mysite/myapp/forms.py | MarkArren/PhotoSocial | bb401f465a464e7cf6a7fac184cef0d40e0a9525 | [
"MIT"
]
| null | null | null | mysite/myapp/forms.py | MarkArren/PhotoSocial | bb401f465a464e7cf6a7fac184cef0d40e0a9525 | [
"MIT"
]
| null | null | null | mysite/myapp/forms.py | MarkArren/PhotoSocial | bb401f465a464e7cf6a7fac184cef0d40e0a9525 | [
"MIT"
]
| null | null | null | from django import forms
from django.forms import ModelForm
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.core.validators import EmailValidator
from . import models
from .models import ProfileModel
from io import BytesIO
from PIL import Image, ExifTags
from django.core.files import File
def compressImage(image):
maxWidth = 440
# Open image and get bytes
imageTmp = Image.open(image).convert('RGB')
imageIO = BytesIO()
try:
# Rotate image if 'Orientation' included in metadata
# From https://stackoverflow.com/questions/13872331/rotating-an-image-with-orientation-specified-in-exif-using-python-without-pil-in
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation]=='Orientation':
break
exif=dict(imageTmp.getexif().items())
if exif[orientation] == 3:
imageTmp=imageTmp.rotate(180, expand=True)
elif exif[orientation] == 6:
imageTmp=imageTmp.rotate(270, expand=True)
elif exif[orientation] == 8:
imageTmp=imageTmp.rotate(90, expand=True)
except (AttributeError, KeyError, IndexError):
pass
# Get image attributes
width, height = imageTmp.size
newWidth = width
newHeight = height
    # Check if the image needs to be cropped
crop = False
if width/height > 1.7:
# Image is too wide so cut width
ratio = height/9
newWidth = 16 * ratio
newHeight = height
crop = True
print("too wide")
elif width/height < 0.8:
# image is too tall so cut height
ratio = width / 8
newWidth = width
newHeight = 10 * ratio
crop = True
print("too tall")
if crop:
# Crop
left = (width - newWidth) / 2
top = (height - newHeight)/2
right = (width + newWidth)/2
bottom = (height + newHeight)/2
imageTmp = imageTmp.crop((left, top, right, bottom))
print("cropped")
# Resize image
ratio = maxWidth/newWidth
newWidth = newWidth * ratio
newHeight = newHeight * ratio
imageTmp = imageTmp.resize((int(newWidth), int(newHeight)))
print("resized")
# Convert to bytes, save and compress
imageTmp.save(imageIO, format='JPEG', optimize=True, quality=60)
return File(imageIO, name=image.name)
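
# Usage sketch (illustrative, not from the original app): compressImage accepts
# any Django File-like object, for example:
#     from django.core.files.uploadedfile import SimpleUploadedFile
#     small = compressImage(SimpleUploadedFile("photo.jpg", raw_jpeg_bytes))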
class PostForm(forms.Form):
image = forms.ImageField(label="Upload Image", required=True)
caption = forms.CharField(label="Caption", max_length=512, required=False, widget=forms.TextInput(attrs={'placeholder': 'Caption'}))
location = forms.CharField(label="Location", max_length=50, required=False, widget=forms.TextInput(attrs={'placeholder': 'Location'}))
def save(self, request):
postInstance = models.PostModel()
postInstance.image = compressImage(self.cleaned_data["image"])
postInstance.caption = self.cleaned_data["caption"]
postInstance.location = self.cleaned_data["location"]
profile = models.ProfileModel.objects.filter(user=request.user.id)
postInstance.profile = profile[0]
postInstance.save()
return postInstance
# class PostForm(ModelForm):
# class meta:
# model = models.PostModel
# fields = ('image', 'caption', 'location')
def must_be_unique_email(value):
user = User.objects.filter(email=value)
if len(user) > 0:
raise forms.ValidationError("Email Already Exists")
return value
def must_be_unique_username(value):
user = User.objects.filter(username=value)
if len(user) > 0:
raise forms.ValidationError("Username Already Exists")
return value
class RegistrationForm(UserCreationForm):
# email = forms.EmailField(
# label="Email",
# required=True,
# validators=[EmailValidator]
# )
username = forms.CharField(label='Username',
required=True,
max_length=30
)
class Meta:
model = User
fields = ("username",
"password1", "password2")
def save(self, commit=True):
user = super(RegistrationForm, self).save(commit=False)
# user.email = self.cleaned_data["email"]
if commit:
user.save()
return user
# def __init__(self, *args, **kwargs):
# super(RegistrationForm, self).__init__(*args, **kwargs)
# self.fields['fullname'] = user.first_name + user.last_name
class ProfileForm(ModelForm):
class Meta:
model = ProfileModel
fields = ('profilePicture', 'fullname', 'email', 'bio')
class UserUpdateForm(forms.ModelForm):
class Meta:
model = User
fields = ('username', 'email')
# class ProfileForm(forms.Form):
# profilePicture = forms.ImageField(label="Profile Picture", required=False)
# bio = forms.CharField(label="Bio", max_length=512, required=False)
# def save(self, request):
# profileInstance = models.PostModel()
# postInstance.user = request.user
# profileInstance.profilePicture = self.cleaned_data["profilePicture"]
# profileInstance.bio = self.cleaned_data["bio"]
# profileInstance.save()
# return profileInstance
| 28.163743 | 135 | 0.706811 | 1,686 | 0.350083 | 0 | 0 | 0 | 0 | 0 | 0 | 1,640 | 0.340532 |
8b80a8a516beaa5b7d7dde65eb8c098754473d58 | 1,442 | py | Python | up/tasks/sparse/models/heads/cls_head.py | ModelTC/EOD | 164bff80486e9ae6a095a97667b365c46ceabd86 | [
"Apache-2.0"
]
| 196 | 2021-10-30T05:15:36.000Z | 2022-03-30T18:43:40.000Z | up/tasks/sparse/models/heads/cls_head.py | ModelTC/EOD | 164bff80486e9ae6a095a97667b365c46ceabd86 | [
"Apache-2.0"
]
| 12 | 2021-10-30T11:33:28.000Z | 2022-03-31T14:22:58.000Z | up/tasks/sparse/models/heads/cls_head.py | ModelTC/EOD | 164bff80486e9ae6a095a97667b365c46ceabd86 | [
"Apache-2.0"
]
| 23 | 2021-11-01T07:26:17.000Z | 2022-03-27T05:55:37.000Z | from up.utils.general.registry_factory import MODULE_ZOO_REGISTRY
from up.tasks.cls.models.heads import BaseClsHead, ConvNeXtHead
__all__ = ['SparseBaseClsHead', 'SparseConvNeXtHead']
@MODULE_ZOO_REGISTRY.register('sparse_base_cls_head')
class SparseBaseClsHead(BaseClsHead):
def __init__(self, num_classes, in_plane, input_feature_idx=-1, use_pool=True, dropout=None):
        super(SparseBaseClsHead, self).__init__(num_classes, in_plane,
                                                input_feature_idx=input_feature_idx,
                                                use_pool=use_pool, dropout=dropout)
def forward_net(self, x):
x = x['features'][self.input_feature_idx]
x = self.get_pool_output(x)
x = self.get_dropout(x)
logits = self.get_logits(x)
return {'logits': logits}
@MODULE_ZOO_REGISTRY.register('sparse_convnext_head')
class SparseConvNeXtHead(ConvNeXtHead):
def __init__(self,
num_classes,
in_plane,
input_feature_idx=-1,
head_init_scale=1.,
use_pool=True,
dropout=None):
super(SparseConvNeXtHead, self).__init__(num_classes, in_plane, input_feature_idx, use_pool, dropout)
def forward_net(self, x):
x = x['features'][self.input_feature_idx]
x = self.get_pool_output(x)
x = self.layer_norm(x)
x = self.get_dropout(x)
logits = self.get_logits(x)
return {'logits': logits}
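# Usage sketch (assumption about the UP config convention, not verified from
# this file): the names registered above are what a YAML config references,
# e.g. type: sparse_base_cls_head with kwargs: {num_classes: 1000, in_plane: 2048}.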
| 36.974359 | 109 | 0.647018 | 1,143 | 0.792649 | 0 | 0 | 1,251 | 0.867545 | 0 | 0 | 119 | 0.082524 |
8b8159fcb82d3a08050148abdcf3102b1846cbb7 | 4,753 | py | Python | app/xl/long_runner.py | evgeniyabrosin/anfisa | ac4aef1a816de05ee2a45aa5b220e2baf93574de | [
"Apache-2.0"
]
| 8 | 2019-03-26T16:07:46.000Z | 2021-12-30T13:38:06.000Z | app/xl/long_runner.py | evgeniyabrosin/anfisa | ac4aef1a816de05ee2a45aa5b220e2baf93574de | [
"Apache-2.0"
]
| 13 | 2018-11-07T19:37:20.000Z | 2022-02-21T17:11:45.000Z | app/xl/long_runner.py | evgeniyabrosin/anfisa | ac4aef1a816de05ee2a45aa5b220e2baf93574de | [
"Apache-2.0"
]
| 15 | 2018-10-16T08:15:11.000Z | 2022-02-21T14:07:29.000Z | # Copyright (c) 2019. Partners HealthCare and other members of
# Forome Association
#
# Developed by Sergey Trifonov based on contributions by Joel Krier,
# Michael Bouzinier, Shamil Sunyaev and other members of Division of
# Genetics, Brigham and Women's Hospital
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from threading import Condition
from datetime import datetime
from forome_tools.job_pool import ExecutionTask
from forome_tools.log_err import logException
from app.config.a_config import AnfisaConfig
#===============================================
class XL_LongRunner_DTreeCounts(ExecutionTask):
def __init__(self, ds_h, rq_id, dtree_h, point_idxs = None):
ExecutionTask.__init__(self, "dtree-counts")
self.mDS = ds_h
self.mRqID = rq_id
self.mDTreeH = dtree_h
self.mCondition = Condition()
self.mCounts = [None] * len(dtree_h)
self.mFailureCount = 0
self.mNextPointIdxs = []
self.mTimeAccess = datetime.now()
for idx in (range(len(dtree_h))
if point_idxs is None else point_idxs):
if dtree_h.pointNotActive(idx):
self.mCounts[idx] = self.mDS.getEvalSpace().makeEmptyCounts()
else:
self.mNextPointIdxs.append(idx)
def getTaskType(self):
return "dtree-counts"
def outOfDate(self, cur_datetime):
with self.mDS:
return (self.mCondition is None
and cur_datetime - self.mTimeAccess
                    > AnfisaConfig.configOption("long.run.passtime"))
def execIt(self):
while True:
with self.mDS:
if len(self.mNextPointIdxs) == 0:
break
idx = self.mNextPointIdxs[0]
try:
with self.mCondition:
self.mCondition.notify_all()
counts = self.mDS.getEvalSpace().evalTotalCounts(
self.mDTreeH.getActualCondition(idx))
except Exception as err:
logException("Long run exception in DS=%s"
% self.mDS.getName())
self.mFailureCount += 1
if self.mFailureCount > AnfisaConfig.configOption(
"long.run.failures"):
raise err
else:
continue
with self.mDS:
self.mTimeAccess = datetime.now()
self.mCounts[idx] = counts
if counts[0] == 0 and self.mDTreeH.checkZeroAfter(idx):
for idx1 in range(idx, len(self.mCounts)):
self.mCounts[idx1] = counts[:]
for j, pcounts in enumerate(self.mCounts):
if pcounts is not None and j in self.mNextPointIdxs:
self.mNextPointIdxs.remove(j)
with self.mDS:
with self.mCondition:
self.mCondition.notify_all()
self.mCondition = None
return False
def getEvaluatedCounts(self, next_points = None, time_end = None):
condition = None
with self.mDS:
if next_points is not None:
next_points_idxs = []
for idx in next_points:
if (0 <= idx < len(self.mCounts)
and self.mCounts[idx] is None):
next_points_idxs.append(idx)
for idx in self.mNextPointIdxs:
if (idx not in next_points_idxs
and self.mCounts[idx] is None):
next_points_idxs.append(idx)
self.mNextPointIdxs = next_points_idxs
while time_end is not None:
time_now = datetime.now()
if time_now >= time_end:
break
with self.mDS:
condition = self.mCondition
if condition is None:
break
timeout = (time_end - time_now).total_seconds()
with condition:
condition.wait(timeout)
with self.mDS:
self.mTimeAccess = datetime.now()
return self.mCounts[:]
#===============================================
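# Usage sketch (illustrative): a request handler typically submits the task to
# the dataset's job pool and then polls it, e.g.
#     task = XL_LongRunner_DTreeCounts(ds_h, rq_id, dtree_h)
#     counts = task.getEvaluatedCounts(next_points=[0, 1],
#         time_end=datetime.now() + timedelta(seconds=1))
# (timedelta is not imported by this module; it is only needed for the sketch.)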
| 39.608333 | 77 | 0.564696 | 3,672 | 0.772565 | 0 | 0 | 0 | 0 | 0 | 0 | 1,001 | 0.210604 |
8b8201f75514c47ff34e925027bea925196f4d34 | 23,209 | py | Python | cosmos_virtual_assistant_uf.py | Nishit014/COSMOS | 3042377715f6f4b0eb0a75b6b360415a965754df | [
"MIT"
]
| 1 | 2021-06-27T11:53:43.000Z | 2021-06-27T11:53:43.000Z | cosmos_virtual_assistant_uf.py | Aayush9027/COSMOS_VIRTUAL_ASSISTANT | d02aa04a66b2acdfeaf9270607059182f54e78a5 | [
"MIT"
]
| null | null | null | cosmos_virtual_assistant_uf.py | Aayush9027/COSMOS_VIRTUAL_ASSISTANT | d02aa04a66b2acdfeaf9270607059182f54e78a5 | [
"MIT"
]
| 1 | 2021-06-25T12:04:24.000Z | 2021-06-25T12:04:24.000Z | import pyttsx3
import speech_recognition as sr
import os
import subprocess
#from requests import request , session
#from pprint import pprint as pp
import json
import requests
import datetime
from datetime import date
import time
import calendar
import warnings
import random
import wikipedia
import webbrowser
from pywhatkit import sendwhatmsg_instantly
import smtplib
import sys
import pyjokes
import pyautogui
import PyPDF2
from tkinter.filedialog import *
import psutil
import speedtest
import wolframalpha
warnings.filterwarnings("ignore") #ignoring all the warnings
if sys.platform == "win32":
engine=pyttsx3.init('sapi5')
voices=engine.getProperty('voices')
engine.setProperty('voice',voices[1].id)
else:
engine=pyttsx3.init('nsss') #sapi5 - SAPI5 on Windows #nsss - NSSpeechSynthesizer on Mac OS X #espeak - eSpeak on every other platform
voices=engine.getProperty('voices')
#for i in range(48):
#print(voices[i].id)
engine.setProperty('voice',voices[10].id)#10b 17 26 28 37 39
def speak(audio): #fn for talking txt to spch,audio is string
engine.say(audio)#say fn for speaking
print(audio)
engine.runAndWait()
def take_command():
r=sr.Recognizer()
with sr.Microphone() as source:
        print('Go ahead, I am listening...')
#r.pause_threshold=1
r.adjust_for_ambient_noise(source)
audio=r.listen(source)
try:
        print('Hold on a moment, recognizing...')
query=r.recognize_google(audio,language='en-in')
print(f'User said:{query}\n')
except:
speak("There was some problem please try again")
return "None"
return query
def wish():
hour = int(datetime.datetime.now().hour)
if hour>=0 and hour<12:
speak("Good Morning!")
elif hour>=12 and hour<18:
speak("Good Afternoon!")
else:
speak("Good Evening!")
speak("I am COSMOS. How may I help you")
def open_file(filename,filename1):
if sys.platform == "win32":
os.startfile(filename)
else:
try:
opener = f'/Applications/{filename}.app/Contents/MacOS/{filename1}'
subprocess.call([opener])
except:
opener = f'/System/Applications/{filename}.app/Contents/MacOS/{filename1}'
subprocess.call([opener])
def sendEmail(to,content):
server=smtplib.SMTP("smtp.gmail.com",587)
server.ehlo()
server.starttls()
server.login("email","password")
server.sendmail("email id",to,content)
server.close()
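    # NOTE (assumption): "email"/"password" above are placeholders to replace;
    # Gmail SMTP normally requires an app password generated for the account.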
def news():
#https://newsapi.org/ ##get apikey from here
api_key='Your api key here!!!'
main_url = f'http://newsapi.org/v2/top-headlines?sources=techcrunch&apiKey={api_key}'
main_page = requests.get(main_url).json()
# print(main_page)
articles = main_page["articles"]
# print(articles)
head = []
numbers=["first","second","third","fourth","fifth"]
for ar in articles:
head.append(ar["title"])
for i in range (len(numbers)):
speak(f"today's {numbers[i]} news is: {head[i]}")
def crypto(slug):
#https://coinmarketcap.com/ ##get apikey from here
apiurl='https://pro-api.coinmarketcap.com'
headers = {
'Accepts': 'application/json',
'X-CMC_PRO_API_KEY': 'Your api key here!!!',
}
session=requests.session()
session.headers.update(headers)
def coins_price(apiurl,slug):
url=apiurl+'/v1/cryptocurrency/quotes/latest'
parameters={'slug':slug}
r=session.get(url,params=parameters)
data=r.json()['data']
all=str(data)
x=all.find('price')
all=all[x:x+20]
for p in all.split():
try:
float(p)
price=p
except:
pass
speak(f'{slug} price is {price}')
return price
#pp(coins_price(apiurl,slug))
coins_price(apiurl,slug)
def weather():
def loc():
try:
ipadd=requests.get("https://api.ipify.org").text
url="https://get.geojs.io/v1/ip/geo/"+ipadd+".json"
geo_requests= requests.get(url)
geo_data=geo_requests.json()
city=geo_data['city']
except:
city='delhi'
return city
#https://home.openweathermap.org/ ##get apikey from here
api_key = 'Your api key here!!!'
base_url = 'https://api.openweathermap.org/data/2.5/weather?'
city_name = loc()
url = base_url + "&q=" + city_name + "&appid=" + api_key
session=requests.session()
r = session.get(url)
data = r.json()
#data
if data["cod"] != "404":
y = data["main"]
current_temperature = y["temp"]
current_humidiy = y["humidity"]
z = data["weather"]
weather_description = z[0]["description"]
#print(" Temperature is " +str(int(current_temperature-273.15)) +" degree celcius\n humidity is " + str(current_humidiy) +"%\n description " + str(weather_description))
speak(" Temperature is " +str(int(current_temperature-273.15)) +" degree celcius\n humidity is " + str(current_humidiy) +"%\n with " + str(weather_description)+'in '+city_name)
def pdf_reader():
book=askopenfilename()
try:
pdfreader=PyPDF2.PdfFileReader(book)
pages=pdfreader.numPages
speak(f"Total numbers of pages in this pdf are {pages}")
speak("sir please enter the page number you want me to read")
pg=int(input("please enter the page number:"))
for num in range(pg,pages):
            page=pdfreader.getPage(num)
text=page.extractText()
speak(text)
    except:
        speak("Operation cancelled!")
def adv_search():
query=input('Question: ')
#https://products.wolframalpha.com/api/ ##get apikey from here
app_id='Your api key here!!!'
client=wolframalpha.Client(app_id)
if 'no thanks' in query or 'thanks' in query or 'close advance search mode' in query:
speak('closing advance search mode')
else:
res=client.query(query)
ans=next(res.results).text
speak(ans)
speak('want to search anything else?')
adv_search()
def TaskExecution():
# function for coin toss task
def htLine1():
speak("It's " + res)
def htLine2():
speak("You got " + res)
def htLine3():
speak("It landed on " + res)
wish()
bye=True
while bye:
query=take_command().lower()
#query=input() ##comment above and remove this for typing instead of speaking for testing
# Tasks
if "what is your name" in query:
            speak('I am COSMOS, your virtual assistant.')
continue
if "tell me about yourself" in query:
            speak('I am COSMOS, your virtual assistant. What can I do for you?')
continue
        elif 'why cosmos' in query or 'why is your name cosmos' in query:
            speak("Just like the cosmos is filled with endless possibilities, this program also has endless possibilities, and that's why COSMOS")
continue
elif 'price of' in query or 'tell me the price of' in query:
query=query.replace('tell me the price of ','')
query=query.replace('price of ','')
crypto(query)
speak('need something else?')
elif 'weather' in query:
#query=query.replace('how is the weather in',' ')## can be made to take location ##not implemented
#query=query.replace('weather in',' ')
#query=query.replace('weather',' ')
weather()
speak('need something else?')
elif "open notepad" in query:
npath="C:\\WINDOWS\\system32\\notepad.exe"
os.startfile(npath)
elif "open command prompt" in query:
os.system("start cmd")
bye=False
elif 'the time' in query:
strTime=datetime.datetime.now().strftime('%H:%M')
#print(f'its {strTime}')
speak(f'its {strTime}')
speak('you want me to do anything else?')
elif "todays date" in query or "the date"in query:
today = date.today()
d2 = today.strftime("%B %d, %Y")
speak(f"Today is {d2}")
speak('you want me to do anything else?')
elif "ip address" in query:
ip=requests.get('https://api.ipify.org').text#.text returns ip in unicode
speak(f"Your IP Address is {ip}")
speak('you want me to do anything else?')
elif 'wikipedia' in query:
speak('Searching in wikipedia')
query=query.replace('wikipedia',' ')
results=wikipedia.summary(query,sentences=2)
speak('According to wikipedia')
#print(results)
speak(results)
speak('you want me to do anything else')
elif 'open google' in query:
webbrowser.open("https://google.com")
bye=False
elif 'open youtube' in query:
webbrowser.open('https://youtube.com')
bye=False
elif 'what is' in query:
#query=query.replace('what is',' ')
result=wikipedia.summary(query,sentences=2)
#print(result)
speak(result)
speak('anything else?')
elif 'search in youtube' in query or 'open in youtube' in query: #search in youtube
query=query.replace('search in youtube',' ')
query=query.replace('open in youtube',' ')
webbrowser.open(f'https://www.youtube.com/results?search_query={query}')
            speak(f'searching in youtube {query}')
bye=False
#walframalpha
elif 'advance search mode' in query or 'advanced search mode' in query:
##not gonna work by speaking input
speak('Advance search mode activated')
try:
adv_search()
except Exception as e:
speak("Sorry,I am currently unable to find the answers.Please try again later")
speak('do you want me to do anything else?')
continue
elif 'search' in query or 'search in google' in query or 'open in google' in query: #search in google tab
query=query.replace('search',' ')
query=query.replace('search in google',' ')
query=query.replace('open in google',' ')
webbrowser.open(f"https://google.com/search?q={query}")
speak(f'searching in google {query}')
bye=False
elif ("open gfg" in query or "open geeksforgeeks" in query):
webbrowser.open("https://www.geeksforgeeks.org")
bye=False
elif "send message on whatsapp" in query or 'send message' in query:
speak("To whom should I send a message")
speak(" Please type the number ")
no=input("Enter the number:")
speak(" what should I send ?")
speak('You will have to scan for whatsapp web.')
subquery=take_command().lower()
sendwhatmsg_instantly(f"+91{no}",f"{subquery}")
bye=False
elif "email" in query:
try:
speak("To whom do you want to send mail?")
to=input("Enter the mail id to whom you want to send:")
speak("what should i say?")
subquery=take_command().lower()
sendEmail(to,subquery)
speak("Email has been sent.")
speak('want to do anything else?')
except Exception as e:
speak("Sorry,I am currently unable to send the email.Please try again later")
speak('do you want me to do anything else?')
elif 'visual studio code' in query or 'open code' in query or 'code' in query or 'visual code' in query:
open_file('Visual Studio Code','Electron')
speak('visual studio code is open now')
bye=False
elif 'safari' in query:
open_file('Safari','Safari')
speak('Safari is open now')
bye=False
elif 'calculator' in query:
open_file('Calculator','Calculator')
speak('Calculator is open now')
bye=False
elif 'chrome' in query:
open_file('Google Chrome','Google Chrome')
speak('Chrome is open now')
bye=False
elif "close notepad" in query:
speak("okay sir, closing notepad")
os.system("taskkill/f /im notepad.exe")
speak('you want me to do anything else?')
elif ("close cmd"in query or "close command prompt" in query):
speak("okay sir, closing cmd")
os.system("taskkill /f /im cmd.exe")
speak('you want me to do anything else?')
elif 'joke' in query or 'jokes' in query:
joke = pyjokes.get_joke('en','all')
#print(joke)
speak(joke)
speak('anything else?')
elif 'jobs' in query or 'job' in query or 'job recommandation' in query or 'work' in query:
platforms = [
'linkedin', 'indeed', 'glassdoor', 'hackerrank', 'naukri',
'intern shala'
]
speak("Select a platform that you prefer:")
print('\n'.join(platforms))
statement1 = take_command().lower()
#statement1 = input()
            if statement1 == "none":  # take_command() returns "None" when recognition fails
continue
if 'linkedin' in statement1 or 'LinkedIn' in statement1 or 'Linkedin' in statement1:
webbrowser.open_new_tab("https://www.linkedin.com/jobs")
speak("LinkedIn is open now")
break
elif 'indeed' in statement1:
webbrowser.open_new_tab("https://www.indeed.com/jobs")
speak("Indeed is open now")
break
elif 'glassdoor' in statement1:
webbrowser.open_new_tab("https://www.glassdoor.com/jobs")
speak("Glassdoor is open now")
break
elif 'hackerrank' in statement1:
webbrowser.open_new_tab(
"https://www.hackerrank.com/jobs/search")
speak("HackerRank is open now")
break
elif 'naukri' in statement1:
webbrowser.open_new_tab("https://www.naukri.com/jobs")
speak("Naukri is open now")
break
elif 'intern shala' in statement1:
webbrowser.open_new_tab('internshala.com')
speak('Intern Shala is open now')
break
else:
speak("Sorry we couldn't find your search!!!")
speak('you want me to do anything else?')
#time.sleep(3)
elif "shut down the system" in query:
os.system("shutdown /s /t 5")
elif 'movie ticket booking' in query or 'movie booking' in query or 'movie ticket' in query:
speak('opening bookmyshow')
webbrowser.open_new_tab("https://in.bookmyshow.com/")
speak(" Book my show website is open now")
bye=False
elif "restart the system" in query:
os.system("shutdown /r /t 5")
elif 'online courses' in query or 'course' in query:
platforms = [
'coursera', 'udemy', 'edx', 'skillshare', 'datacamp', 'udacity'
]
speak("Select a platform that you prefer : ")
print("\n".join(platforms))
statement1 = take_command().lower()
            if statement1 == "none":  # take_command() returns "None" when recognition fails
continue
if 'coursera' in statement1:
webbrowser.open_new_tab("https://www.coursera.org")
speak("Coursera is open now")
bye=False
elif 'udemy' in statement1:
webbrowser.open_new_tab("https://www.udemy.com")
speak("udemy is open now")
bye=False
elif 'edx' in statement1:
webbrowser.open_new_tab("https://www.edx.org/")
speak("edx is open now")
bye=False
elif 'skillshare' in statement1:
webbrowser.open_new_tab("https://www.skillshare.com")
speak("skill share is open now")
bye=False
elif 'datacamp' in statement1:
webbrowser.open_new_tab("https://www.datacamp.com")
speak("datacamp is open now")
bye=False
elif 'udacity' in statement1:
webbrowser.open_new_tab("https://www.udacity.com")
speak("udacity is open now")
bye=False
else:
speak("Sorry we couldn't find your search!!!")
speak('you want me to do anything else?')
elif 'train ticket booking' in query or 'train booking' in query or 'train ticket' in query or 'train ticket' in query:
speak('opening website for train ticket booking')
webbrowser.open_new_tab("https://www.railyatri.in/train-ticket/")
speak(" IRCTC website is open now, have a good journey !")
bye=False
elif 'bus ticket booking' in query or 'bus booking' in query or 'bus ticket' in query:
speak('opening website for bus ticket booking')
webbrowser.open_new_tab("https://www.redbus.in")
speak(" Red bus website is open now, have a good journey !")
bye=False
elif 'airplane ticket booking' in query or 'airplane booking' in query or 'airplane ticket' in query:
speak('opening website for airplane ticket booking')
webbrowser.open_new_tab("https://www.goindigo.in")
speak(" Indigo website is open now, have a good journey !")
bye=False
elif "hotel" in query or "hotel booking" in query:
            speak('Opening goibibo.com')
webbrowser.open_new_tab('https://goibibo.com/hotels')
bye=False
elif "sleep the system" in query:
os.system("rundll32.exe powrprof.dll,SetSuspendState 0,1,0")
elif 'switch the window' in query:
if sys.platform == "win32":
pyautogui.keyDown("alt")
pyautogui.press("tab")
time.sleep(1)
pyautogui.keyUp("alt")
bye=False
else:
pyautogui.keyDown("command")
pyautogui.press("tab")
time.sleep(1)
pyautogui.keyUp("command")
bye=False
elif ("tell me news" in query or "news" in query):
speak("Please wait, Fetching the latest news")
news()
speak('need something else?')
elif ("tell me my location" in query or "location" in query):
speak("Hold on,Locating our current location")
try:
ipadd=requests.get("https://api.ipify.org").text
url="https://get.geojs.io/v1/ip/geo/"+ipadd+".json"
geo_requests= requests.get(url)
geo_data=geo_requests.json()
city=geo_data['city']
country=geo_data['country']
speak(f"We are in {city},{country}")
speak('need something else?')
except Exception as e:
speak("Sorry,I am unable to locate our current location due to poor connectivity. Please try after sometime.")
bye=False
elif "take a screenshot" in query or "take screenshot" in query:
name=datetime.datetime.now()
speak("taking screenshot...")
time.sleep(3)
img=pyautogui.screenshot()
img.save(f"{name}.png")
speak("Screenshot taken")
speak('need anything else?')
elif "read pdf" in query or " read book " in query :
pdf_reader()
bye=False
elif "how much battery is left" in query or "how much power is left" in query or "battery" in query:
battery=psutil.sensors_battery()
percentage=battery.percent
speak(f"We have {percentage} percent battery. ")
if percentage>=50:
speak("We have enough power to go on.")
elif percentage>=20 and percentage<50:
speak("You shall connect the system to a charging point")
elif percentage<20:
speak("Battery about to die,connect to a charging point as soon as possible")
speak('you want me to do anything else')
elif "internet speed" in query:
speak("Checking internet speed")
st=speedtest.Speedtest()
dl=round(float(st.download())/8000000,2)
up=round(float(st.upload())/8000000,2)
speak(f"Current downloading speed is {dl}mb/s while uploading speed is {up}")
speak('you want me to do anything else?')
elif "volume up" in query:
pyautogui.press("volumeup")
speak('you want me to do anything else?')
elif "volume down" in query:
pyautogui.press("volumedown")
speak('you want me to do anything else?')
elif "volume mute" in query or "mute" in query:
pyautogui.press("volumemute")
speak('you want me to do anything else?')
elif 'flip the coin' in query or 'toss the coin' in query or 'toss a coin' in query or 'flip a coin' in query:
chances = ['Heads', 'Tails']
res = random.choice(chances)
picLine = random.randint(1, 3)
lines = [htLine1, htLine2, htLine3]
lines[picLine - 1]()
speak('you want me to do anything else?')
elif 'dice' in query:
num = random.randint(1, 6)
speak("Your rolled " + str(num))
speak('you want me to do anything else?')
        elif 'bye' in query or 'no' in query or 'no thanks' in query:
            speak('Until next time')
bye=False
else:
speak("Sorry,I don't know how to do that right now but i am still learning how to be more helpful")
speak('anything else?')
#time.sleep(2)
if __name__=="__main__":
TaskExecution() | 38.553156 | 186 | 0.550605 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8,882 | 0.382696 |
8b820e62535a256f6892582e2d661efa4be1b944 | 1,748 | py | Python | model.py | TilenHumar/Vislice | 5970fb4d887a5689b906a7190fabb5405b25bbc7 | [
"MIT"
]
| null | null | null | model.py | TilenHumar/Vislice | 5970fb4d887a5689b906a7190fabb5405b25bbc7 | [
"MIT"
]
| 2 | 2021-04-19T15:51:18.000Z | 2021-04-19T16:17:06.000Z | model.py | TilenHumar/Vislice | 5970fb4d887a5689b906a7190fabb5405b25bbc7 | [
"MIT"
]
| null | null | null | import random
# najprej konstante
STEVILO_DOVOLJENIH_NAPAK = 10
PRAVILNA_CRKA = "+"
PONOVLJENA_CRKA = "o"
NAPACNA_CRKA = "-"
ZMAGA = "W"
PORAZ = "X"
class Igra:
def __init__(self, geslo, crke):
        self.geslo = geslo.upper() # the word to guess
        self.crke = crke.upper() # letters guessed so far
        # the game works with uppercase letters only
def napacne_crke(self):
return [crka for crka in self.crke if crka not in self.geslo]
def pravilne_crke(self):
return [crka for crka in self.crke if crka in self.geslo]
def stevilo_napak(self):
return len(self.napacne_crke())
def poraz(self):
return self.stevilo_napak() > STEVILO_DOVOLJENIH_NAPAK
    def zmaga(self):
return all([i in self.crke for i in self.geslo])
def pravilni_del_gesla(self):
rezultat = ""
for crka in self.geslo:
if crka in self.crke:
rezultat += crka
else:
rezultat += "_"
return rezultat
def nepravilni_ugibi(self):
return " ".join(self.napacne_crke())
def ugibaj(self, crka):
crka = crka.upper()
if self.poraz():
return PORAZ
if crka in self.crke:
return PONOVLJENA_CRKA
self.crke += crka
if self.zmaga():
return ZMAGA
if crka in self.geslo:
return PRAVILNA_CRKA
        if self.poraz():
return PORAZ
return NAPACNA_CRKA
bazen_besed = []
with open("besede.txt", encoding ="utf8") as input_file:
bazen_besed = input_file.readlines()
def nova_igra(bazen_besed):
beseda = random.choice(bazen_besed).strip()
return Igra(beseda, "")
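
# Usage sketch (illustrative): one round of guessing.
#     igra = nova_igra(bazen_besed)
#     print(igra.pravilni_del_gesla())
#     print(igra.ugibaj("a"))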
| 23 | 69 | 0.582952 | 1,369 | 0.782733 | 0 | 0 | 0 | 0 | 0 | 0 | 131 | 0.0749 |
8b827211c6d78c4e03e51a44190d8e3f1cffc3db | 2,443 | py | Python | legacy/models/GAT.py | astrockragh/IceCube | eba09e9f9a3c351dbf05496821bcd7d29ac0261c | [
"MIT"
]
| null | null | null | legacy/models/GAT.py | astrockragh/IceCube | eba09e9f9a3c351dbf05496821bcd7d29ac0261c | [
"MIT"
]
| null | null | null | legacy/models/GAT.py | astrockragh/IceCube | eba09e9f9a3c351dbf05496821bcd7d29ac0261c | [
"MIT"
]
| 2 | 2021-03-03T20:39:38.000Z | 2021-06-09T11:58:00.000Z | import numpy as np
import tensorflow as tf
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Dropout, Input
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l2
from tensorflow.random import set_seed
from spektral.transforms.layer_preprocess import LayerPreprocess
from spektral.layers import GATConv
from spektral.layers.pooling.global_pool import GlobalMaxPool, GlobalAvgPool, GlobalSumPool
from tensorflow.keras.layers import Dense, LeakyReLU, BatchNormalization
from tensorflow.keras.activations import tanh
from tensorflow.sparse import SparseTensor
hidden_states = 16
activation = LeakyReLU(alpha = 0.1)
class model(Model):
def __init__(self, n_out = 4):
super().__init__()
# Define layers of the model
        self.att1 = GATConv(hidden_states, attn_heads=2, dropout_rate=0.4, activation = "relu", return_attn_coef=False) # the required keyword is channels (hidden states)
        self.att2 = GATConv(hidden_states//2, attn_heads=3, dropout_rate=0.1, activation = "relu") # attn_heads is the time-limiting keyword; watch out for it
        self.att3 = GATConv(hidden_states*2, attn_heads=4, dropout_rate=0.7, activation = "relu") # hidden_states has to be pretty low as well
self.Pool1 = GlobalAvgPool() #good results with all three
self.Pool2 = GlobalSumPool()
self.Pool3 = GlobalMaxPool() #important for angle fitting
self.decode = [Dense(size * hidden_states) for size in [16, 8, 4]]
self.norm_layers = [BatchNormalization() for i in range(len(self.decode))]
self.d2 = Dense(n_out)
def call(self, inputs, training = False):
x, a, i = inputs
# a=sp_matrix_to_sp_tensor(a)
LayerPreprocess(self.att1)
LayerPreprocess(self.att2)
# x, alpha = self.att1([x,a])
x = self.att1([x,a])
x = self.att2([x, a])
x = self.att3([x,a])
x1 = self.Pool1([x, i])
x2 = self.Pool2([x, i])
x3 = self.Pool3([x,i])
x = tf.concat([x1, x2, x3], axis = 1)
# x = tf.concat([x1, x2], axis = 1)
# x = tf.concat([x2, x3], axis = 1)
for decode_layer, norm_layer in zip(self.decode, self.norm_layers):
x = activation(decode_layer(x))
x = norm_layer(x, training = training)
x = self.d2(x)
# return x, alpha
# tf.print(tf.shape(x))
return x | 46.09434 | 164 | 0.669668 | 1,758 | 0.719607 | 0 | 0 | 0 | 0 | 0 | 0 | 419 | 0.17151 |
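# Usage sketch (illustrative, shapes assumed): the model expects a spektral
# disjoint batch of node features x, sparse adjacency a and graph indices i:
#     m = model(n_out=4)
#     preds = m([x, a, i], training=False)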
8b83777c4cdb8551be6cb2f0840e3a838be9ce71 | 792 | py | Python | pyleecan/Methods/Slot/SlotUD2/get_surface_active.py | IrakozeFD/pyleecan | 5a93bd98755d880176c1ce8ac90f36ca1b907055 | [
"Apache-2.0"
]
| 95 | 2019-01-23T04:19:45.000Z | 2022-03-17T18:22:10.000Z | pyleecan/Methods/Slot/SlotUD2/get_surface_active.py | IrakozeFD/pyleecan | 5a93bd98755d880176c1ce8ac90f36ca1b907055 | [
"Apache-2.0"
]
| 366 | 2019-02-20T07:15:08.000Z | 2022-03-31T13:37:23.000Z | pyleecan/Methods/Slot/SlotUD2/get_surface_active.py | IrakozeFD/pyleecan | 5a93bd98755d880176c1ce8ac90f36ca1b907055 | [
"Apache-2.0"
]
| 74 | 2019-01-24T01:47:31.000Z | 2022-02-25T05:44:42.000Z | from numpy import arcsin, exp
from ....Classes.Segment import Segment
from ....Classes.Arc1 import Arc1
from ....Classes.SurfLine import SurfLine
def get_surface_active(self, alpha=0, delta=0):
"""Return the full winding surface
Parameters
----------
self : SlotUD2
A SlotUD2 object
alpha : float
float number for rotation (Default value = 0) [rad]
delta : complex
complex number for translation (Default value = 0)
Returns
-------
surf_wind: Surface
Surface corresponding to the Winding Area
"""
st = self.get_name_lam()
surface = self.active_surf.copy()
surface.label = "Wind_" + st + "_R0_T0_S0"
# Apply transformation
surface.rotate(alpha)
surface.translate(delta)
return surface
| 22.628571 | 59 | 0.64899 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 412 | 0.520202 |
8b849ac15aeae749f8a20c70f9517f14b9a20eb1 | 3,402 | py | Python | features/haralick.py | annaformaniuk/smoke-detection | 217014e9a2a5b9861f4cda3d4c1abce4aca34773 | [
"MIT"
]
| 7 | 2019-05-29T07:43:40.000Z | 2022-02-10T07:44:11.000Z | features/haralick.py | annaformaniuk/smoke-detection | 217014e9a2a5b9861f4cda3d4c1abce4aca34773 | [
"MIT"
]
| 1 | 2020-06-07T10:50:50.000Z | 2020-06-07T10:50:50.000Z | features/haralick.py | annaformaniuk/smoke-detection | 217014e9a2a5b9861f4cda3d4c1abce4aca34773 | [
"MIT"
]
| 4 | 2019-11-26T15:05:03.000Z | 2021-05-10T13:41:15.000Z | # from https://gogul09.github.io/software/texture-recognition
import cv2
import numpy as np
import os
import glob
import mahotas as mt
from sklearn.svm import LinearSVC
from typing import List
import matplotlib.pyplot as plt
import pickle
# load the training dataset
train_path = "../inputs/for_texture_model/train"
train_names = os.listdir(train_path)
# empty list to hold feature vectors and train labels
train_features = []
train_labels = []
def extract_features(image):
# calculate haralick texture features for 4 types of adjacency
textures = mt.features.haralick(image)
# take the mean of it and return it
ht_mean = textures.mean(axis=0)
return ht_mean
def train_feature_model():
# loop over the training dataset
print("[STATUS] Started extracting haralick textures..")
for train_name in train_names:
current_path = train_path + "/" + train_name
current_label = train_name
index = 1
for file in glob.glob(current_path + "/*.jpg"):
print("Processing Image - {} in {}".format(
index, current_label))
# read the training image
image = cv2.imread(file)
# convert the image to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# extract haralick texture from the image
features = extract_features(gray)
# append the feature vector and label
train_features.append(features)
train_labels.append(current_label)
# show loop update
index += 1
# have a look at the size of our feature vector and labels
print("Training features: {}".format(np.array(train_features).shape))
print("Training labels: {}".format(np.array(train_labels).shape))
# create the classifier
print("[STATUS] Creating the classifier..")
clf_svm = LinearSVC(random_state=9)
# fit the training data and labels
print("[STATUS] Fitting data/label to model..")
clf_svm.fit(train_features, train_labels)
# save the model to disk
filename = 'outputs/finalized_model.sav'
pickle.dump(clf_svm, open(filename, 'wb'))
# loop over the test images
test_path = "../inputs/for_texture_model/test"
fig = plt.figure(figsize=(5, 5))
for i, file in enumerate(glob.glob(test_path + "/*.jpg")):
# read the input image
image = cv2.imread(file)
# convert to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# extract haralick texture from the image
features = extract_features(gray)
# evaluate the model and predict label
prediction = clf_svm.predict(features.reshape(1, -1))[0]
# show the label
ax = fig.add_subplot(1, 4, i + 1)
ax.imshow(image, interpolation="nearest", cmap=plt.cm.gray)
ax.set_title(prediction, fontsize=10)
ax.set_xticks([])
ax.set_yticks([])
# display the output image
fig.tight_layout()
plt.show()
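
# Entry point (assumption: the script is meant to be run directly; the original
# module defined train_feature_model() without calling it anywhere).
if __name__ == "__main__":
    train_feature_model()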
| 35.072165 | 77 | 0.581717 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,082 | 0.318048 |
8b85c3050d99d93fcfb7dcf610efc1fcee13814c | 6,645 | py | Python | src/werdich_cfr/code_not_used/Shinichi_3D_Inception_Model.py | awerdich/werdich-cfr | 39d9a7f05d0a92304a6dd60df0124068735222ad | [
"MIT"
]
| null | null | null | src/werdich_cfr/code_not_used/Shinichi_3D_Inception_Model.py | awerdich/werdich-cfr | 39d9a7f05d0a92304a6dd60df0124068735222ad | [
"MIT"
]
| null | null | null | src/werdich_cfr/code_not_used/Shinichi_3D_Inception_Model.py | awerdich/werdich-cfr | 39d9a7f05d0a92304a6dd60df0124068735222ad | [
"MIT"
]
| null | null | null | # Imports assumed by this file (kernel_init, bias_init and SHAPE are still
# expected to be defined at module level before these functions run).
from tensorflow.keras.layers import (Input, Conv3D, MaxPooling3D, BatchNormalization,
                                     Dense, Dropout, GlobalAveragePooling3D,
                                     concatenate, multiply)
from tensorflow.keras.models import Model

def inception_module(x,filters_1x1,filters_3x3_reduce,filters_3x3,filters_5x5_reduce,filters_5x5,filters_pool_proj,trainable=True):
conv_1x1 = Conv3D(filters_1x1, (1,1,1), padding='same', activation='relu',kernel_initializer=kernel_init, bias_initializer=bias_init,trainable=trainable)(x)
conv_3x3 = Conv3D(filters_3x3_reduce, (1,1,1), padding='same', activation='relu',kernel_initializer=kernel_init, bias_initializer=bias_init,trainable=trainable)(x)
conv_1x1 = BatchNormalization(scale=False,trainable=trainable)(conv_1x1)
conv_3x3 = BatchNormalization(scale=False,trainable=trainable)(conv_3x3)
conv_3x3 = Conv3D(filters_3x3, (3,3,3), padding='same', activation='relu',kernel_initializer=kernel_init, bias_initializer=bias_init,trainable=trainable)(conv_3x3)
conv_3x3 = BatchNormalization(scale=False,trainable=trainable)(conv_3x3)
conv_5x5 = Conv3D(filters_5x5_reduce, (1,1,1), padding='same', activation='relu',kernel_initializer=kernel_init, bias_initializer=bias_init,trainable=trainable)(x)
conv_5x5 = BatchNormalization(scale=False,trainable=trainable)(conv_5x5)
conv_5x5 = Conv3D(filters_5x5, (3,3,3),strides=(1,1,1), padding='same', activation='relu',kernel_initializer=kernel_init, bias_initializer=bias_init,trainable=trainable)(conv_5x5)
conv_5x5 = BatchNormalization(scale=False,trainable=trainable)(conv_5x5)
conv_5x5 = Conv3D(filters_5x5, (3,3,3),strides=(1,1,1), padding='same', activation='relu',kernel_initializer=kernel_init, bias_initializer=bias_init,trainable=trainable)(conv_5x5)
#conv_7x7 = BatchNormalization()(conv_5x5)
#conv_7x7 = Conv3D(filters_5x5, (3,3,3),strides=(1,1,1), padding='same', activation='relu',kernel_initializer=kernel_init, bias_initializer=bias_init)(conv_7x7)
pool_proj = MaxPooling3D((3,3,3), strides=(1,1,1), padding='same')(x)
pool_proj = Conv3D(filters_pool_proj, (1,1,1), padding='same', activation='relu',kernel_initializer=kernel_init, bias_initializer=bias_init,trainable=trainable)(pool_proj)
output = concatenate([conv_1x1, conv_3x3, conv_5x5, pool_proj], axis=4)
return output
def get_models(Inbatchsize):
trainable=True #False
inputEco=Input(shape=SHAPE)
inputScale=Input(shape=(1,))
inputInvScale=Input(shape=(1,))
filters=64#48
x=inputEco
# x=BatchNormalization()(x)
x=Conv3D(filters,(7,7,7),padding='same',strides=(2,2,2),activation='relu',kernel_initializer=kernel_init, bias_initializer=bias_init,trainable=trainable)(x)
x=BatchNormalization(scale=False,trainable=trainable)(x)
x=MaxPooling3D(pool_size=(3,3,3),padding='same',strides=(2,2,2))(x)
x=Conv3D(filters,(1,1,1),padding='same',strides=(1,1,1),activation='relu',kernel_initializer=kernel_init, bias_initializer=bias_init,trainable=trainable)(x)
x=BatchNormalization(scale=False,trainable=trainable)(x)
x=Conv3D(filters*3,(3,3,3),padding='same',strides=(1,1,1),activation='relu',kernel_initializer=kernel_init, bias_initializer=bias_init,trainable=trainable)(x)
x=BatchNormalization(scale=False,trainable=trainable)(x)
x=inception_module(x,filters_1x1=filters,filters_3x3_reduce=int(filters*1.5),filters_3x3=filters*4,filters_5x5_reduce=int(filters/4),filters_5x5=int(filters/2),filters_pool_proj=int(filters/2),trainable=trainable)
x=BatchNormalization(scale=False,trainable=trainable)(x)
x=inception_module(x,filters_1x1=filters*2,filters_3x3_reduce=filters*2,filters_3x3=filters*3,filters_5x5_reduce=int(filters/2),filters_5x5=filters*3,filters_pool_proj=filters,trainable=trainable)
x=BatchNormalization(scale=False,trainable=trainable)(x)
x=MaxPooling3D(pool_size=(1,3,3),padding='same',strides=(2,2,2))(x) #(1,3,3)
x=inception_module(x,filters_1x1=filters*3,filters_3x3_reduce=int(filters*1.5),filters_3x3=int(filters*3.25),filters_5x5_reduce=int(filters/4),filters_5x5=int(filters*0.75),filters_pool_proj=filters,trainable=trainable)
x=BatchNormalization(scale=False,trainable=trainable)(x)
x=inception_module(x,filters_1x1=int(filters*2.5),filters_3x3_reduce=int(filters*1.75),filters_3x3=int(filters*3.5),filters_5x5_reduce=int(filters*0.375),filters_5x5=filters,filters_pool_proj=filters,trainable=trainable)
x=BatchNormalization(scale=False,trainable=trainable)(x)
x=inception_module(x,filters_1x1=filters*2,filters_3x3_reduce=filters*2,filters_3x3=filters*4,filters_5x5_reduce=int(filters*0.375),filters_5x5=filters,filters_pool_proj=filters,trainable=trainable)
x=BatchNormalization(scale=False,trainable=trainable)(x)
x=inception_module(x,filters_1x1=int(filters*1.75),filters_3x3_reduce=int(filters*2.25),filters_3x3=int(filters*4.5),filters_5x5_reduce=int(filters/2),filters_5x5=filters,filters_pool_proj=filters,trainable=trainable)
x=BatchNormalization(scale=False,trainable=trainable)(x)
x=inception_module(x,filters_1x1=filters*4,filters_3x3_reduce=int(filters*2.5),filters_3x3=filters*5,filters_5x5_reduce=int(filters/2),filters_5x5=filters*2,filters_pool_proj=filters*2,trainable=trainable)
x=BatchNormalization(scale=False,trainable=trainable)(x)
x=MaxPooling3D((1,3,3), strides=(2,2,2))(x) #(2,3,3) padding='same'
x=inception_module(x,filters_1x1=filters*4,filters_3x3_reduce=int(filters*2.5),filters_3x3=filters*5,filters_5x5_reduce=int(filters/2),filters_5x5=filters*2,filters_pool_proj=filters*2,trainable=trainable)
x=BatchNormalization(scale=False,trainable=trainable)(x)
x=inception_module(x,filters_1x1=filters*6,filters_3x3_reduce=filters*3,filters_3x3=filters*6,filters_5x5_reduce=int(filters*0.75),filters_5x5=filters*2,filters_pool_proj=filters*2,trainable=trainable)
x=BatchNormalization(scale=False,trainable=trainable)(x)
x=MaxPooling3D(pool_size=(1,3,3),trainable=trainable)(x)
x=inception_module(x,filters_1x1=filters*6,filters_3x3_reduce=filters*3,filters_3x3=filters*6,filters_5x5_reduce=int(filters*0.75),filters_5x5=filters*2,filters_pool_proj=filters*2,trainable=trainable)
x=BatchNormalization(scale=False,trainable=trainable)(x)
x=GlobalAveragePooling3D()(x)
x=Dropout(0.4)(x)
x=Dense(1,activation="relu",trainable=trainable)(x)
x=BatchNormalization(scale=False,trainable=trainable)(x)
a=inputScale
#a=Dense(256,activation='relu')(a)
a= multiply([x,a])
b=inputInvScale
#b=Dense(256,activation='relu')(b)
b=multiply([x,b])
x=concatenate([x,a,b])
x=BatchNormalization(scale=False,trainable=trainable)(x)
#x=Dense(768,activation="relu")(x)
#xa=BatchNormalization(scale=False)(x)
#xa=Dropout(0.2)(xa)
xa=Dense(1,name="reg")(x)
model=Model([inputEco,inputScale,inputInvScale],xa)
return model | 89.797297 | 224 | 0.78164 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 581 | 0.087434 |
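# Usage sketch (assumption: SHAPE, kernel_init and bias_init are defined at
# module level before get_models is called):
#     m = get_models(Inbatchsize=8)
#     m.compile(optimizer="adam", loss="mse")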
8b87afec28b6e06554c41af8512eee6c2652795a | 4,441 | py | Python | asciidoxy/templates/helpers.py | lurch/asciidoxy | 9781ba696637fadbf62f1b7c5da843b0d292007d | [
"Apache-2.0"
]
| null | null | null | asciidoxy/templates/helpers.py | lurch/asciidoxy | 9781ba696637fadbf62f1b7c5da843b0d292007d | [
"Apache-2.0"
]
| null | null | null | asciidoxy/templates/helpers.py | lurch/asciidoxy | 9781ba696637fadbf62f1b7c5da843b0d292007d | [
"Apache-2.0"
]
| null | null | null | # Copyright (C) 2019-2020, TomTom (http://tomtom.com).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for API reference templates."""
from asciidoxy.generator import Context
def _arg_name(param):
if param.name:
return f" {param.name}"
else:
return ""
def link_from_ref(ref,
context: Context,
nested_start="<",
nested_end=">",
args_start="(",
args_end=")",
skip_args=False):
if ref is None:
return ""
if ref.nested is not None:
if len(ref.nested) > 0:
nested = (f"{nested_start}"
f"{', '.join(link_from_ref(r, context) for r in ref.nested)}"
f"{nested_end}")
else:
nested = f"{nested_start}{nested_end}"
else:
nested = ""
if not skip_args and ref.args is not None:
if len(ref.args) > 0:
arg_parts = [f"{link_from_ref(a.type, context)}{_arg_name(a)}" for a in ref.args]
args = f"{args_start}{', '.join(arg_parts)}{args_end}"
else:
args = f"{args_start}{args_end}"
else:
args = ""
if ref.id:
return (f"{ref.prefix or ''}{context.link_to_element(ref.id, ref.name)}{nested}{args}"
f"{ref.suffix or ''}").strip()
else:
return f"{ref.prefix or ''}{ref.name}{nested}{args}{ref.suffix or ''}".strip()
def print_ref(ref, nested_start="<", nested_end=">", args_start="(", args_end=")"):
if ref is None:
return ""
if ref.nested is not None:
if len(ref.nested) > 0:
nested = f"{nested_start}{', '.join(print_ref(r) for r in ref.nested)}{nested_end}"
else:
nested = f"{nested_start}{nested_end}"
else:
nested = ""
if ref.args is not None:
if len(ref.args) > 0:
arg_parts = [f"{print_ref(a.type)}{_arg_name(a)}" for a in ref.args]
args = f"{args_start}{', '.join(arg_parts)}{args_end}"
else:
args = f"{args_start}{args_end}"
else:
args = ""
return f"{ref.prefix or ''}{ref.name}{nested}{args}{ref.suffix or ''}".strip()
def argument_list(params, context: Context):
return f"({', '.join(type_and_name(p, context) for p in params)})"
def type_list(params):
return f"({', '.join(print_ref(p.type) for p in params)})"
def has(elements):
return len(list(elements)) > 0
def chain(first_collection, second_collection):
yield from first_collection
yield from second_collection
def type_and_name(param, context: Context):
return f"{link_from_ref(param.type, context)} {param.name}".strip()
def method_signature(element, context: Context, max_width: int = 80):
static = "static" if element.static else ""
return_type = link_from_ref(element.returns.type, context) if element.returns else ""
method_name = element.name
method_without_params = " ".join(part for part in (static, return_type, method_name) if part)
if not element.params:
return (f"{method_without_params}()")
    return_type_no_ref = print_ref(element.returns.type) if element.returns else ""
method_without_params_length = len(" ".join(part for part in (static, return_type_no_ref,
method_name) if part))
param_sizes = [len(f"{print_ref(p.type)} {p.name}".strip()) for p in element.params]
indent_size = method_without_params_length + 1
first_indent = ""
if any(indent_size + size + 1 > max_width for size in param_sizes):
indent_size = 4
first_indent = "\n "
param_separator = f",\n{' ' * indent_size}"
formatted_params = f"{param_separator.join(type_and_name(p, context) for p in element.params)}"
return (f"{method_without_params}({first_indent}{formatted_params})")
| 33.390977 | 99 | 0.611124 | 0 | 0 | 112 | 0.02522 | 0 | 0 | 0 | 0 | 1,794 | 0.403963 |
8b88d9d29f78c551c398e16471317d51e96b8e76 | 2,511 | py | Python | fin_model_course/pltemplates/graphics/model_structure.py | whoopnip/fin-model-course | e6c5ae313bba601c4aca0f334818b61cc0393118 | [
"MIT"
]
| 5 | 2020-08-29T15:28:39.000Z | 2021-12-01T16:53:25.000Z | fin_model_course/pltemplates/graphics/model_structure.py | whoopnip/fin-model-course | e6c5ae313bba601c4aca0f334818b61cc0393118 | [
"MIT"
]
| 16 | 2020-02-26T16:03:47.000Z | 2021-06-15T15:17:37.000Z | fin_model_course/pltemplates/graphics/model_structure.py | whoopnip/fin-model-course | e6c5ae313bba601c4aca0f334818b61cc0393118 | [
"MIT"
]
| 3 | 2021-01-22T19:38:36.000Z | 2021-09-28T08:14:00.000Z | import pyexlatex as pl
import pyexlatex.table as lt
import pyexlatex.presentation as lp
import pyexlatex.graphics as lg
import pyexlatex.layouts as ll
def get_model_structure_graphic() -> lg.TikZPicture:
inputs_block_options = [
'fill=orange!30'
]
model_block_options = [
'fill=blue!50'
]
sub_model_block_options = [
'fill=blue!90'
]
step_block_options = [
'fill=cyan!20'
]
outputs_block_options = [
'fill=green!20'
]
text_options = [
'text=white'
]
step_text_options = [
'text=black'
]
inputs_text_options = outputs_text_options = step_text_options
arrow_options = [
'line width=0.75mm',
]
inputs_rectangle = lg.Rectangle(2, 8, offset=(-3.35, 4), contents=pl.Bold('Inputs'),
shape_options=inputs_block_options,
text_options=inputs_text_options)
model_rectangle = lg.Rectangle(5, 8, offset=(1.25, 4), contents=pl.Bold('Model'), content_position='bottom',
content_offset=0.2, shape_options=model_block_options,
text_options=text_options)
outputs_rectangle = lg.Rectangle(2, 8, offset=(5.85, 4), contents=pl.Bold('Outputs'),
shape_options=outputs_block_options,
text_options=outputs_text_options)
sub_model_rectangles = []
step_rectangles = []
for i in range(3):
y_offset = 1.75 + i * 2.5
sub_model_rectangles.append(
lg.Rectangle(4, 1.75, offset=(1.25, y_offset), contents='Sub-Model',
shape_options=sub_model_block_options, text_options=text_options,
content_position='bottom'),
)
for j in range(3):
x_offset = j * 1.25
step_rectangles.append(
lg.Rectangle(1.1, 1, offset=(x_offset, y_offset + 0.2), contents='Step',
shape_options=step_block_options, text_options=step_text_options,
)
)
arrows = [
lg.Arrow((-2.3, 4), (-1.3, 4), options=arrow_options),
lg.Arrow((3.8, 4), (4.8, 4), options=arrow_options),
]
return lg.TikZPicture([
inputs_rectangle,
model_rectangle,
*sub_model_rectangles,
*step_rectangles,
outputs_rectangle,
*arrows,
])
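
# Usage sketch (illustrative): embed the graphic in a pyexlatex document.
#     doc = pl.Document([get_model_structure_graphic()])
#     doc.to_pdf()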
| 29.541176 | 112 | 0.560335 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 173 | 0.068897 |
8b898fc8f9613f97a1b09d6b849378dd2047f47d | 51 | py | Python | index.py | JaidevstudioRobot/hackoctober2021 | d5855ac4bc797d7abb85b76f8b4a28e4a0dafaea | [
"MIT"
]
| null | null | null | index.py | JaidevstudioRobot/hackoctober2021 | d5855ac4bc797d7abb85b76f8b4a28e4a0dafaea | [
"MIT"
]
| null | null | null | index.py | JaidevstudioRobot/hackoctober2021 | d5855ac4bc797d7abb85b76f8b4a28e4a0dafaea | [
"MIT"
]
| 1 | 2021-10-04T18:16:06.000Z | 2021-10-04T18:16:06.000Z | # Hello python
a = "Hello I m Robot Jai"
print(a)
| 12.75 | 25 | 0.647059 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.705882 |
8b89b607196b90b61199e59cb3a2c777f0b348f7 | 1,748 | py | Python | calc.py | V-Perotto/Contador_NomeSobrenome_Decimal | 1e625306254c3f48e4c722e6ad04601f65af4c3c | [
"CC0-1.0"
]
| null | null | null | calc.py | V-Perotto/Contador_NomeSobrenome_Decimal | 1e625306254c3f48e4c722e6ad04601f65af4c3c | [
"CC0-1.0"
]
| null | null | null | calc.py | V-Perotto/Contador_NomeSobrenome_Decimal | 1e625306254c3f48e4c722e6ad04601f65af4c3c | [
"CC0-1.0"
]
| null | null | null | from alfabeto import *
from main import nameSur
# Lists
letras = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v",
"w", "x", "y", "z", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R",
"S", "T", "U", "V", "W", "X", "Y", "Z"]
def start(nameSur):
    # Join the pieces of the full name, dropping the spaces
    letra = nameSur.split()
    junto = ''.join(letra)
    return junto
def calc_alfabeto(letters, spaces):
    # Map each lowercase letter to its handler from alfabeto. In the original,
    # keys like 'a' or 'A' always evaluated to just 'a', so uppercase input is
    # handled by lowering the character before the lookup instead.
    switcher = {
        'a': letraA, 'b': letraB, 'c': letraC, 'd': letraD, 'e': letraE,
        'f': letraF, 'g': letraG, 'h': letraH, 'i': letraI, 'j': letraJ,
        'k': letraK, 'l': letraL, 'm': letraM, 'n': letraN, 'o': letraO,
        'p': letraP, 'q': letraQ, 'r': letraR, 's': letraS, 't': letraT,
        'u': letraU, 'v': letraV, 'w': letraW, 'x': letraX, 'y': letraY,
        'z': letraZ,
        ' ': nonLetra,
    }
    # Count over the whole name; the early returns in the original stopped
    # the loops at the very first character.
    for word in nameSur:
        if word.lower() not in switcher:
            print("ERROR: Incorrect Character")
        elif word in letras:
            letters += 1
        elif word == " ":
            spaces += 1
    print("\nThere are", letters, "letters.")
    print("There are", spaces, "spaces.")
    return letters, spaces
print("Tem", spaces, "espacos.") | 31.214286 | 119 | 0.371854 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 406 | 0.232265 |
8b8bd5685a8cb00d4aca06f90e968e83f7055e18 | 10,174 | py | Python | ioflo/aio/proto/devicing.py | BradyHammond/ioflo | 177ac656d7c4ff801aebb0d8b401db365a5248ce | [
"ECL-2.0",
"Apache-2.0",
"MIT"
]
| 128 | 2015-01-14T12:26:56.000Z | 2021-11-06T07:09:29.000Z | ioflo/aio/proto/devicing.py | BradyHammond/ioflo | 177ac656d7c4ff801aebb0d8b401db365a5248ce | [
"ECL-2.0",
"Apache-2.0",
"MIT"
]
| 17 | 2015-01-28T18:26:50.000Z | 2020-11-19T22:08:06.000Z | ioflo/aio/proto/devicing.py | BradyHammond/ioflo | 177ac656d7c4ff801aebb0d8b401db365a5248ce | [
"ECL-2.0",
"Apache-2.0",
"MIT"
]
| 29 | 2015-01-27T23:28:31.000Z | 2021-05-04T16:37:30.000Z | """
Device Base Package
"""
from __future__ import absolute_import, division, print_function
import struct
from binascii import hexlify
from collections import deque, namedtuple
import enum
import socket
from ...aid.sixing import *
from ...aid.odicting import odict
from ...aid.byting import bytify, unbytify, packify, unpackify
from ...aid.eventing import eventify, tagify
from ...aid import getConsole
from .. import aioing
from .protoing import MixIn
console = getConsole()
class Device(MixIn):
"""
Device Class
"""
def __init__(self,
stack,
uid=None,
name=None,
ha=None,
kind=None,
):
"""
Initialization method for instance
Parameters:
stack is Stack managing this device required
name is user friendly name of device
uid is unique device id
ha is device host address
kind is type of device
Attributes:
.stack is Stack managing this device required
.name is user friendly name of device
.uid is unique device id per channel or site
.ha is device host address
.kind is type of device
"""
self.stack = stack
self.uid = uid if uid is not None else stack.nextUid()
self.name = name if name is not None else "Device{0}".format(self.uid)
self.ha = ha if ha is not None else ''
self.kind = kind
def show(self):
"""
Display device data
"""
result = ("Device: name={0} uid={1} ha={2} kind={3}\n".format(
self.name,
self.uid,
self.ha,
self.kind))
return result
def process(self):
"""
Timer based processing
"""
pass
def receive(self, rx):
"""
Process received rx msg/pkt/data.
"""
pass
class LocalDevice(Device):
"""
Local Device Class
"""
def __init__(self,
stack,
**kwa):
"""
Initialization method for instance
Assumes local device in stack is created before any remotes are added
Inherited Parameters:
stack is Stack managing this device required
name is user friendly name of device
uid is unique device id
ha is device host address
kind is type of device
Inherited Attributes:
.stack is Stack managing this device required
.name is user friendly name of device
.uid is unique device id per channel or site
.ha is device host address
.kind is type of device
"""
super(LocalDevice, self).__init__(stack=stack, **kwa)
class RemoteDevice(Device):
"""
Remote Device Class
"""
def __init__(self,
stack,
uid=None,
**kwa):
"""
Initialization method for instance
Inherited Parameters:
stack is Stack managing this device required
name is user friendly name of device
uid is unique device id
ha is device host address
kind is type of device
Inherited Attributes:
.stack is Stack managing this device required
.name is user friendly name of device
.uid is unique device id per channel or site
.ha is device host address
.kind is type of device
"""
if uid is None:
uid = stack.nextUid()
while uid in stack.remotes or uid in (stack.local.uid,):
uid = stack.nextUid()
super(RemoteDevice, self).__init__(stack=stack, uid=uid, **kwa)
def receive(self, msg):
"""
Process received rx msg/pkt/data.
"""
if msg is not None:
self.stack.rxMsgs.append(msg)
class SingleRemoteDevice(Device):
"""
Remote Device Class when only one remote in stack .remote
Affects how uid is assigned
"""
def __init__(self,
stack,
uid=None,
uids=None,
**kwa):
"""
Initialization method for instance
Inherited Parameters:
stack is Stack managing this device required
name is user friendly name of device
uid is unique device id
ha is device host address
kind is type of device
Parameters:
uids is sequence or set of used uids to not use for remote if uid not provided
Inherited Attributes:
.stack is Stack managing this device required
.name is user friendly name of device
.uid is unique device id per channel or site
.ha is device host address
.kind is type of device
        Attributes:
"""
if uid is None:
uids = set(uids) if uids is not None else set()
if hasattr(stack, 'local'):
uids.add(stack.local.uid)
uid = stack.nextUid()
while uid in uids:
uid = stack.nextUid()
super(SingleRemoteDevice, self).__init__(stack=stack, uid=uid, **kwa)
class IpDevice(Device):
"""
IP device
"""
def __init__(self,
stack,
ha=None,
**kwa):
"""
Initialization method for instance
Inherited Parameters:
stack is Stack managing this device required
name is user friendly name of device
uid is unique device id
ha is device udp host address, a duple (host,port)
kind is type of device
Parameters:
Inherited Attributes:
.stack is Stack managing this device required
.name is user friendly name of device
.uid is unique device id per channel or site
.ha is device udp host address, a duple (host,port)
.kind is type of device
Attributes:
"""
if ha:
host, port = ha
host = aioing.normalizeHost(host)
if host in ('0.0.0.0',):
host = '127.0.0.1'
elif host in ("::", "0:0:0:0:0:0:0:0"):
host = "::1"
ha = (host, port)
else:
ha = ('127.0.0.1', stack.Port)
super(IpDevice, self).__init__(stack=stack, ha=ha, **kwa)
    @property
    def host(self):
        """
        Property that returns the host part of the device ha duple (host, port)
        """
        return self.ha[0]
    @host.setter
    def host(self, value):
        """
        Setter for host property
        """
        host, port = self.ha
        self.ha = (value, port)
    @property
    def port(self):
        """
        Property that returns the port part of the device ha duple (host, port)
        """
        return self.ha[1]
    @port.setter
    def port(self, value):
        """
        Setter for port property
        """
        host, port = self.ha
        self.ha = (host, value)
class IpLocalDevice(IpDevice, LocalDevice):
"""
Ip LocalDevice
"""
def __init__(self,
stack,
**kwa):
"""
Initialization method for instance
Inherited Parameters:
stack is Stack managing this device required
name is user friendly name of device
uid is unique device id
ha is device udp host address, a duple (host,port)
kind is type of device
Parameters:
Inherited Attributes:
.stack is Stack managing this device required
.name is user friendly name of device
.uid is unique device id per channel or site
.ha is device udp host address, a duple (host,port)
.kind is type of device
Attributes:
"""
super(IpLocalDevice, self).__init__(stack=stack, **kwa)
class IpRemoteDevice(IpDevice, RemoteDevice):
"""
Ip remote device
"""
def __init__(self,
stack,
**kwa):
"""
Initialization method for instance
Inherited Parameters:
stack is Stack managing this device required
name is user friendly name of device
uid is unique device id
ha is device udp host address, a duple (host,port)
kind is type of device
Parameters:
Inherited Attributes:
.stack is Stack managing this device required
.name is user friendly name of device
.uid is unique device id per channel or site
.ha is device udp host address, a duple (host,port)
.kind is type of device
Attributes:
"""
super(IpRemoteDevice, self).__init__(stack=stack, **kwa)
class IpSingleRemoteDevice(IpDevice, SingleRemoteDevice):
"""
Ip Single Remote Device Class when only one remote in stack .remote
Affects how uid is assigned
"""
def __init__(self,
stack,
**kwa):
"""
Initialization method for instance
Inherited Parameters:
stack is Stack managing this device required
name is user friendly name of device
uid is unique device id
ha is device udp host address, a duple (host,port)
kind is type of device
Parameters:
Inherited Attributes:
.stack is Stack managing this device required
.name is user friendly name of device
.uid is unique device id per channel or site
.ha is device udp host address, a duple (host,port)
.kind is type of device
Attributes:
"""
super(IpSingleRemoteDevice, self).__init__(stack=stack, **kwa)
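# A minimal wiring sketch (an addition, not part of the original module). The
# real Stack class lives elsewhere in ioflo; the FakeStack below is a
# hypothetical stand-in faking only the attributes these classes touch
# (.local, .remotes, .rxMsgs, .nextUid, .Port).
#
#     class FakeStack:
#         Port = 7530
#         def __init__(self):
#             self.remotes = {}
#             self.rxMsgs = deque()
#             self._uid = 0
#         def nextUid(self):
#             self._uid += 1
#             return self._uid
#
#     stack = FakeStack()
#     stack.local = IpLocalDevice(stack, name='alpha')
#     remote = IpRemoteDevice(stack, name='beta', ha=('127.0.0.1', 7531))
#     remote.receive({'hello': 'world'})  # appended to stack.rxMsgs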
| 27.276139 | 91 | 0.548555 | 9,671 | 0.95056 | 0 | 0 | 648 | 0.063692 | 0 | 0 | 6,315 | 0.6207 |