Dataset schema (column name, dtype, and the value ranges reported by the viewer):

| column | dtype | values |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 3–281 |
| content_id | string | lengths 40–40 |
| detected_licenses | list | lengths 0–57 |
| license_type | string | 2 classes |
| repo_name | string | lengths 6–116 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | 313 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 18.2k–668M, nullable (⌀) |
| star_events_count | int64 | 0–102k |
| fork_events_count | int64 | 0–38.2k |
| gha_license_id | string | 17 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 107 classes |
| src_encoding | string | 20 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4–6.02M |
| extension | string | 78 classes |
| content | string | lengths 2–6.02M |
| authors | list | lengths 1–1 |
| author | string | lengths 0–175 |
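A minimal sketch of reading rows with this schema, assuming the card corresponds to a dataset loadable through the Hugging Face `datasets` library (the dataset path below is a placeholder):

    from datasets import load_dataset

    # "org/dataset-name" is a placeholder; substitute the real dataset path.
    ds = load_dataset("org/dataset-name", split="train", streaming=True)
    for row in ds.take(1):
        print(row["repo_name"], row["path"], row["length_bytes"])
        print(row["content"][:200])

Sample rows follow; each row embeds a full source file in its `content` field.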
5dac554d2b83d3ae6d2f7f816a6e0e619379dd4f | b8815e8465c64f7b9b42266366cf60903267a6ee | /predykcja.py | fdf9c0f1271e6e951640312ab0ccaeb38fdcd5be | []
| no_license | cynddan/ProjektII | 0779f85c6ee26d7b72b4ebdfe51fe684ed860214 | d16a1ba1e4bc1bdb67486e03d079def361318172 | refs/heads/master | 2022-12-19T18:43:18.448186 | 2020-09-29T14:39:00 | 2020-09-29T14:39:00 | 299,641,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,140 | py | import os
os.environ["KERAS_BACKEND"] = "plaidml.keras.backend"
from keras.models import load_model
from keras.utils import np_utils
from keras.models import model_from_json
import numpy as np
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights("model.h5")
Dane = []
file = open('daneDoPredykcji.txt', 'r')
for line in file.readlines():
readDataStep = 0
tempStr = ""
seasonStr = ""
for char in line.rstrip():
if char != ' ':
if readDataStep == 0:
tempStr += char
elif readDataStep == 1:
seasonStr += char
else:
readDataStep += 1
Dane.append(float(tempStr))
if seasonStr == "winter":
Dane.append(int(1))
elif seasonStr == "autumn":
Dane.append(int(2))
elif seasonStr == "spring":
Dane.append(int(3))
elif seasonStr == "summer":
Dane.append(int(4))
file.close()
Dane = np.array(Dane)
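# Note: Keras predict() expects a batch (2-D) array; if the model was trained
# on (value, season) pairs, the flat array may first need reshaping, e.g.
#     Dane = Dane.reshape(-1, 2)
# The exact shape depends on the saved model, so this is left as a comment.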
prediction = loaded_model.predict(Dane)
print(prediction) | [
"[email protected]"
]
| |
abb7a5ff2e147e6f3a2c6e5b5b06e12ddf6207c3 | 4a1273f72e7d8a07a3fa67ac9f2709b64ec6bc18 | /retiresmartz/tests/test_social_security.py | 79ee980a0f79030246707e0e54844ec9226eb916 | []
| no_license | WealthCity/django-project | 6668b92806d8c61ef9e20bd42daec99993cd25b2 | fa31fa82505c3d0fbc54bd8436cfc0e49c896f3e | refs/heads/dev | 2021-01-19T14:10:52.115301 | 2017-04-12T11:23:32 | 2017-04-12T11:23:32 | 88,132,284 | 0 | 1 | null | 2017-04-13T06:26:30 | 2017-04-13T06:26:29 | null | UTF-8 | Python | false | false | 370 | py | from datetime import date
from django.test import TestCase
from retiresmartz.calculator.social_security import calculate_payments
class SocialSecurityTests(TestCase):
def test_calculate_payments(self):
amounts = calculate_payments(dob=date(1975, 1, 1), income=60000)
self.assertEqual(amounts[67], 2055)
self.assertEqual(amounts[68], 2219)
| [
"[email protected]"
]
| |
60c860ab7b2872850b368745818ecefc77a57aee | e78c0512b369dc88ab0d975689a1d90600ead8e4 | /PlaneGame.py | 998bc7ce344089f16a5a21be25e2fb38e2765524 | []
| no_license | Lugaba/pythonGames | 61b8a57b6821b82d26566bf9406e18e180b9ea3b | bff67349aef9c419304710e1bc4811fa0b00be19 | refs/heads/master | 2023-09-02T21:41:32.503448 | 2021-11-21T22:43:21 | 2021-11-21T22:43:21 | 418,909,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,667 | py | import pygame
import random
# Easier handling of pressed keys
from pygame.locals import (
K_UP,
K_DOWN,
K_LEFT,
K_RIGHT,
K_ESCAPE,
KEYDOWN,
QUIT,
)
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
# Define the Player object by extending pygame.sprite.Sprite
# The surface drawn on screen is an attribute of 'player'
class Player(pygame.sprite.Sprite):
def __init__(self):
super(Player, self).__init__()
self.surf = pygame.Surface((75, 25))
self.surf.fill((255, 255, 255))
self.rect = self.surf.get_rect()
# Move the Player according to the pressed keys
def update(self, pressed_keys):
if pressed_keys[K_UP]:
self.rect.move_ip(0, -5)
if pressed_keys[K_DOWN]:
self.rect.move_ip(0, 5)
if pressed_keys[K_LEFT]:
self.rect.move_ip(-5, 0)
if pressed_keys[K_RIGHT]:
self.rect.move_ip(5, 0)
# Keep the player inside the screen bounds
if self.rect.left < 0:
self.rect.left = 0
elif self.rect.right > SCREEN_WIDTH:
self.rect.right = SCREEN_WIDTH
if self.rect.top <= 0:
self.rect.top = 0
elif self.rect.bottom >= SCREEN_HEIGHT:
self.rect.bottom = SCREEN_HEIGHT
# Define the Enemy object by extending pygame.sprite.Sprite
# The surface drawn on screen is an attribute of 'Enemy'
class Enemy(pygame.sprite.Sprite):
def __init__(self):
super(Enemy, self).__init__()
self.surf = pygame.Surface((20, 10))
self.surf.fill((255, 255, 255))
self.rect = self.surf.get_rect(
center=(
random.randint(SCREEN_WIDTH + 20, SCREEN_WIDTH + 100),
random.randint(0, SCREEN_HEIGHT),
)
)
self.speed = random.randint(5, 20)
# Move the sprite based on its speed
# Remove the sprite once it passes the left edge of the screen
def update(self):
self.rect.move_ip(-self.speed, 0)
if self.rect.right < 0:
self.kill()
pygame.init()
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
# Create a custom event
ADDENEMY = pygame.USEREVENT + 1
pygame.time.set_timer(ADDENEMY, 250)
# Instantiate a player (for now just a rectangle)
player = Player()
# Create groups to hold the enemy sprites and all sprites;
# enemies is used for collision detection and position updates
enemies = pygame.sprite.Group()
all_sprites = pygame.sprite.Group()
all_sprites.add(player)
jogoAtivo = True
clock = pygame.time.Clock()
while jogoAtivo:
clock.tick(30)
for event in pygame.event.get():
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
jogoAtivo = False
elif event.type == QUIT:
jogoAtivo = False
# Add new enemies
elif event.type == ADDENEMY:
# Create a new enemy and add it to the sprite groups
new_enemy = Enemy()
enemies.add(new_enemy)
all_sprites.add(new_enemy)
pressed_keys = pygame.key.get_pressed()
# update the player's position
player.update(pressed_keys)
# update enemy positions
enemies.update()
screen.fill((135, 206, 250))
# Draw all sprites
for entity in all_sprites:
screen.blit(entity.surf, entity.rect)
# Check whether the player collided with any enemy
if pygame.sprite.spritecollideany(player, enemies):
# Remove the player and end the game
player.kill()
jogoAtivo = False
# Update the full display
pygame.display.flip() | [
"[email protected]"
]
| |
a43db7b03f7c69fdc8431e4a0733ef8ede7afd24 | 31f422747ef95fff5b443b1489cfd3c893c0e623 | /HashWrapper.py | 0745f5dccbc54bcae88440b1ba684b1a0acbc657 | []
| no_license | jlp1701/fbHash | 50f745b24c7fc627195d4e62edbbd47aaba3aaa1 | c2c587e5f235a71996dbf2f66a94ad1ac9cb199e | refs/heads/master | 2022-11-26T11:20:04.493474 | 2020-07-30T08:02:36 | 2020-07-30T08:02:36 | 283,619,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,806 | py | import os
import subprocess
import random
import re
import ssdeep
class HashWrapper(object):
"""docstring for HashWrapper"""
def __init__(self, path_bin, args_hash, args_compare, compare_regex):
super(HashWrapper, self).__init__()
self.path_bin = path_bin
self.args_hash = args_hash
self.args_compare = args_compare
self.compare_regex = compare_regex
def hashf(self, file_path):
cmd = [f"{self.path_bin}"]
cmd.extend(self.args_hash)
cmd.append(f"{file_path}")
proc_ret = subprocess.run(cmd, stdout=subprocess.PIPE)
# print(f"stdout: {proc_ret.stdout}")
if proc_ret.returncode != 0:
raise Exception(f"Hash program returned error code: {proc_ret.returncode}")
if proc_ret.stdout == b'':
raise Exception(f"No output for input file: {file_path}")
return proc_ret.stdout
def hashd(self, data):
# create temporary file
suff = str(random.randint(0, 1000000))
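# /dev/shm is a RAM-backed tmpfs, so the temporary file never touches disk.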
file_path = f"/dev/shm/hashd_{suff}.txt"
with open(file_path, "wb") as f:
f.write(data)
try:
h = self.hashf(file_path)
finally:
os.remove(file_path)
return h
def compare(self, h1, h2):
# write both hashes to temp file
suff = str(random.randint(0, 1000000))
file_path = f"/dev/shm/compare_{suff}.txt"
with open(file_path, "wb") as f:
f.write(h1)
f.write(h2)
# compare
# cmd = [f"{self.path_bin}", "-t", "0", "-c", f"{file_path}"]
cmd = [self.path_bin]
cmd.extend(self.args_compare)
cmd.append(file_path)
# print(f"cmd: {cmd}")
proc_ret = subprocess.run(cmd, stdout=subprocess.PIPE)
os.remove(file_path)
# print(f"stdout: {proc_ret.stdout}")
if proc_ret.returncode != 0:
raise Exception(f"Compare program returned error code: {proc_ret.returncode}")
if proc_ret.stdout == b'':
raise Exception(f"No output for input file: {file_path}")
m = re.match(self.compare_regex, str(proc_ret.stdout))
if m is None:
raise Exception(f"Output couldn't be parsed.")
return float(m.group(1))
if __name__ == '__main__':
hw = HashWrapper("sdhash", [""], ["-t", "0", "-c"], r".*?\|.*?\|(\d{3})")
for i in range(1):
print(i)
h1 = hw.hashf("b.text")
h2 = hw.hashf("a.text")
with open("a.text", "rb") as f:
f1 = f.read()
with open("b.text", "rb") as f:
f2 = f.read()
# # print(f"h1: {h1}")
# # print(f"h2: {h2}")
print(f"sdhash: {hw.compare(h1, h2)}")
print(f"ssdeep: {ssdeep.compare(ssdeep.hash(f1), ssdeep.hash(f2))}")
| [
"[email protected]"
]
| |
a2107ed1f5a6887c7579c341ea54dae9bcd302d8 | 4e372ff685bfb9af1c418e0c5401d5410e3adb69 | /random_poke_collection/random_poke_collection/wsgi.py | 9dc6e4059d8f2c6386e684e660bda09196b7afd2 | []
| no_license | opinedo1/pokemon-collection-project | 8afbd1133d4fac3a63bcf4aa4cf440a6c814c4bf | 69fafd1f6957e63202aac36d3dfef1fa9188c63d | refs/heads/main | 2023-05-15T04:48:23.382961 | 2021-06-17T01:54:51 | 2021-06-17T01:54:51 | 377,675,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | """
WSGI config for random_poke_collection project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'random_poke_collection.settings')
application = get_wsgi_application()
| [
"[email protected]"
]
| |
d7285d374c07a17aa751fce925cb59371bbdded4 | 605a05e9ea73648cb10e79ed4c0075671e4bc974 | /BeamPlot/Satpass.py | 803789768322d6c47dd23f76f8cf27d0ff171182 | []
| no_license | lishere/mwa_beam_measurement | 7872b9e0f978775d328df1b0675c75bc576aca88 | b4262fc835689aa19fdb84a2c027b848a0e90deb | refs/heads/master | 2021-06-21T13:52:06.047869 | 2017-08-22T12:13:21 | 2017-08-22T12:13:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,801 | py | #############################################################
################### SATELLITE PASSES ######################
#############################################################
import csv, ephem, numpy as np
from collections import namedtuple
from BeamPlot.TLE import TLE_config
from BeamPlot.Settings import lb, floc, req_base, exc_base, coords
from BeamPlot.TimeMethods import tm
# Create immutable object containers (call as tuple.par)
Satellite = namedtuple('Satellite', 'ID desig cls op status')
Pass = namedtuple('Pass', 'sat rise fall TLE')
class Sateph(object):
# Calculate passes and ephemeris for sats in satdata file (refined by inclusion/exclusion conditions)
def __init__(self, require=req_base, exclude=exc_base, pos=coords['MWA'], show=True):
with open(floc['satdata']) as csvfile: # Load satinfo from csv
csvfile.readline() # Dump header line in csv
csvdata = csv.reader(csvfile) # Parse csv into nrows x lists
sats = [Satellite(*row[:5]) for row in csvdata] # Parse first six cols each row
for field,req in require.items(): # Remove sats (inclusion)
sats = [s for s in sats if getattr(s,field) in req]
for field,exc in exclude.items(): # Remove sats (exclusion)
sats = [s for s in sats if getattr(s,field) not in exc]
self.sats = {sat.ID:sat for sat in sats} # Create satellite container
self.obs = ephem.Observer() # Initialise pyEphem observer class
self.obs.lat, self.obs.lon, self.obs.elevation = pos # Set observer coordinates (MWA)
self.obs.pressure = 0
def get_sat_alt_az(self, satellite_desig, timestamps, quiet=False): #, showplot=True, restrict=None):
#produce a list of numpy arrays in t,alt,az given a satellite designation
t_max=timestamps[-1]
def compute(u, seek=False):
# Keep constant tle for ephemeris calculation over single pass
self.obs.date = ephem.Date(u.eph())
self.target.compute(self.obs)
alt_rad=self.target.alt
alt_deg=(alt_rad/np.pi)*180.0
az_rad=self.target.az
az_deg=(az_rad/np.pi)*180.0
return [tm(self.obs.date).utc, alt_deg, az_deg]
def geteph(timestamps):
# Compute eph for single pass- operates in the form: target.compute(obs)
timestamps = [tm(float(t)) for t in timestamps]
for sat in self.sats.values():
if (sat.desig == satellite_desig):
sat_tuple=sat
tle = TLE_config(sat_tuple, t_max) # Mount TLE database file
#print "latest TLE is from %s " % (tle.lastrecord())
self.target = ephem.readtle('TLE0',*tle.mount(t_max).split(',')) # Set appropriate TLE
#self.target = ephem.readtle('TLE0',*satpass.TLE.split(','))
eph = [compute(u) for u in timestamps]
return np.swapaxes(eph,0,1)
return geteph(timestamps)
def passes(self, tmin, tmax, passfile=floc['Nov2015'], saveas=None, show=True):
"""Finds rise and set times of all passes falling within bounds
Note: next_pass returns the following data as a tuple:
0- Rise time, 1- Rise azimuth
2- Max alt time, 3- Max alt
4- Set time, 5- Set azimuth """
def satpass(tmin, tmax):
def nextpass(u):
# Update target for rise/set calculations by updating tle
rise, fall = 0, -1
while rise>fall:
self.obs.date = ephem.Date(u) # Set obs. date
target = ephem.readtle('TLE0',*tle.mount(u).split(',')) # Set appropriate TLE
passdata = self.obs.next_pass(target) # Compute passdata
print passdata
rise, fall = float(passdata[0]), float(passdata[4])
print passdata[2], passdata[3]
u = tm(u).eph()-ephem.minute
return rise, fall # Return rise/set times
passes = [] # Create container
umin, umax = tm(tmin).eph(), tm(tmax).eph() # Convert to Julian dates
for sat in self.sats.values():
tle = TLE_config(sat, tmax) # Mount TLE database file
rise, fall = nextpass(umin) # Find first pass
while rise<umax:
passes.append(Pass(sat,tm(rise),tm(fall),tle.now)) # Add pass info
rise, fall = nextpass(fall+5*ephem.second) # Find next pass
return passes
#Either calculate data or load from previously calculated data
self.passlist = []
self.printout1(tmin,tmax,show,passfile) # Print satpass range
if passfile:
with open(passfile) as f:
for line in f:
dat = line.strip().split(';')
self.passlist.append(Pass(self.sats[dat[0]],tm(dat[1]),tm(dat[2]),dat[3]))
else:
sortkey = lambda x: (x.rise,x.fall) # Pass sorting method
self.passlist = sorted(satpass(tmin,tmax), key=sortkey) # Produce passlist
self.printout2(self.passlist, self.sats, show) # Print passes
if saveas:
with open(saveas,'w+') as f:
[f.write('%s;%.2f;%.2f;%s\n'%(p.sat.ID,tm(p.rise).utc,tm(p.fall).utc,p.TLE)) for p in self.passlist]
return self.passlist
def ephemeris(self, satpass=None, timestamps=None, ephfile=None, saveas=None, quiet=True):
"""Calculates ephemeris for passes within trange using step size tstep (excluding
right bound if tmax-tmin%tstep!=0) to produce a list of numpy arrays in t,alt,azi"""
if ephfile:
data = [line.strip().split(',') for line in open(ephfile)]
return [[float(j) for j in i] for i in data]
def compute(u, seek=False):
# Keep constant tle for ephemeris calculation over single pass
self.obs.date = ephem.Date(u.eph())
self.target.compute(self.obs)
return [tm(self.obs.date).utc, self.target.alt, self.target.az]
# Compute eph for single pass- operates in the form: target.compute(obs)
timestamps = [tm(t) for t in timestamps if tm(t)>satpass.rise and tm(t)<satpass.fall]
self.target = ephem.readtle('TLE0',*satpass.TLE.split(','))
eph = [compute(u) for u in timestamps]
if saveas:
with open(saveas,'w+') as f:
[f.write('%.2f,%f,%f\n'%(e[0],e[1],e[2])) for e in eph]
#return np.swapaxes(eph,0,1)
return eph
def printout1(self, tmin, tmax, show, passfile):
#Print satellite pass search bounds
if show:
if passfile:
print 'Retrieving satellite passes from file (truncated at bounds)...'
else:
print 'Calculating satellite passes (truncated at bounds)...'
print 'From: %.1f -> %s\nTo: %.1f -> %s\n'%(tm(tmin).utc,tm(tmin).eph(),
tm(tmax).utc,tm(tmax).eph())
return
def printout2(self, plist, sats, show):
#Print number of passes found for each satellite
print ''
if show: # Print pass data
tabform = '%-8s%-12s%-24s%-24s'
dform = '%Y/%m/%d %H:%M:%S'
print tabform%('Pass:','Sat:','Rise:','Set:')
for i,p in zip(range(len(plist)),plist):
print ' '+tabform%(i+1, p.sat.desig ,tm(p.rise).cal(dform), tm(p.fall).cal(dform))
# Print pass count per satellite
nstring = ''.join([i.sat.desig for i in plist])
pcount = [(s.desig, nstring.count(s.desig)) for s in sats.values()]
print '\nPasscount: %d'%sum(i[1] for i in pcount)
for s in range(len(pcount)):
print '%s %-8s: %d '%('' if s%4 else '\n',pcount[s][0],pcount[s][1]),
print '\n' if s%4 else ''
return
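# Usage sketch (hypothetical Unix timestamps; paths and satellite filters come
# from the BeamPlot.Settings defaults):
#     eph = Sateph()
#     plist = eph.passes(tmin=1446336000, tmax=1446422400, passfile=None)
#     data = eph.ephemeris(satpass=plist[0],
#                          timestamps=range(1446336000, 1446422400, 10))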
###END###
| [
"[email protected]"
]
| |
cbd1b722e0e86cd63eb8142f7c52e5992e0f74fe | 6899f55b07bd6d49da2d331dfce217f92673ed34 | /Accounts/migrations/0026_posts_description.py | d3e8983d361a75752c164f4c44b5f79eac7bf04d | [
"MIT"
]
| permissive | Khushiraikar1/sudhaksha_maxo | e72945f2d2e6ec985b27a67f2db4465cf3a72ce2 | ccaba5426b8fcac0d6772bdb78916cb0cd0c09e7 | refs/heads/main | 2023-02-11T12:09:35.046523 | 2021-01-15T16:37:55 | 2021-01-15T16:37:55 | 317,636,328 | 2 | 6 | MIT | 2021-01-15T15:40:49 | 2020-12-01T18:46:39 | HTML | UTF-8 | Python | false | false | 400 | py | # Generated by Django 3.1.3 on 2021-01-09 13:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Accounts', '0025_auto_20210109_0049'),
]
operations = [
migrations.AddField(
model_name='posts',
name='description',
field=models.CharField(max_length=30, null=True),
),
]
| [
"[email protected]"
]
| |
cf2e1ff76673bf89c4d395061a404022a88c9d42 | 8aee66ff178f12c1af69c474d22d5b3c07c6ea5a | /WTL_Ecommerce/urls.py | 7047dbcac4ee0723d91407e7c24a53a7c2b02e9b | []
| no_license | yugvajani/Ecommerce | 65b6b9bac0b5ef13ba3002f5463d61382a0010e6 | b612ac68de8d69357fb09f77bf4fb0f7558fc5a9 | refs/heads/master | 2022-12-31T14:01:57.469654 | 2020-10-25T05:38:37 | 2020-10-25T05:38:37 | 295,621,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 978 | py | from django.contrib import admin
from django.urls import path, include
from django.contrib.auth import views as auth_views
from users import views as user_views
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import url
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('shopping.urls')),
path('register/', include('users.urls')),
path('login/', auth_views.LoginView.as_view(template_name = 'users/login.html'),name = 'login'),
path('logout/', auth_views.LogoutView.as_view(template_name = 'users/logout.html'),name = 'logout'),
path('profile/', user_views.profile, name = 'profile'),
path('cart/<str:par1>/<str:par2>/', user_views.mycart, name = 'cart'),
path('mycart/', user_views.mycartpage, name = 'mycart'),
path('checkout/', user_views.checkout, name = 'checkout'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
]
| |
4d6aebe278852de65f0041f199223a3b0802050b | 4bb0b11893b5c80e77bef80f866fbee3ec77fdf9 | /moziotest/asgi.py | eb6b345e514e65dcc36e3a73ac3921d9a409a4c2 | []
| no_license | philaser/gistest | 23ea14729126584178c85847053b7debc59f9a74 | ff1d9554d8ef740a798153164f05e0f5197b2eab | refs/heads/main | 2023-07-17T09:57:31.585646 | 2021-08-03T05:34:43 | 2021-08-03T05:34:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
ASGI config for moziotest project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'moziotest.settings')
application = get_asgi_application()
| [
"[email protected]"
]
| |
02de4efdb14ee9b26c3604b8639fd795f496ff2a | 348112db732ce6480af93604ed2e44b15b6b181a | /scoreboard.py | 346bcaf1e2edde58b816dfb4671d4006cbb17d21 | []
| no_license | vzbiven/alien_invasion | b3ab89a9ee5f7dca0e4300f9d262caaa5ee9d7ff | b2a139d00e922f8fa633ad5e251126dbc1c30f03 | refs/heads/master | 2021-09-01T11:59:24.132878 | 2017-12-26T21:34:09 | 2017-12-26T21:34:09 | 115,156,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,409 | py | import pygame.font
from pygame.sprite import Group
from ship import Ship
class Scoreboard:
"""Класс для вывода игровой информации"""
def __init__(self, ai_settings, screen, stats):
"""Инициавлизирует атрибуты подсчета очков"""
self.screen = screen
self.screen_rect = screen.get_rect()
self.ai_settings = ai_settings
self.stats = stats
# Font settings for rendering the score
self.text_color = (170, 170, 170)
self.font = pygame.font.SysFont(None, 24)
# Prepare the initial images
self.prep_score()
self.prep_high_score()
self.prep_level()
self.prep_ships()
def prep_score(self):
"""Преобразует текущий счет в графическое изображение"""
rounded_score = round(self.stats.score, -1)
score_str = "{:,}".format(rounded_score)
self.score_image = self.font.render(score_str, True,
self.text_color, self.ai_settings.bg_color)
# Display the score at the top-right of the screen
self.score_rect = self.score_image.get_rect()
self.score_rect.right = self.screen_rect.right - 10
self.score_rect.top = 10
def prep_high_score(self):
"""Преобразует рекордный счет в графическое изображение."""
high_score = int(round(self.stats.high_score, -1))
high_score_str = "{:,}".format(high_score)
self.high_score_image = self.font.render(high_score_str,
True, self.text_color, self.ai_settings.bg_color)
# The high score is centered at the top of the screen
self.high_score_rect = self.high_score_image.get_rect()
self.high_score_rect.centerx = self.screen_rect.centerx
self.high_score_rect.top = self.screen_rect.top
def prep_level(self):
"""Преобразует уровень в графическое изображение"""
self.level_image = self.font.render(str(self.stats.level),
True, self.text_color, self.ai_settings.bg_color)
# The level is displayed below the current score
self.level_rect = self.level_image.get_rect()
self.level_rect.right = self.score_rect.right
self.level_rect.top = self.score_rect.bottom + 5
def prep_ships(self):
"""Сообщает количесво оставшихся кораблей"""
self.ships = Group()
for ship_number in range(self.stats.ships_left):
ship = Ship(self.ai_settings, self.screen)
# new little ship image
ship.image = pygame.image.load('images/lil_ship.bmp')
ship.rect = ship.image.get_rect()
ship.rect.x = 5 + ship_number * ship.rect.width
ship.rect.y = 5
self.ships.add(ship)
def show_score(self):
"""Выводит счет на экран"""
self.screen.blit(self.score_image, self.score_rect)
self.screen.blit(self.high_score_image, self.high_score_rect)
self.screen.blit(self.level_image, self.level_rect)
self.ships.draw(self.screen)
| [
"[email protected]"
]
| |
e9fc29c6db8536842548dc0b16b5b63be29da3c1 | 89fe7ab88acb82b9bed1718c67570346255c4580 | /tournament/urls.py | c45c827ab525906d3f02a0184de1c577a8fb1de8 | []
| no_license | bunny232/wakegolftourfinal | aa0597a7fb80650e7771b1e1dd6c7d46a3ab0b6e | 1881a8ddff43a3b6552d5544e4d756e9e9a39de9 | refs/heads/main | 2023-04-27T01:03:46.190787 | 2021-05-07T10:35:09 | 2021-05-07T10:35:09 | 363,582,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | from django.urls import path
from . import views
urlpatterns = [
    path('', views.TournamentListView.as_view(), name='tournament_list'),
    path('<int:pk>/', views.TournamentDetailView.as_view(), name='tournament_detail'),
]
| [
"[email protected]"
]
| |
05a84f2dcb29053453cd748b398607357195d124 | 46315e5409eaa258424378685b9bfac7000d7aec | /bug_tracker/views.py | 326463acef58237f2cbd57e7d7909238deb21e69 | []
| no_license | chewh115/bug_tracker | 29eb90b80fde5c756316c7ac966daf79829c977a | 5f2c0a68ce09431aafe93bab5194695c12f7dc0d | refs/heads/master | 2022-11-21T00:14:46.497265 | 2020-05-22T14:49:09 | 2020-05-22T14:49:09 | 266,133,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | from django.shortcuts import render, reverse, HttpResponseRedirect
# Create your views here.
def index(request):
return render(request, 'index.html') | [
"[email protected]"
]
| |
77724477cb371ab0f924e77a121a7073d482a6f5 | 8ef50ff641161167a9250a4f3ba623f718ef5e2a | /fol/estimator_box.py | 4a0fe0d8929ade73c19986fe9f8811ceddbd72ee | []
| no_license | CodingMice/EFO-1-QA-benchmark | 6c1e4c14d425ac06d94bda47f2e867fb2b5d0e0e | 9d693d89192c51e3336be9ebd19f198552ba3bbe | refs/heads/master | 2023-08-11T13:02:59.547499 | 2021-09-21T02:03:53 | 2021-09-21T02:03:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,912 | py | from typing import List
import torch
from torch import nn
import torch.nn.functional as F
from .appfoq import (AppFOQEstimator, IntList, find_optimal_batch,
inclusion_sampling)
class BoxOffsetIntersection(nn.Module):
def __init__(self, dim):
super(BoxOffsetIntersection, self).__init__()
self.dim = dim
self.layer1 = nn.Linear(self.dim, self.dim)
self.layer2 = nn.Linear(self.dim, self.dim)
nn.init.xavier_uniform_(self.layer1.weight)
nn.init.xavier_uniform_(self.layer2.weight)
def forward(self, embeddings):
layer1_act = F.relu(self.layer1(embeddings))
layer1_mean = torch.mean(layer1_act, dim=0)
gate = torch.sigmoid(self.layer2(layer1_mean))
offset, _ = torch.min(embeddings, dim=0)
return offset * gate
class CenterIntersection(nn.Module):
def __init__(self, dim):
super(CenterIntersection, self).__init__()
self.dim = dim
self.layer1 = nn.Linear(self.dim, self.dim)
self.layer2 = nn.Linear(self.dim, self.dim)
nn.init.xavier_uniform_(self.layer1.weight)
nn.init.xavier_uniform_(self.layer2.weight)
def forward(self, embeddings):
layer1_act = F.relu(self.layer1(embeddings)) # (num_conj, dim)
attention = F.softmax(self.layer2(layer1_act),
dim=0) # (num_conj, dim)
embedding = torch.sum(attention * embeddings, dim=0)
return embedding
def identity(x):
return x
class BoxEstimator(AppFOQEstimator):
def __init__(self, n_entity, n_relation, gamma, entity_dim,
relation_dim, offset_activation, center_reg,
negative_sample_size, device):
super().__init__()
self.name = 'box'
self.n_entity = n_entity
self.n_relation = n_relation
self.gamma = nn.Parameter(
torch.Tensor([gamma]),
requires_grad=False
)
self.negative_size = negative_sample_size
self.entity_dim = entity_dim
self.relation_dim = relation_dim
self.device = device
self.epsilon = 2.0
self.embedding_range = nn.Parameter(
torch.Tensor([(self.gamma.item() + self.epsilon) / entity_dim]),
requires_grad=False
)
self.entity_embeddings = nn.Embedding(
num_embeddings=n_entity, embedding_dim=self.entity_dim)
self.relation_embeddings = nn.Embedding(
num_embeddings=n_relation, embedding_dim=self.relation_dim)
self.offset_embeddings = nn.Embedding(
num_embeddings=n_relation, embedding_dim=self.entity_dim)
nn.init.uniform_(self.entity_embeddings.weight,
-self.embedding_range.item(),
self.embedding_range.item())
nn.init.uniform_(self.relation_embeddings.weight,
-self.embedding_range.item(),
self.embedding_range.item())
nn.init.uniform_(self.offset_embeddings.weight,
0,
self.embedding_range.item())
self.center_net = CenterIntersection(self.entity_dim)
self.offset_net = BoxOffsetIntersection(self.entity_dim)
self.cen_reg = center_reg
if offset_activation == 'none':
self.func = identity
elif offset_activation == 'relu':
self.func = F.relu
elif offset_activation == 'softplus':
self.func = F.softplus
else:
assert False, "No valid activation function!"
def get_entity_embedding(self, entity_ids: torch.LongTensor):
center_emb = self.entity_embeddings(entity_ids)
offset_emb = torch.zeros_like(center_emb).to(self.device)
return torch.cat((center_emb, offset_emb), dim=-1)
def get_projection_embedding(self, proj_ids: torch.LongTensor, emb):
assert emb.shape[0] == len(proj_ids)
rel_emb = self.relation_embeddings(proj_ids)
r_offset_emb = self.offset_embeddings(proj_ids)
q_emb, q_off_emb = torch.chunk(emb, 2, dim=-1)
q_emb = torch.add(q_emb, rel_emb)
q_off_emb = torch.add(q_off_emb, self.func(r_offset_emb))
return torch.cat((q_emb, q_off_emb), dim=-1)
def get_negation_embedding(self, emb: torch.Tensor):
assert False, "box cannot handle negation"
def get_disjunction_embedding(self, disj_emb: List[torch.Tensor]):
return torch.stack(disj_emb, dim=1)
def get_difference_embedding(self, lemb: torch.Tensor, remb: torch.Tensor):
assert False, "box cannot handle negation"
def get_multiple_difference_embedding(self,
emb: List[torch.Tensor],
**kwargs):
assert False, "box cannot handle negation"
def get_conjunction_embedding(self, conj_emb: List[torch.Tensor]):
sub_center_list, sub_offset_list = [], []
for sub_emb in conj_emb:
sub_center, sub_offset = torch.chunk(sub_emb, 2, dim=-1)
sub_center_list.append(sub_center)
sub_offset_list.append(sub_offset)
new_center = self.center_net(torch.stack(sub_center_list))
new_offset = self.offset_net(torch.stack(sub_offset_list))
return torch.cat((new_center, new_offset), dim=-1)
def criterion(self,
pred_emb: torch.Tensor,
answer_set: List[IntList],
union=False):
pred_emb = pred_emb.unsqueeze(dim=-2)
chosen_answer, chosen_false_answer, subsampling_weight = \
inclusion_sampling(answer_set,
negative_size=self.negative_size,
entity_num=self.n_entity)
positive_all_embedding = self.get_entity_embedding(
torch.tensor(chosen_answer, device=self.device)) # b*d
positive_embedding, _ = torch.chunk(
positive_all_embedding, 2, dim=-1)
neg_embedding = self.get_entity_embedding(
torch.tensor(chosen_false_answer, device=self.device).view(-1))
neg_embedding = neg_embedding.view(
-1, self.negative_size, 2 * self.entity_dim) # batch*n*dim
negative_embedding, _ = torch.chunk(neg_embedding, 2, dim=-1)
if union:
positive_union_logit = self.compute_logit(
positive_embedding.unsqueeze(1), pred_emb)
positive_logit = torch.max(
positive_union_logit, dim=1)[0]
negative_union_logit = self.compute_logit(
negative_embedding.unsqueeze(1), pred_emb)
negative_logit = torch.max(negative_union_logit, dim=1)[0]
else:
positive_logit = self.compute_logit(positive_embedding, pred_emb)
negative_logit = self.compute_logit(negative_embedding, pred_emb)
return positive_logit, negative_logit, subsampling_weight.to(
self.device)
def compute_logit(self, entity_emb, query_emb):
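# Query2Box-style distance: points outside the box incur the full L1 penalty,
# while deviation inside the box is down-weighted by cen_reg.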
query_center_embedding, query_offset_embedding = torch.chunk(
query_emb, 2, dim=-1)
delta = (entity_emb - query_center_embedding).abs()
distance_out = F.relu(delta - query_offset_embedding)
distance_in = torch.min(delta, query_offset_embedding)
logit = self.gamma - torch.norm(distance_out, p=1, dim=-1) \
- self.cen_reg * torch.norm(distance_in, p=1, dim=-1)
return logit
def compute_all_entity_logit(self,
pred_emb: torch.Tensor,
union=False) -> torch.Tensor:
all_entities = torch.LongTensor(range(self.n_entity)).to(self.device)
all_embedding, _ = torch.chunk(
self.get_entity_embedding(all_entities), 2, dim=-1)
pred_emb = pred_emb.unsqueeze(-2)
batch_num = find_optimal_batch(all_embedding,
query_dist=pred_emb,
compute_logit=self.compute_logit,
union=union)
chunk_of_answer = torch.chunk(all_embedding, batch_num, dim=0)
logit_list = []
for answer_part in chunk_of_answer:
if union:
union_part = self.compute_logit(
answer_part.unsqueeze(0).unsqueeze(0), pred_emb)
# b*disj*answer_part*dim
logit_part = torch.max(union_part, dim=1)[0]
else:
logit_part = self.compute_logit(answer_part.unsqueeze(dim=0),
pred_emb)
# batch*answer_part*dim
logit_list.append(logit_part)
all_logit = torch.cat(logit_list, dim=1)
return all_logit
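# Usage sketch (hypothetical sizes, illustrative only):
#     est = BoxEstimator(n_entity=100, n_relation=10, gamma=24.0,
#                        entity_dim=64, relation_dim=64,
#                        offset_activation='relu', center_reg=0.02,
#                        negative_sample_size=8, device='cpu')
#     emb = est.get_entity_embedding(torch.tensor([0, 1]))           # anchor entities
#     emb = est.get_projection_embedding(torch.tensor([3, 3]), emb)  # one relation hop
#     logits = est.compute_all_entity_logit(emb)                     # shape (2, n_entity)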
| [
"[email protected]"
]
| |
919ad93f8397a45a32157a3d9ce108dcda051ccb | 7769cb512623c8d3ba96c68556b2cea5547df5fd | /configs/retinanet_x101_64x4d_fpn_1x.py | c8be724f92d2a09198980ad017f4851b0be09359 | [
"MIT"
]
| permissive | JialeCao001/D2Det | 0e49f4c76e539d574e46b02f278242ca912c31ea | a76781ab624a1304f9c15679852a73b4b6770950 | refs/heads/master | 2022-12-05T01:00:08.498629 | 2020-09-04T11:33:26 | 2020-09-04T11:33:26 | 270,723,372 | 312 | 88 | MIT | 2020-07-08T23:53:23 | 2020-06-08T15:37:35 | Python | UTF-8 | Python | false | false | 3,901 | py | # model settings
model = dict(
type='RetinaNet',
pretrained='open-mmlab://resnext101_64x4d',
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
num_outs=5),
bbox_head=dict(
type='RetinaHead',
num_classes=81,
in_channels=256,
stacked_convs=4,
feat_channels=256,
octave_base_scale=4,
scales_per_octave=3,
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[8, 16, 32, 64, 128],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)))
# training and testing settings
train_cfg = dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False)
test_cfg = dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/retinanet_x101_64x4d_fpn_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
| [
"[email protected]"
]
| |
bb0fd3227823ae168714b2d609f75a815db3c820 | b05761d771bb5a85d39d370c649567c1ff3eb089 | /venv/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/pynamodb/connection/__init__.pyi | f649861fa13ab72cf0f93c2e820af2c7a7f8dc10 | []
| no_license | JawshyJ/Coding_Practice | 88c49cab955eab04609ec1003b6b8c20f103fc06 | eb6b229d41aa49b1545af2120e6bee8e982adb41 | refs/heads/master | 2023-02-19T10:18:04.818542 | 2023-02-06T21:22:58 | 2023-02-06T21:22:58 | 247,788,631 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | pyi | /home/runner/.cache/pip/pool/b4/87/2e/b11aa30f971bc3d814440e95ea0252de4a2d77aa81c6e6be59eb9449f8 | [
"[email protected]"
]
| |
208a4bbe45fd1316bedf6ae67548ab3e9cdaa390 | 722d43256ac73d94b66aed7d800115e4db64879e | /posts/views.py | d4a9788f3ee2b260c69beed76aeeeafe01799949 | []
| no_license | Keci/coursecommunicator | af9829146c884fc2e3e115fa8cb1dc52229e2fb3 | 4392e8c551653ee04e8edb3e31da72f01aef4f51 | refs/heads/master | 2020-04-06T05:24:21.445000 | 2013-02-20T19:27:35 | 2013-02-20T19:27:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,629 | py | from posts.models import Post
from posts.models import Tag
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
from django.contrib.auth import login
from django.template import Context, loader
from django.core import serializers
from django.core.serializers import serialize
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from customuser.models import *
from course.models import *
from django.http import HttpResponseRedirect
from customdecorator.decorators import *
from homepage.points import PointsDetail
@login_required()
@course_permission()
def index(request, course_short_title):
return render(request, 'posts/main.html',
{
'username':request.user.username,
'course_short_title':course_short_title,
'course_id':Course.objects.get(short_title=course_short_title).id,
'taglist': Tag.objects.all(),
'taglist_json': serialize('json', Tag.objects.all())
}
)
@login_required()
@course_permission()
def feed(request, course_short_title):
return render(request, 'posts/feed.html',
{
'username':request.user.username,
'course_short_title':course_short_title
}
)
@login_required()
@course_permission()
def feed_include(request, course_short_title):
return render(request, 'posts/feed_include.html',
{
'username':request.user.username,
'course_short_title':course_short_title
}
)
def rightbar(request, course_short_title):
return render(request, 'posts/rightbar.html')
def header(request, course_short_title):
return render(request, 'posts/header.html')
| [
"[email protected]"
]
| |
6f3751a95e5c8e16e37c51ded4053218e3407a7f | fc0cd56d74c5b7def0c9d38372ef702653ec1734 | /app/models/fund.py | 0ca754c2f586b29056d1ad59a112e8ef9684284a | [
"MIT"
]
| permissive | netai/stockbag_backend | 644fec30dee33333c3a5d59911846b64e6b85c5b | b5bbc09fea896bcb1c03091579f6de658bff4c13 | refs/heads/main | 2023-02-20T06:39:58.092445 | 2021-01-22T12:02:38 | 2021-01-22T12:02:38 | 325,260,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | import datetime
from ..extensions import db
class Fund(db.Model):
"""Fund model for storing Fund retated details"""
__tablename__ = "fund"
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
total_amount = db.Column(db.Float, nullable=False, default=0)
invested_amount = db.Column(db.Float, nullable=False, default=0)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
def __repr__(self):
return "<Fund 'id: {}, type: {}'>".format(self.id,self.total_amount) | [
"[email protected]"
]
| |
226118c526c576d8edfde2c75a1994b83da6395a | 5c7bd453ac9461062436814db502154da3c38d77 | /scripts/make-release.py | d0e419d5844edda53a841ed631f399086b17a194 | [
"LicenseRef-scancode-warranty-disclaimer",
"CC0-1.0",
"LicenseRef-scancode-public-domain"
]
| permissive | Earth4Us/flopy | 51a425862035341b03a08643678e2144343a1967 | af42e7827fe053af911efef0f37dcb76dad7e9c0 | refs/heads/develop | 2022-10-05T03:40:42.428197 | 2022-09-08T19:50:23 | 2022-09-08T19:50:23 | 79,733,250 | 0 | 0 | null | 2017-01-22T17:33:07 | 2017-01-22T17:33:06 | null | UTF-8 | Python | false | false | 11,864 | py | #!/usr/bin/python
import datetime
import json
import os
import subprocess
import sys
from importlib.machinery import SourceFileLoader
# file_paths dictionary has file names and the path to the file. Enter '.'
# as the path if the file is in the root repository directory
file_paths = {
"version.py": "../flopy",
"README.md": "../",
"PyPI_release.md": "../docs",
"code.json": "../",
"DISCLAIMER.md": "../flopy",
"notebook_examples.md": "../docs",
}
pak = "flopy"
# local import of package variables in flopy/version.py
loader = SourceFileLoader("version", os.path.join("..", "flopy", "version.py"))
version_mod = loader.load_module()
# build authors list for Software/Code citation for FloPy
authors = []
for key in version_mod.author_dict.keys():
t = key.split()
author = f"{t[-1]}"
for str in t[0:-1]:
author += f" {str}"
authors.append(author)
approved = """Disclaimer
----------
This software has been approved for release by the U.S. Geological Survey
(USGS). Although the software has been subjected to rigorous review, the USGS
reserves the right to update the software as needed pursuant to further analysis
and review. No warranty, expressed or implied, is made by the USGS or the U.S.
Government as to the functionality of the software and related material nor
shall the fact of release constitute any such warranty. Furthermore, the
software is released on condition that neither the USGS nor the U.S. Government
shall be held liable for any damages resulting from its authorized or
unauthorized use.
"""
preliminary = """Disclaimer
----------
This software is preliminary or provisional and is subject to revision. It is
being provided to meet the need for timely best science. The software has not
received final approval by the U.S. Geological Survey (USGS). No warranty,
expressed or implied, is made by the USGS or the U.S. Government as to the
functionality of the software and related material nor shall the fact of release
constitute any such warranty. The software is provided on the condition that
neither the USGS nor the U.S. Government shall be held liable for any damages
resulting from the authorized or unauthorized use of the software.
"""
def get_disclaimer():
# get current branch
branch = get_branch()
if branch.lower().startswith("release") or "master" in branch.lower():
disclaimer = approved
is_approved = True
else:
disclaimer = preliminary
is_approved = False
return is_approved, disclaimer
def get_branch():
branch = None
# determine if branch defined on command line
for argv in sys.argv:
if "master" in argv:
branch = "master"
elif "develop" in argv.lower():
branch = "develop"
if branch is None:
try:
# determine current branch
b = subprocess.Popen(
("git", "status"),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
).communicate()[0]
if isinstance(b, bytes):
b = b.decode("utf-8")
for line in b.splitlines():
if "On branch" in line:
branch = line.replace("On branch ", "").rstrip()
except:
msg = "Could not determine current branch. Is git installed?"
raise ValueError(msg)
return branch
def get_version_str(v0, v1, v2):
version_type = (f"{v0}", f"{v1}", f"{v2}")
version = ".".join(version_type)
return version
def get_tag(v0, v1, v2):
tag_type = (f"{v0}", f"{v1}", f"{v2}")
tag = ".".join(tag_type)
return tag
def get_software_citation(version, is_approved):
now = datetime.datetime.now()
sb = ""
if not is_approved:
sb = " — release candidate"
# format author names
line = "["
for ipos, author in enumerate(authors):
if ipos > 0:
line += ", "
if ipos == len(authors) - 1:
line += "and "
sv = author.split()
tauthor = f"{sv[0]}"
if len(sv) < 3:
gname = sv[1]
if len(gname) > 1:
tauthor += f", {gname}"
else:
tauthor += f", {gname[0]}."
else:
tauthor += f", {sv[1][0]}. {sv[2][0]}."
# add formatted author name to line
line += tauthor
# add the rest of the citation
line += (
f", {now.year}, FloPy v{version}{sb}: "
f"U.S. Geological Survey Software Release, {now:%d %B %Y}, "
"https://doi.org/10.5066/F7BK19FH]"
"(https://doi.org/10.5066/F7BK19FH)"
)
return line
def update_version():
name_pos = None
try:
file = "version.py"
fpth = os.path.join(file_paths[file], file)
vmajor = 0
vminor = 0
vmicro = 0
lines = [line.rstrip("\n") for line in open(fpth, "r")]
for idx, line in enumerate(lines):
t = line.split()
if "major =" in line:
vmajor = int(t[2])
elif "minor =" in line:
vminor = int(t[2])
elif "micro =" in line:
vmicro = int(t[2])
elif "__version__" in line:
name_pos = idx + 1
except:
raise OSError("There was a problem updating the version file")
try:
# write new version file
f = open(fpth, "w")
f.write(
(
f"# {pak} version file automatically created "
f"using...{os.path.basename(__file__)}\n"
)
)
f.write(
f"# created on...{datetime.datetime.now():%B %d, %Y %H:%M:%S}\n"
)
f.write("\n")
f.write(f"major = {vmajor}\n")
f.write(f"minor = {vminor}\n")
f.write(f"micro = {vmicro}\n")
f.write('__version__ = f"{major}.{minor}.{micro}"\n')
# write the remainder of the version file
if name_pos is not None:
for line in lines[name_pos:]:
f.write(f"{line}\n")
f.close()
print("Successfully updated version.py")
except:
raise OSError("There was a problem updating the version file")
# update README.md with new version information
update_readme_markdown(vmajor, vminor, vmicro)
# update notebook_examples.md
update_notebook_examples_markdown()
# update code.json
update_codejson(vmajor, vminor, vmicro)
# update PyPI_release.md
update_PyPI_release(vmajor, vminor, vmicro)
def update_codejson(vmajor, vminor, vmicro):
# define json filename
file = "code.json"
json_fname = os.path.join(file_paths[file], file)
# get branch
branch = get_branch()
# create version
version = get_tag(vmajor, vminor, vmicro)
# load and modify json file
with open(json_fname, "r") as f:
data = json.load(f)
# modify the json file data
now = datetime.datetime.now()
sdate = now.strftime("%Y-%m-%d")
data[0]["date"]["metadataLastUpdated"] = sdate
if branch.lower().startswith("release") or "master" in branch.lower():
data[0]["version"] = version
data[0]["status"] = "Production"
else:
data[0]["version"] = version
data[0]["status"] = "Release Candidate"
# rewrite the json file
with open(json_fname, "w") as f:
json.dump(data, f, indent=4)
f.write("\n")
return
def update_readme_markdown(vmajor, vminor, vmicro):
# create disclaimer text
is_approved, disclaimer = get_disclaimer()
# define branch
if is_approved:
branch = "master"
else:
branch = "develop"
# create version
version = get_tag(vmajor, vminor, vmicro)
# read README.md into memory
file = "README.md"
fpth = os.path.join(file_paths[file], file)
with open(fpth, "r") as file:
lines = [line.rstrip() for line in file]
# rewrite README.md
terminate = False
f = open(fpth, "w")
for line in lines:
if "### Version " in line:
line = f"### Version {version}"
if not is_approved:
line += " — release candidate"
elif "[flopy continuous integration]" in line:
line = (
"[](https://github.com/modflowpy/flopy/actions/"
"workflows/commit.yml)".format(branch)
)
elif "[Read the Docs]" in line:
line = (
"[]"
"(https://github.com/modflowpy/flopy/actions/"
"workflows/rtd.yml)".format(branch)
)
elif "[Coverage Status]" in line:
line = (
"[]"
"(https://coveralls.io/github/modflowpy/"
"flopy?branch={0})".format(branch)
)
elif "[Binder]" in line:
# [](https://mybinder.org/v2/gh/modflowpy/flopy.git/develop)
line = (
"[]"
"(https://mybinder.org/v2/gh/modflowpy/flopy.git/"
"{})".format(branch)
)
elif "doi.org/10.5066/F7BK19FH" in line:
line = get_software_citation(version, is_approved)
elif "Disclaimer" in line:
line = disclaimer
terminate = True
f.write(f"{line}\n")
if terminate:
break
f.close()
# write disclaimer markdown file
file = "DISCLAIMER.md"
fpth = os.path.join(file_paths[file], file)
f = open(fpth, "w")
f.write(disclaimer)
f.close()
return
def update_notebook_examples_markdown():
# create disclaimer text
is_approved, disclaimer = get_disclaimer()
# define branch
if is_approved:
branch = "master"
else:
branch = "develop"
# read notebook_examples.md into memory
file = "notebook_examples.md"
fpth = os.path.join(file_paths[file], file)
with open(fpth, "r") as file:
lines = [line.rstrip() for line in file]
# rewrite notebook_examples.md
terminate = False
f = open(fpth, "w")
for line in lines:
if "[Binder]" in line:
# [](https://mybinder.org/v2/gh/modflowpy/flopy.git/develop)
line = (
"[]"
"(https://mybinder.org/v2/gh/modflowpy/flopy.git/"
"{})".format(branch)
)
f.write(f"{line}\n")
f.close()
def update_PyPI_release(vmajor, vminor, vmicro):
# create disclaimer text
is_approved, disclaimer = get_disclaimer()
# create version
version = get_tag(vmajor, vminor, vmicro)
# read PyPI_release.md into memory
file = "PyPI_release.md"
fpth = os.path.join(file_paths[file], file)
with open(fpth, "r") as file:
lines = [line.rstrip() for line in file]
# rewrite PyPI_release.md
terminate = False
f = open(fpth, "w")
for line in lines:
if "doi.org/10.5066/F7BK19FH" in line:
line = get_software_citation(version, is_approved)
elif "Disclaimer" in line:
line = disclaimer
terminate = True
f.write(f"{line}\n")
if terminate:
break
f.close()
return
if __name__ == "__main__":
update_version()
get_software_citation("3.1.1", True)
| [
"[email protected]"
]
| |
9386930dd28039deb8f1f7147efb17aeb95ab14a | bfa3752f3398fbd071cf6299ea5a1ecaab3b56b0 | /resources/image/reader.py | 8efb05d8d871cb4b32d18b82d72a7ffe109787d9 | []
| no_license | paka747/AmazFirConverter | 5be401016588f66ef16f7c58473e975ce13fc109 | 7bc63f3108e325d9c9ebf9f6da9004106f38c785 | refs/heads/master | 2020-12-10T18:38:50.856309 | 2020-01-13T19:37:47 | 2020-01-13T19:37:47 | 233,675,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,205 | py | import logging
import io
from PIL import Image
import resources.image.color
class Reader():
def __init__(self, stream):
self._reader = stream
self._bip = True
def read(self):
signature = self._reader.read(4)
if signature[0] != ord('B') or signature[1] != ord('M'):
print(signature)
raise TypeError("Image signature doesn't match.")
if signature[2] == 0xff:
logging.warn("The image is 32bit.")
self._bip = False
if self._bip:
assert(False) # not implemented
else:
self.readHeader()
return self.readImage()
def readImage(self):
image = Image.new('RGBA', (self._width, self._height))
for y in range(self._height):
rowBytes = self._reader.read(self._rowLengthInBytes)
for x in range(self._width):
b = rowBytes[x * self._step]
g = rowBytes[x * self._step + 1]
r = rowBytes[x * self._step + 2]
if self._step == 4:
alpha = rowBytes[x * self._step + 3]
else:
alpha = 255
color = resources.image.color.Color.fromArgb(alpha, r, g, b)
image.putpixel((x,y), color)
return image
def readHeader(self):
logging.info("Reading image header(non-bip)...")
self._width = int.from_bytes(self._reader.read(4), byteorder='little')
self._height = int.from_bytes(self._reader.read(4), byteorder='little')
self._bitsPerPixel = int.from_bytes(self._reader.read(4), byteorder='little')
self._unknown1 = int.from_bytes(self._reader.read(4), byteorder='little')
self._unknown2 = int.from_bytes(self._reader.read(4), byteorder='little')
self._step = int(self._bitsPerPixel / 8)
self._rowLengthInBytes = self._width * self._step
self._transparency = False
logging.info("Image header was read:")
logging.info(f"Width: {self._width}, Height: {self._height}, RowLength: {self._rowLengthInBytes}")
logging.info(f"BPP: {self._bitsPerPixel}, Transparency: {self._transparency}")
| [
"[email protected]"
]
| |
b60ab3d3a4b85ca553f13b17a6cd77ce03fc9d73 | 1b456cb5c5f390e009fbf0de41848e3b342386cf | /turn_on_off_led.py | 70e3659742afc84df3bef59c2857a0e3fe32c964 | []
| no_license | ruthie1246/sca_pi | 71ce4f9fa9bf8960fca89f1e6bf6b42861cba466 | 143d5b5bbca9fa160799d2e7a72fe66e3268feb6 | refs/heads/master | 2020-03-19T08:37:50.791761 | 2018-06-07T21:19:51 | 2018-06-07T21:19:51 | 136,222,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | #!/usr/bin/env python
# this script turns the Auto Flash Led on and then off
import RPi.GPIO as GPIO
import time
# breadboard setup
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
# assign pin number for Auto Flash LED; pin 11 = GPIO 17
led_pin = 11
# set Auto Flash LED pin's mode as output
GPIO.setup(led_pin,GPIO.OUT)
# turn on Auto Flash LED
GPIO.output(led_pin,True)
time.sleep(2)
# turn off Auto Flash LED
GPIO.output(led_pin,False)
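# To blink repeatedly instead, the on/off pair above could be wrapped in a
# loop before cleanup, e.g.:
#     for _ in range(5):
#         GPIO.output(led_pin, True)
#         time.sleep(0.5)
#         GPIO.output(led_pin, False)
#         time.sleep(0.5)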
# reset GPIO resources used by script
GPIO.cleanup()
| [
"[email protected]"
]
| |
b1b18bc452761dd1d27d1780a8e7572981ae0eb3 | dd3046b6357e7c69c895992f61dd231190bd0546 | /example/linkage_demo/work_with_AlwaysAI/pedestrian_tracking_demo/venv/lib/python3.6/site-packages/pip/_internal/cli/req_command.py | c011d150cb35fae86540a98ebd1e64eaae6624ae | [
"Apache-2.0"
]
| permissive | baidu-research/hydra-vcam | 0bb7519792d7c57b868378c0924612883db7ff2d | 035d8119f4b1eab0da9153abc3d085a34331752d | refs/heads/master | 2023-08-28T18:19:28.824791 | 2021-09-01T00:38:41 | 2021-10-22T23:47:10 | 401,811,271 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,891 | py | """Contains the Command base classes that depend on PipSession.
The classes in this module are in a separate module so the commands not
needing download / PackageFinder capability don't unnecessarily import the
PackageFinder machinery and all its vendored dependencies, etc.
"""
import logging
import os
from functools import partial
from pip._vendor.six import PY2
from pip._internal.cli import cmdoptions
from pip._internal.cli.base_command import Command
from pip._internal.cli.command_context import CommandContextMixIn
from pip._internal.exceptions import CommandError, PreviousBuildDirError
from pip._internal.index.collector import LinkCollector
from pip._internal.index.package_finder import PackageFinder
from pip._internal.models.selection_prefs import SelectionPreferences
from pip._internal.network.session import PipSession
from pip._internal.operations.prepare import RequirementPreparer
from pip._internal.req.constructors import (
install_req_from_editable,
install_req_from_line,
install_req_from_parsed_requirement,
install_req_from_req_string,
)
from pip._internal.req.req_file import parse_requirements
from pip._internal.self_outdated_check import pip_self_version_check
from pip._internal.utils.temp_dir import tempdir_kinds
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from optparse import Values
from typing import Any, List, Optional, Tuple
from pip._internal.cache import WheelCache
from pip._internal.models.target_python import TargetPython
from pip._internal.req.req_install import InstallRequirement
from pip._internal.req.req_tracker import RequirementTracker
from pip._internal.resolution.base import BaseResolver
from pip._internal.utils.temp_dir import TempDirectory, TempDirectoryTypeRegistry
logger = logging.getLogger(__name__)
class SessionCommandMixin(CommandContextMixIn):
"""
A class mixin for command classes needing _build_session().
"""
def __init__(self):
# type: () -> None
super(SessionCommandMixin, self).__init__()
self._session = None # Optional[PipSession]
@classmethod
def _get_index_urls(cls, options):
# type: (Values) -> Optional[List[str]]
"""Return a list of index urls from user-provided options."""
index_urls = []
if not getattr(options, "no_index", False):
url = getattr(options, "index_url", None)
if url:
index_urls.append(url)
urls = getattr(options, "extra_index_urls", None)
if urls:
index_urls.extend(urls)
# Return None rather than an empty list
return index_urls or None
def get_default_session(self, options):
# type: (Values) -> PipSession
"""Get a default-managed session."""
if self._session is None:
self._session = self.enter_context(self._build_session(options))
# there's no type annotation on requests.Session, so it's
# automatically ContextManager[Any] and self._session becomes Any,
# then https://github.com/python/mypy/issues/7696 kicks in
assert self._session is not None
return self._session
def _build_session(self, options, retries=None, timeout=None):
# type: (Values, Optional[int], Optional[int]) -> PipSession
assert not options.cache_dir or os.path.isabs(options.cache_dir)
session = PipSession(
cache=(
os.path.join(options.cache_dir, "http")
if options.cache_dir else None
),
retries=retries if retries is not None else options.retries,
trusted_hosts=options.trusted_hosts,
index_urls=self._get_index_urls(options),
)
# Handle custom ca-bundles from the user
if options.cert:
session.verify = options.cert
# Handle SSL client certificate
if options.client_cert:
session.cert = options.client_cert
# Handle timeouts
if options.timeout or timeout:
session.timeout = (
timeout if timeout is not None else options.timeout
)
# Handle configured proxies
if options.proxy:
session.proxies = {
"http": options.proxy,
"https": options.proxy,
}
# Determine if we can prompt the user for authentication or not
session.auth.prompting = not options.no_input
return session
class IndexGroupCommand(Command, SessionCommandMixin):
"""
Abstract base class for commands with the index_group options.
This also corresponds to the commands that permit the pip version check.
"""
def handle_pip_version_check(self, options):
# type: (Values) -> None
"""
Do the pip version check if not disabled.
This overrides the default behavior of not doing the check.
"""
# Make sure the index_group options are present.
assert hasattr(options, 'no_index')
if options.disable_pip_version_check or options.no_index:
return
# Otherwise, check if we're using the latest version of pip available.
session = self._build_session(
options,
retries=0,
timeout=min(5, options.timeout)
)
with session:
pip_self_version_check(session, options)
KEEPABLE_TEMPDIR_TYPES = [
tempdir_kinds.BUILD_ENV,
tempdir_kinds.EPHEM_WHEEL_CACHE,
tempdir_kinds.REQ_BUILD,
]
def with_cleanup(func):
# type: (Any) -> Any
"""Decorator for common logic related to managing temporary
directories.
"""
def configure_tempdir_registry(registry):
# type: (TempDirectoryTypeRegistry) -> None
for t in KEEPABLE_TEMPDIR_TYPES:
registry.set_delete(t, False)
def wrapper(self, options, args):
# type: (RequirementCommand, Values, List[Any]) -> Optional[int]
assert self.tempdir_registry is not None
if options.no_clean:
configure_tempdir_registry(self.tempdir_registry)
try:
return func(self, options, args)
except PreviousBuildDirError:
# This kind of conflict can occur when the user passes an explicit
# build directory with a pre-existing folder. In that case we do
# not want to accidentally remove it.
configure_tempdir_registry(self.tempdir_registry)
raise
return wrapper
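# Illustrative usage (a sketch, not part of this module): concrete commands
# are expected to wrap their ``run`` methods with this decorator, along the
# lines of::
#
#     class InstallCommand(RequirementCommand):
#         @with_cleanup
#         def run(self, options, args):
#             ...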
class RequirementCommand(IndexGroupCommand):
def __init__(self, *args, **kw):
# type: (Any, Any) -> None
super(RequirementCommand, self).__init__(*args, **kw)
self.cmd_opts.add_option(cmdoptions.no_clean())
@staticmethod
def determine_resolver_variant(options):
# type: (Values) -> str
"""Determines which resolver should be used, based on the given options."""
# We didn't want to change things for Python 2, since it's nearly done with
# and we're using performance improvements that only work on Python 3.
if PY2:
if '2020-resolver' in options.features_enabled:
return "2020-resolver"
else:
return "legacy"
if "legacy-resolver" in options.deprecated_features_enabled:
return "legacy"
return "2020-resolver"
@classmethod
def make_requirement_preparer(
cls,
temp_build_dir, # type: TempDirectory
options, # type: Values
req_tracker, # type: RequirementTracker
session, # type: PipSession
finder, # type: PackageFinder
use_user_site, # type: bool
download_dir=None, # type: str
):
# type: (...) -> RequirementPreparer
"""
Create a RequirementPreparer instance for the given parameters.
"""
temp_build_dir_path = temp_build_dir.path
assert temp_build_dir_path is not None
resolver_variant = cls.determine_resolver_variant(options)
if resolver_variant == "2020-resolver":
lazy_wheel = 'fast-deps' in options.features_enabled
if lazy_wheel:
logger.warning(
'pip is using lazily downloaded wheels using HTTP '
'range requests to obtain dependency information. '
'This experimental feature is enabled through '
'--use-feature=fast-deps and it is not ready for '
'production.'
)
else:
lazy_wheel = False
if 'fast-deps' in options.features_enabled:
logger.warning(
'fast-deps has no effect when used with the legacy resolver.'
)
return RequirementPreparer(
build_dir=temp_build_dir_path,
src_dir=options.src_dir,
download_dir=download_dir,
build_isolation=options.build_isolation,
req_tracker=req_tracker,
session=session,
progress_bar=options.progress_bar,
finder=finder,
require_hashes=options.require_hashes,
use_user_site=use_user_site,
lazy_wheel=lazy_wheel,
)
@classmethod
def make_resolver(
cls,
preparer, # type: RequirementPreparer
finder, # type: PackageFinder
options, # type: Values
wheel_cache=None, # type: Optional[WheelCache]
use_user_site=False, # type: bool
ignore_installed=True, # type: bool
ignore_requires_python=False, # type: bool
force_reinstall=False, # type: bool
upgrade_strategy="to-satisfy-only", # type: str
use_pep517=None, # type: Optional[bool]
py_version_info=None, # type: Optional[Tuple[int, ...]]
):
# type: (...) -> BaseResolver
"""
Create a Resolver instance for the given parameters.
"""
make_install_req = partial(
install_req_from_req_string,
isolated=options.isolated_mode,
use_pep517=use_pep517,
)
resolver_variant = cls.determine_resolver_variant(options)
        # The long import name and duplicated invocation are needed to
        # convince Mypy to typecheck correctly. Otherwise it would complain
        # about the "Resolver" class being redefined.
if resolver_variant == "2020-resolver":
import pip._internal.resolution.resolvelib.resolver
return pip._internal.resolution.resolvelib.resolver.Resolver(
preparer=preparer,
finder=finder,
wheel_cache=wheel_cache,
make_install_req=make_install_req,
use_user_site=use_user_site,
ignore_dependencies=options.ignore_dependencies,
ignore_installed=ignore_installed,
ignore_requires_python=ignore_requires_python,
force_reinstall=force_reinstall,
upgrade_strategy=upgrade_strategy,
py_version_info=py_version_info,
)
import pip._internal.resolution.legacy.resolver
return pip._internal.resolution.legacy.resolver.Resolver(
preparer=preparer,
finder=finder,
wheel_cache=wheel_cache,
make_install_req=make_install_req,
use_user_site=use_user_site,
ignore_dependencies=options.ignore_dependencies,
ignore_installed=ignore_installed,
ignore_requires_python=ignore_requires_python,
force_reinstall=force_reinstall,
upgrade_strategy=upgrade_strategy,
py_version_info=py_version_info,
)
def get_requirements(
self,
args, # type: List[str]
options, # type: Values
finder, # type: PackageFinder
session, # type: PipSession
):
# type: (...) -> List[InstallRequirement]
"""
Parse command-line arguments into the corresponding requirements.
"""
requirements = [] # type: List[InstallRequirement]
for filename in options.constraints:
for parsed_req in parse_requirements(
filename,
constraint=True, finder=finder, options=options,
session=session):
req_to_add = install_req_from_parsed_requirement(
parsed_req,
isolated=options.isolated_mode,
user_supplied=False,
)
requirements.append(req_to_add)
for req in args:
req_to_add = install_req_from_line(
req, None, isolated=options.isolated_mode,
use_pep517=options.use_pep517,
user_supplied=True,
)
requirements.append(req_to_add)
for req in options.editables:
req_to_add = install_req_from_editable(
req,
user_supplied=True,
isolated=options.isolated_mode,
use_pep517=options.use_pep517,
)
requirements.append(req_to_add)
# NOTE: options.require_hashes may be set if --require-hashes is True
for filename in options.requirements:
for parsed_req in parse_requirements(
filename,
finder=finder, options=options, session=session):
req_to_add = install_req_from_parsed_requirement(
parsed_req,
isolated=options.isolated_mode,
use_pep517=options.use_pep517,
user_supplied=True,
)
requirements.append(req_to_add)
# If any requirement has hash options, enable hash checking.
if any(req.has_hash_options for req in requirements):
options.require_hashes = True
if not (args or options.editables or options.requirements):
opts = {'name': self.name}
if options.find_links:
raise CommandError(
'You must give at least one requirement to {name} '
'(maybe you meant "pip {name} {links}"?)'.format(
**dict(opts, links=' '.join(options.find_links))))
else:
raise CommandError(
'You must give at least one requirement to {name} '
'(see "pip help {name}")'.format(**opts))
return requirements
@staticmethod
def trace_basic_info(finder):
# type: (PackageFinder) -> None
"""
Trace basic information about the provided objects.
"""
# Display where finder is looking for packages
search_scope = finder.search_scope
locations = search_scope.get_formatted_locations()
if locations:
logger.info(locations)
def _build_package_finder(
self,
options, # type: Values
session, # type: PipSession
target_python=None, # type: Optional[TargetPython]
ignore_requires_python=None, # type: Optional[bool]
):
# type: (...) -> PackageFinder
"""
Create a package finder appropriate to this requirement command.
:param ignore_requires_python: Whether to ignore incompatible
"Requires-Python" values in links. Defaults to False.
"""
link_collector = LinkCollector.create(session, options=options)
selection_prefs = SelectionPreferences(
allow_yanked=True,
format_control=options.format_control,
allow_all_prereleases=options.pre,
prefer_binary=options.prefer_binary,
ignore_requires_python=ignore_requires_python,
)
return PackageFinder.create(
link_collector=link_collector,
selection_prefs=selection_prefs,
target_python=target_python,
)
| [
"[email protected]"
]
| |
805fe9af7e02ba40fe0d350dea64824948b00342 | 7d08ef3bd07c73dd3f2ea7cf895446745ff4d631 | /student/migrations/0002_auto_20170202_0927.py | 7b58339fefc99639257a691eb43245eb83a7f844 | []
| no_license | Elite-Programmers/CAMS | 574eb946bf6c8c99cd6103f6c9ff5a75f9eeafd8 | a0327ba778778924165afbb5085c747cb722c026 | refs/heads/master | 2020-05-23T09:09:14.460122 | 2017-02-12T20:14:40 | 2017-02-12T20:14:40 | 80,438,454 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,559 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-02 09:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('student', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Att',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sur8', models.FloatField(blank=True, null=True)),
('year', models.DateField(blank=True, null=True)),
('slot', models.CharField(blank=True, max_length=1, null=True)),
('scode', models.CharField(blank=True, max_length=8, null=True)),
('day', models.CharField(blank=True, max_length=3, null=True)),
],
options={
'db_table': 'att',
'managed': False,
},
),
migrations.CreateModel(
name='FacEnr',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sur7', models.FloatField(blank=True, null=True)),
('fid', models.CharField(blank=True, max_length=7, null=True)),
('slot', models.CharField(blank=True, max_length=1, null=True)),
('day', models.CharField(blank=True, max_length=3, null=True)),
('section', models.CharField(max_length=1)),
('branch', models.CharField(blank=True, max_length=2, null=True)),
('year', models.FloatField(blank=True, null=True)),
('scode', models.CharField(blank=True, max_length=8, null=True)),
],
options={
'db_table': 'fac_enr',
'managed': False,
},
),
migrations.CreateModel(
name='StudentSubEnr',
fields=[
('sur3', models.FloatField(primary_key=True, serialize=False)),
('scode', models.CharField(blank=True, max_length=8, null=True)),
('sname', models.CharField(blank=True, max_length=50, null=True)),
('branch', models.CharField(blank=True, max_length=2, null=True)),
],
options={
'db_table': 'student_sub_enr',
'managed': False,
},
),
migrations.CreateModel(
name='TestSub',
fields=[
('sur5', models.FloatField(primary_key=True, serialize=False)),
('tno', models.FloatField(blank=True, null=True)),
('scode', models.CharField(blank=True, max_length=8, null=True)),
],
options={
'db_table': 'test_sub',
'managed': False,
},
),
migrations.CreateModel(
name='TestTme',
fields=[
('sur4', models.FloatField(primary_key=True, serialize=False)),
('tno', models.FloatField(blank=True, null=True)),
('dte', models.DateField(blank=True, null=True)),
('slot', models.CharField(blank=True, max_length=1, null=True)),
],
options={
'db_table': 'test_tme',
'managed': False,
},
),
migrations.AlterModelTable(
name='studentsemenr',
table='student_sem_enr',
),
]
| [
"[email protected]"
]
| |
8a94846bb25fe3e5e904a7897250e5bbd62b2faa | 5467ad49eedeababafa1a9287bf1e0d2263c44fd | /src/benchmarks/pyvnumpy.py | b26a1cba161807cddfa13fcf6f784c10eb819ee8 | []
| no_license | Shadesfear/bachelor | ea5456e8e89218890ea16d882872e2a8970b896e | 8dc07b6b1a516ee80bfe97feb599dffc3dc48614 | refs/heads/master | 2022-07-08T22:03:14.744995 | 2019-06-05T13:55:21 | 2019-06-05T13:55:21 | 170,278,348 | 0 | 0 | null | 2022-06-21T21:41:45 | 2019-02-12T08:12:21 | PostScript | UTF-8 | Python | false | false | 784 | py | import benchpress as bp
from benchpress.suite_util import BP_ROOT
scripts = [
('Python', 'python_random_centroids', ["10", "20", "50", "100", "1000", "5000"]),
('Numpy', 'numpy_random_centroids', ["10", "20", "50", "100", "1000", "5000"]),
# ('numpy_version', 'pure-numpy-kmeans', ["10", "20", "30", "40", "50", "100", "500"])
]
cmd_list = []
for label, name, sizes in scripts:
for size in sizes:
full_label = "%s/%s" % (label, size)
bash_cmd = "python /home/chris/Documents/bachelor/src/{script}.py {size}" \
.format(root=BP_ROOT, script=name, size=size)
cmd_list.append(bp.command(bash_cmd, full_label))
# Finally, we build the Benchpress suite, which is written to `--output`
bp.create_suite(cmd_list)
| [
"[email protected]"
]
| |
6a9feb892b18246836a3f72e8fa01e86e03bc329 | f2ef824c0ae5b40662a0ac72d5c122d29fae126b | /translation3.py | fd0126a89e684c1520e35718e541a77069f57556 | []
| no_license | novanator/translation-py | 1e02d0dc23705fa0729182f9ea4258b3f3db9e5c | 84188a32b27e541c072a7691ad915ac0a6aab984 | refs/heads/master | 2016-09-06T09:12:06.446249 | 2013-08-22T20:56:09 | 2013-08-22T20:56:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | """
Author: Novanator 2012
Program: Translation3
"""
import random
from btree import BinTree
with open("list.txt", "r") as file:
    contents = file.read()
words = contents.split()
random.shuffle(words)
tree = BinTree()
for i in words:
    tree.put(i)
l = []
for x in words:
    if tree.exists(x[::-1]) and x not in l:
        l.append(x)
l2 = []
for x in l:
    if x not in l2:
        l2.append(x)
        l2.append(x[::-1])
for i in range(0, len(l2), 2):
    print(l2[i], l2[i+1])
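# Example (assumed input, for illustration only): if list.txt contains the
# words "stressed desserts level cat", the loop above prints the pair
# "stressed desserts" (order depends on the shuffle) and "level level",
# since a palindrome is its own reversal; "cat" is skipped because "tac"
# is not in the tree.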
| [
"[email protected]"
]
| |
fdd8e9b78b9290cfe113da295706a008023d84a6 | 0280c9cdab7763ef6710e8f7ed6e94740dfda374 | /visualization/Python_Vis/lr_scheduler.py | 602c362be38f2bf6aa4c346a1a52807470e86ccb | []
| no_license | Sandy-Zeng/RandomLR | 73181bbd6d946d1163b7b337524b070285b61f20 | ec0f2ff7bf312c015e54b39815d140b8855ae145 | refs/heads/master | 2020-04-12T08:00:19.208337 | 2019-08-25T14:29:28 | 2019-08-25T14:29:28 | 162,378,477 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 18,490 | py | import numpy as np
import time
import math
from keras.callbacks import *
def U(tmp_lr,random_range):
np.random.seed(int(time.time()))
tmp_lr = np.random.random() * tmp_lr * random_range
# tmp_lr = tmp_lr + tmp_lr * np.random.random()
return tmp_lr
def UA(tmp_lr,random_range):
np.random.seed(int(time.time()))
tmp_lr = tmp_lr + tmp_lr * np.random.random() * random_range
return tmp_lr
def N(tmp_lr, mu=4, sigma=1):
np.random.seed(int(time.time()))
tmp_lr_factor = np.random.normal(mu, sigma)
tmp_lr_factor = abs(tmp_lr_factor) * tmp_lr
tmp_lr = tmp_lr + tmp_lr_factor
return tmp_lr
class StepDecay(Callback):
def __init__(self,epochs=200,init_lr=1e-3,distribution_method='N',random_potion=0.3,random_range=10):
super(StepDecay, self).__init__()
self.epochs = epochs
self.linear_init_lr = init_lr
self.distribution_method = distribution_method
self.random_potion = random_potion
self.random_range = random_range
self.count_down = 19
self.count = 0
self.random_lr = init_lr
self.last_lr = init_lr
self.beta = 0.5
def lr_schedule(self,epoch):
#Learning Rate Schedule
lr = self.linear_init_lr
left = 0
right = self.epochs * 0.4
if epoch > self.epochs * 0.9:
lr *= 0.5e-3
left = self.epochs * 0.9
right = self.epochs
elif epoch > self.epochs * 0.8:
lr *= 1e-3
left = self.epochs * 0.8
right = self.epochs * 0.9
elif epoch > self.epochs * 0.6:
lr *= 1e-2
left = self.epochs * 0.6
right = self.epochs * 0.8
elif epoch > self.epochs * 0.4:
lr *= 1e-1
left = self.epochs * 0.4
right = self.epochs * 0.6
if epoch == self.epochs * 0.9+1:
self.last_lr = self.linear_init_lr * 0.5e-3
elif epoch == self.epochs * 0.8+1:
self.last_lr = self.linear_init_lr * 1e-3
elif epoch == self.epochs * 0.6+1:
self.last_lr = self.linear_init_lr * 1e-2
elif epoch == self.epochs * 0.4+1:
self.last_lr = self.linear_init_lr * 1e-1
bounder = left + int((right - left) * self.random_potion)
if epoch < bounder:
print('Bounder:', bounder)
if self.distribution_method == 'U':
# if (epoch - left) < ((right - left)*(self.random_potion/2)):
# adaptive_range = (epoch-left)/float((right - left) * (self.random_potion)/2) * self.random_range + 0.1
# lr = U(lr,adaptive_range)
# else:
# lr = U(lr,self.random_range+0.1)
# adaptive_range = (right - epoch) / float(
# (right - left)) * self.random_range + 0.1
# lr = U(lr, adaptive_range)
lr = U(lr, self.random_range)
# lr = (lr + self.last_lr)/2
lr = self.beta * self.last_lr + (1-self.beta)*lr
self.last_lr = lr
if self.distribution_method == 'UC':
if self.count == 0:
lr = U(lr,self.random_range)
self.random_lr = lr
self.count = self.count_down
else:
lr = self.random_lr
self.count -= 1
if self.distribution_method == 'N':
lr = N(tmp_lr=lr,mu=self.random_range)
elif self.distribution_method == 'Base':
lr = lr
print('Learning rate: ', lr)
return lr
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'lr'):
raise ValueError('Optimizer must have a "lr" attribute.')
lr = float(K.get_value(self.model.optimizer.lr))
lr = self.lr_schedule(epoch=epoch)
K.set_value(self.model.optimizer.lr, lr)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
class StepDecayPost(Callback):
def __init__(self, epochs=200, init_lr=1e-3, distribution_method='N', random_portion=0.3, random_range=10):
super(StepDecayPost, self).__init__()
self.epochs = epochs
self.linear_init_lr = init_lr
self.distribution_method = distribution_method
self.random_portion = random_portion
self.random_range = random_range
self.count_down = 19
self.count = 0
self.random_lr = init_lr
def lr_schedule(self,epoch):
#Learning Rate Schedule
lr = self.linear_init_lr
left = 0
right = self.epochs * 0.4
if epoch > self.epochs * 0.9:
lr *= 0.5e-3
left = self.epochs * 0.9
right = self.epochs
elif epoch > self.epochs * 0.8:
lr *= 1e-3
left = self.epochs * 0.8
right = self.epochs * 0.9
elif epoch > self.epochs * 0.6:
lr *= 1e-2
left = self.epochs * 0.6
right = self.epochs * 0.8
elif epoch > self.epochs * 0.4:
lr *= 1e-1
left = self.epochs * 0.4
right = self.epochs * 0.6
bounder = left + int((right - left) * self.random_portion)
if epoch < bounder and epoch>self.epochs*0.4:
print('Bounder:', bounder)
if self.distribution_method == 'U':
lr = U(lr, self.random_range)
if self.distribution_method == 'UA':
lr = UA(lr,self.random_range)
if self.distribution_method == 'UC':
if self.count == 0:
lr = U(lr,self.random_range)
self.random_lr = lr
self.count = self.count_down
else:
lr = self.random_lr
self.count -= 1
if self.distribution_method == 'N':
lr = N(lr)
elif self.distribution_method == 'Base':
lr = lr
print('Learning rate: ', lr)
return lr
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'lr'):
raise ValueError('Optimizer must have a "lr" attribute.')
lr = self.lr_schedule(epoch=epoch)
K.set_value(self.model.optimizer.lr, lr)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
class BatchRLR(Callback):
def __init__(self,epochs=200,init_lr=1e-3,distribution_method='N',random_potion=0.3,random_range=10):
super(BatchRLR, self).__init__()
self.epochs = epochs
self.linear_init_lr = init_lr
self.distribution_method = distribution_method
self.random_potion = random_potion
self.random_range = random_range
self.count_down = 19
self.count = 0
self.last_lr = init_lr
self.beta = 0.7
self.base_lr = init_lr
def lr_schedule(self,batch):
#Learning Rate Schedule
lr = self.base_lr
if self.distribution_method == 'U':
lr = U(lr, self.random_range)
lr = self.beta * self.last_lr + (1-self.beta) * lr
if self.distribution_method == 'N':
            lr = N(lr, mu=self.random_range)
elif self.distribution_method == 'Base':
lr = lr
return lr
def on_batch_begin(self, batch, logs=None):
lr = float(K.get_value(self.model.optimizer.lr))
lr = self.lr_schedule(batch=batch)
K.set_value(self.model.optimizer.lr, lr)
def on_epoch_begin(self, epoch, logs=None):
if epoch > self.epochs * 0.9:
self.base_lr *= 0.5e-3
elif epoch > self.epochs * 0.8:
self.base_lr *= 1e-3
elif epoch > self.epochs * 0.6:
self.base_lr *= 1e-2
elif epoch > self.epochs * 0.4:
self.base_lr *= 1e-1
if epoch == self.epochs * 0.9 + 1:
self.last_lr = self.linear_init_lr * 0.5e-3
elif epoch == self.epochs * 0.8 + 1:
self.last_lr = self.linear_init_lr * 1e-3
elif epoch == self.epochs * 0.6 + 1:
self.last_lr = self.linear_init_lr * 1e-2
elif epoch == self.epochs * 0.4 + 1:
self.last_lr = self.linear_init_lr * 1e-1
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
class Constant(Callback):
def __init__(self,epochs=200,init_lr=1e-3,distribution_method='N',random_potion=0.3,random_range=10):
super(Constant, self).__init__()
self.epochs = epochs
self.linear_init_lr = init_lr
self.distribution_method = distribution_method
self.random_potion = random_potion
self.random_range = random_range
def lr_schedule(self,epoch):
#Learning Rate Schedule
lr = self.linear_init_lr
left = 0
right = self.epochs
bounder = left + int((right - left) * self.random_potion)
if epoch < bounder:
print('Bounder:', bounder)
if self.distribution_method == 'U':
lr = U(lr,self.random_range)
if self.distribution_method == 'N':
lr = N(lr,mu=self.random_range)
elif self.distribution_method == 'Base':
lr = lr
print('Learning rate: ', lr)
return lr
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'lr'):
raise ValueError('Optimizer must have a "lr" attribute.')
lr = float(K.get_value(self.model.optimizer.lr))
lr = self.lr_schedule(epoch=epoch)
K.set_value(self.model.optimizer.lr, lr)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
class DenseNetSchedule(Callback):
def __init__(self,epochs=300,init_lr=1e-3,distribution_method='N',random_range=10,random_potion=0.3):
super(DenseNetSchedule,self).__init__()
self.epochs = epochs
self.linear_init_lr = init_lr
self.distribution_method = distribution_method
self.random_range = random_range
self.random_potion = random_potion
def lr_schedule(self,epoch):
# Learning Rate Schedule
lr = self.linear_init_lr
left = 0
right = self.epochs * 0.5
if epoch >= self.epochs * 0.75:
lr *= 1e-2
left = self.epochs * 0.75
right = self.epochs
elif epoch >= self.epochs * 0.5:
lr *= 1e-1
left = self.epochs * 0.5
right = self.epochs * 0.75
bounder = left + int((right - left) * self.random_potion)
if epoch < bounder and epoch>= self.epochs*0.5:
print('Bounder:', bounder)
if self.distribution_method == 'U':
lr = U(lr, self.random_range)
if self.distribution_method == 'N':
lr = N(lr, mu=self.random_range)
elif self.distribution_method == 'Base':
lr = lr
print('Learning rate: ', lr)
return lr
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'lr'):
raise ValueError('Optimizer must have a "lr" attribute.')
# lr = float(K.get_value(self.model.optimizer.lr))
lr = self.lr_schedule(epoch=epoch)
K.set_value(self.model.optimizer.lr, lr)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
class Warm_Start_Scheduler(Callback):
def __init__(self,init_lr=1e-3,Te=10,multFac=2,distribution_method='N',random_range=10,random_potion=0.5,epochs=200):
super(Warm_Start_Scheduler,self).__init__()
self.Te = Te
self.tt = 0
self.t0 = math.pi / 2.0
self.TeNext = Te
self.multFactor = multFac
self.init_lr = init_lr
self.distribution_method = distribution_method
self.random_range = random_range
self.random_potion = random_potion
self.epochs = epochs
self.iscycle = True
self.last_lr = init_lr
def lr_schedule(self,epoch):
def WRSGN(epoch, tmp_lr):
dt = 2.0 * math.pi / float(2.0 * self.Te)
self.tt = self.tt + float(dt)
if self.tt >= math.pi:
self.tt = self.tt - math.pi
curT = self.t0 + self.tt
new_lr = tmp_lr * (1.0 + math.sin(curT)) / 2.0 # lr_min = 0, lr_max = lr
if (epoch + 1 == self.TeNext): # time to restart
self.tt = 0 # by setting to 0 we set lr to lr_max, see above
self.Te = self.Te * self.multFactor # change the period of restarts
self.TeNext = self.TeNext + self.Te # note the next restart's epoch
if self.TeNext > self.epochs:
self.iscycle = False
self.last_lr = new_lr
return new_lr
lr = self.init_lr
if self.iscycle:
lr = WRSGN(epoch, lr)
else:
lr = self.last_lr
if epoch < self.epochs * self.random_potion and epoch>80 and epoch<130:
if self.distribution_method == 'U':
lr = U(lr, self.random_range)
if self.distribution_method == 'N':
lr = N(lr, mu=self.random_range)
elif self.distribution_method == 'Base':
lr = lr
print('Learning rate: ', lr)
return lr
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'lr'):
raise ValueError('Optimizer must have a "lr" attribute.')
lr = float(K.get_value(self.model.optimizer.lr))
lr = self.lr_schedule(epoch=epoch)
K.set_value(self.model.optimizer.lr, lr)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
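# Usage sketch (illustrative, not from the original file): SGDR-style warm
# restarts with a first cycle of 10 epochs that doubles after each restart.
#
#     wr = Warm_Start_Scheduler(init_lr=1e-3, Te=10, multFac=2,
#                               distribution_method='Base', epochs=200)
#     model.fit(x_train, y_train, epochs=200, callbacks=[wr])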
class Exp(Callback):
def __init__(self,epochs=200,init_lr=1e-3,decay_rate=0.96,decay_step=1000,distribution_method='N',random_potion=0.3,random_range=10):
super(Exp,self).__init__()
self.epochs = epochs
self.linear_init_lr = init_lr
self.distribution_method = distribution_method
self.random_potion = random_potion
self.random_range = random_range
self.decay_rate = decay_rate
self.global_step = 0.
self.decay_step = decay_step
self.history = {}
self.israndom = False
def lr_schedule(self):
lr = self.linear_init_lr
lr = lr * math.pow(self.decay_rate,math.floor(self.global_step/ self.decay_step))
if self.israndom == True:
if self.distribution_method == 'U':
lr = U(lr, self.random_range)
if self.distribution_method == 'N':
lr = N(lr, mu=self.random_range)
elif self.distribution_method == 'Base':
lr = lr
# print('Learning rate: ', lr)
return lr
    def on_train_begin(self, logs=None):
logs = logs or {}
print(self.global_step)
if self.global_step == 0:
print(self.linear_init_lr)
K.set_value(self.model.optimizer.lr, self.linear_init_lr)
else:
K.set_value(self.model.optimizer.lr, self.lr_schedule())
def on_batch_end(self, epoch, logs=None):
# lr = float(K.get_value(self.model.optimizer.lr))
logs = logs or {}
self.history.setdefault('lr', []).append(K.get_value(self.model.optimizer.lr))
self.history.setdefault('iterations', []).append(self.global_step)
for k, v in logs.items():
self.history.setdefault(k, []).append(v)
self.global_step = self.global_step + 1
lr = self.lr_schedule()
K.set_value(self.model.optimizer.lr, lr)
# def on_epoch_end(self, epoch, logs=None):
# logs = logs or {}
# logs['lr'] = K.get_value(self.model.optimizer.lr)
def on_epoch_begin(self, epoch, logs=None):
logs = logs or {}
if not hasattr(self.model.optimizer, 'lr'):
raise ValueError('Optimizer must have a "lr" attribute.')
lr = float(K.get_value(self.model.optimizer.lr))
logs['lr'] = lr
print('Learning Rate:',lr)
if epoch > 80 and epoch<130:
self.israndom = True
else:
self.israndom = False
class RetinaSchedule(Callback):
def __init__(self,epochs=150,init_lr=1e-1,distribution_method='N',random_range=10):
super(RetinaSchedule,self).__init__()
self.epochs = epochs
self.linear_init_lr = init_lr
self.distribution_method = distribution_method
self.random_range = random_range
def lr_schedule(self,epoch):
# Learning Rate Schedule
lr = self.linear_init_lr
if epoch > 140:
lr *= 1e-2
elif epoch > 120:
lr *= 1e-1
if epoch>120:
if self.distribution_method == 'U':
lr = U(lr, self.random_range)
if self.distribution_method == 'N':
lr = N(lr, mu=self.random_range)
elif self.distribution_method == 'Base':
lr = lr
print('Learning rate: ', lr)
return lr
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'lr'):
raise ValueError('Optimizer must have a "lr" attribute.')
# lr = float(K.get_value(self.model.optimizer.lr))
lr = self.lr_schedule(epoch=epoch)
K.set_value(self.model.optimizer.lr, lr)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
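# Usage sketch (illustrative, not from the original file): the plain step
# schedule drops the base rate at 40%/60%/80%/90% of training.
#
#     sd = StepDecay(epochs=200, init_lr=1e-3, distribution_method='Base')
#     model.fit(x_train, y_train, epochs=200, callbacks=[sd])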
| [
"[email protected]"
]
| |
75a66330ae5b0d0bb7cb5c16f81ab5a7898ce9bc | 29f56da5b7a0c7b5beeb514cf490cddc7845a6bc | /flask_app/code/services/clouds.py | 954d9b3ef60998490b9e66badf9ee288b296274a | []
| no_license | NikolaDojic/aiven-cloud-selection | 4e18da21da602656a0c9fd279473aa44fc36cadc | 6d5a3654bd3c496ae2e104a38650c841f2a15997 | refs/heads/master | 2023-03-06T23:20:14.062833 | 2021-02-21T20:34:21 | 2021-02-21T20:34:21 | 340,959,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,671 | py | import traceback
from math import sin, cos, sqrt, atan2, radians
from services.http_client import request
from services.platforms import platform_service
from services.regions import region_service
class CloudService:
def __init__(self):
self.clouds = []
self.get_clouds()
def get_clouds(self):
self.clouds = []
try:
self.clouds = [
self.__normalize_cloud(cloud)
for cloud in request("GET", "clouds").get("clouds", [])
]
platform_service.platforms_from_clouds(self.clouds)
region_service.regions_from_clouds(self.clouds)
except Exception as e:
traceback.print_exc()
print(e.__class__, e)
def filter_by_platform(self, platform_id, clouds=None):
clouds = clouds or self.clouds
if not platform_id:
return [*clouds]
filtered_clouds = [
cloud for cloud in clouds
if cloud["cloud_name"].startswith(f"{platform_id}-")
]
return filtered_clouds
def filter_by_region(self, region, clouds=None):
clouds = clouds or self.clouds
if not region:
return [*clouds]
filtered_clouds = [
cloud for cloud in clouds if cloud["location"]["region"] == region
]
return filtered_clouds
def filter_clouds(self, platform="", region=""):
clouds = self.filter_by_platform(platform)
clouds = self.filter_by_region(region, clouds)
return clouds
def __normalize_cloud(self, cloud):
location = {}
location_keys = [key for key in cloud if key.startswith("geo_")]
for key in location_keys:
location[key[4:]] = cloud[key]
del cloud[key]
cloud["location"] = location
return cloud
def get_closest_cloud(self, user_coordinates, clouds=None):
clouds = clouds or self.clouds
        shortest_distance = float("inf")  # sentinel larger than any real distance
        closest_cloud = None
        for cloud in clouds:  # honor the optional `clouds` argument resolved above
current_distance = distance_calculator(cloud["location"],
user_coordinates)
if shortest_distance > current_distance:
shortest_distance = current_distance
closest_cloud = cloud
return closest_cloud
#credit Michael0x2a https://stackoverflow.com/a/19412565/7562654
def distance_calculator(coord1, coord2):
# approximate radius of earth in km
R = 6371.0
lat1 = radians(coord1["latitude"])
lon1 = radians(coord1["longitude"])
lat2 = radians(coord2["latitude"])
lon2 = radians(coord2["longitude"])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance = R * c
return distance
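# Quick sanity check (illustrative figures, not from the original source):
# Helsinki (60.17, 24.94) to Stockholm (59.33, 18.07) should come out at
# roughly 400 km:
#
#     distance_calculator({"latitude": 60.17, "longitude": 24.94},
#                         {"latitude": 59.33, "longitude": 18.07})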
cloud_service = CloudService()
| [
"[email protected]"
]
| |
62363c4bcc24d91b45188cf7f657fda66070fe0d | 37146b1529bfb8094d5ef17734498aba1e701b33 | /python/python_course/pythonStudy4/FangWenKongZhiExample.py | 82883cdaa0bcd2c25e723093041cf2edf8fa576c | []
| no_license | nanfeng729/code-for-test | 9c8e3736ac4c86a43002a658faf37349817de130 | 28071453c38742bffd5b5bdf7461bffdaa6c96be | refs/heads/master | 2022-10-07T17:59:44.558278 | 2020-06-06T10:19:19 | 2020-06-06T10:19:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | class fwkz:
a = 0
b = 0
    _c = 0  # define a protected attribute (single leading underscore)
    __d = 0  # define a private attribute (name-mangled double underscore)
def jisuan(self):
return self.a + self.b
def jisuan2(self):
return self.a + self._c
def jisuan3(self):
return self.b + self.__d | [
"[email protected]"
]
| |
da9a6ff7b3c39d31a27e2c7a7ff298d9bdd5cb71 | cbc4ecd12791144f1755fccd27e51bf3e08cf53b | /account/migrations/0002_contact.py | 33a3e6729f55871839cee091a94837cae74d6fa3 | []
| no_license | Sr0o/bookmarks | b501c0123e1e84007f5981e04f7f8c64bfc49dd1 | f89a87d3a7f11454cb616e7ac350ecd71e1ddfd1 | refs/heads/master | 2021-05-07T01:03:40.380940 | 2017-11-11T03:41:10 | 2017-11-11T03:41:10 | 108,243,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-10-19 09:10
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('account', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, db_index=True)),
('user_from', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rel_from_set', to=settings.AUTH_USER_MODEL)),
('user_tor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rel_to_set', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-created',),
},
),
]
| [
"[email protected]"
]
| |
605745dd340911ea20f595bd26b3485acb318778 | ef5cf217c8f85f84a8c9ef4f508ccba547955cd8 | /OFB_api/server.py | 5af9457a930ebcdcaf23f16095e3d5e3966ab9ca | []
| no_license | Eleonore9/Envir_hack_OpenForBusiness | a28b0d605587cd3f6c1eb74eb31ba45903aa89f5 | 1e11db3bd9a66b67a45340ee90bdabe862286fd4 | refs/heads/master | 2021-01-22T07:18:10.908823 | 2015-05-09T23:21:33 | 2015-05-09T23:21:33 | 32,635,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | from flask import Flask, render_template
import get_amee_api as amee
app = Flask(__name__)
@app.route('/api/get_amee_data/')
# Use get_amee_api to retrieve data
# and feed it into the db
def feed_amee_data():
api_call = amee.get_amee_data()
json_data = amee.get_sustainability_data(api_call)
print json_data
return json_data
@app.route('/api/serve_amee_data/')
# Get data from the db and serve it
def serve_amee_data():
pass
if __name__ == '__main__':
app.run(debug=True)
| [
"[email protected]"
]
| |
60ba9feb268c4d6bdb08de9c05f99d96d934f28e | 6b95f96e00e77f78f0919c10b2c90f116c0b295d | /TelstraTPN/models/body.py | 6402d1a63505555d93481a28e94f4ec6e6af57af | []
| no_license | telstra/Programmable-Network-SDK-python | 0522b54dcba48e16837c6c58b16dabde83b477d5 | d1c19c0383af53a5f09a6f5046da466ae6e1d97a | refs/heads/master | 2021-09-19T17:09:06.831233 | 2018-07-30T03:22:26 | 2018-07-30T03:22:26 | 113,531,312 | 3 | 1 | null | 2018-07-30T03:22:27 | 2017-12-08T04:23:15 | Python | UTF-8 | Python | false | false | 3,864 | py | # coding: utf-8
"""
Telstra Programmable Network API
Telstra Programmable Network is a self-provisioning platform that allows its users to create on-demand connectivity services between multiple end-points and add various network functions to those services. Programmable Network enables to connectivity to a global ecosystem of networking services as well as public and private cloud services. Once you are connected to the platform on one or more POPs (points of presence), you can start creating those services based on the use case that you want to accomplish. The Programmable Network API is available to all customers who have registered to use the Programmable Network. To register, please contact your account representative. # noqa: E501
OpenAPI spec version: 2.4.2
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class Body(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'renewal_option': 'int'
}
attribute_map = {
'renewal_option': 'renewal-option'
}
def __init__(self, renewal_option=None): # noqa: E501
"""Body - a model defined in OpenAPI""" # noqa: E501
self._renewal_option = None
self.discriminator = None
if renewal_option is not None:
self.renewal_option = renewal_option
@property
def renewal_option(self):
"""Gets the renewal_option of this Body. # noqa: E501
\"Renewal Option: 0=Auto Disconnect, 1=Auto Renew, 2=Pay per hour\" # noqa: E501
:return: The renewal_option of this Body. # noqa: E501
:rtype: int
"""
return self._renewal_option
@renewal_option.setter
def renewal_option(self, renewal_option):
"""Sets the renewal_option of this Body.
\"Renewal Option: 0=Auto Disconnect, 1=Auto Renew, 2=Pay per hour\" # noqa: E501
:param renewal_option: The renewal_option of this Body. # noqa: E501
:type: int
"""
self._renewal_option = renewal_option
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Body):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
]
| |
4d7aee584bfa414d4dd6b80729501ae6eea347e9 | fc754f2fe3d3bc5ab0b2800a5dc0863e5918b705 | /Survival/BayesianOptimization /train_bayesian_rn.py | 3366d7746b60ec85f5f68b07c688248ecfdea015 | []
| no_license | eunji6546/studying_ML | 034b9f482961a15d4508910e0e2567ac69fb8357 | 96968d32d4ac65a5099d4f1596c36341540c7ac3 | refs/heads/master | 2021-01-01T18:59:44.119346 | 2018-05-28T15:40:27 | 2018-05-28T15:40:27 | 98,482,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,731 | py | from keras.layers import Dense, Dropout, Input
from keras.models import Model
import tensorflow as tf
import keras.backend.tensorflow_backend as ktf
from keras.callbacks import ModelCheckpoint
import os
import argparse
import pickle
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.decomposition import PCA
import pickle
from bayes_opt import BayesianOptimization
import
from sklearn.decomposition import PCA
import pandas as pd
import rn_ae
import variables
# RN version
def cindex(cen, surv, y_pred):
N = surv.shape[0]
Comparable = np.zeros([N,N])
for i in range(N):
for j in range(N):
if cen[i] == 0 and cen[j] == 0:
if surv[i] != surv[j]:
Comparable[i, j] = 1
elif cen[i] == 1 and cen[j] == 1:
Comparable[i, j] = 0
else: # one sample is censored and the other is not
if cen[i] == 1:
if surv[i] >= surv[j]:
Comparable[i, j] = 1
else:
Comparable[i, j] = 0
else: # cen[j] == 1
if surv[j] >= surv[i]:
Comparable[i, j] = 1
else:
Comparable[i, j] = 0
p2, p1 = np.where(Comparable==1)
Y = y_pred
c=0
N_valid_sample = p1.shape[0]
for i in range(N_valid_sample):
if cen[p1[i]] == 0 and cen[p2[i]] == 0:
if Y[p1[i]] == Y[p2[i]]:
c = c + 0.5
elif Y[p1[i]] > Y[p2[i]] and surv[p1[i]] > surv[p2[i]]:
c = c + 1
elif Y[p2[i]] > Y[p1[i]] and surv[p2[i]] > surv[p1[i]]:
c = c + 1
elif cen[p1[i]] == 1 and cen[p2[i]] == 1:
continue # do nothing - samples cannot be ordered
else: # one sample is censored and the other is not
if cen[p1[i]] == 1:
if Y[p1[i]] > Y[p2[i]] and surv[p1[i]] > surv[p2[i]]:
c = c + 1
elif Y[p1[i]] == Y[p2[i]]:
c = c + 0.5
else: # cen[p2[i]] == 1
if Y[p2[i]] > Y[p1[i]] and surv[p2[i]] > surv[p1[i]]:
c = c + 1
elif Y[p1[i]] == Y[p2[i]]:
c = c + 0.5
c = c*1.0 / N_valid_sample
return c
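# Quick illustration (not part of the original script): with no censoring,
# survival times [1, 2, 3] and predictions in the same order, every
# comparable pair is concordant, so the c-index is 1.0:
#
#     cindex(np.zeros(3), np.array([1, 2, 3]), np.array([0.1, 0.2, 0.3]))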
def run_rn(g_mlp_layers_0, f_mlp_layers_0, rn_dim):
    # (g_mlp_layers_0, g_mlp_layers_1, f_mlp_layers_0, f_mlp_layers_1, rn_dim):
os.environ["CUDA_VISIBLE_DEVICES"] = "3" #gpunum
def get_session(gpu_fraction=1):
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction,
allow_growth=True)
return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
ktf.set_session(get_session())
# Hyper Parameters
g_mlp_layers_0 = int(g_mlp_layers_0) * 16 * 2
g_mlp_layers_1 = int(g_mlp_layers_0) #* 16
f_mlp_layers_0 = int(f_mlp_layers_0) * 32 * 2
f_mlp_layers_1 = int(f_mlp_layers_0) #* 32
rn_dim = int(rn_dim)
variables.rn_dim = rn_dim
g_MLP_layers = [g_mlp_layers_0, g_mlp_layers_1]
f_MLP_layers = [f_mlp_layers_0, f_mlp_layers_1]
rn_args = {
"g_MLP_layers": g_MLP_layers,
"f_MLP_layers": f_MLP_layers,
"learning_rate": variables.rn_learning_rate,
"epochs": variables.rn_epochs,
"batch_size": variables.rn_batch_size,
"model_name": "rn_dim%d_qry%s_by%s" %(variables.rn_dim, str(variables.rn_query), str(variables.rn_relation_by))
}
print(rn_args)
# Load Data
cancer_type_list = ['LUAD', 'LUSC']
for cancer_type in cancer_type_list:
# TODO : do i need to change this to K-fold??
rn_data_all = rn_ae.load_data_by_sample(cancer_type)
        X = rn_data_all.drop(['y', 'c'], axis=1).values
        print("X.shape ", X.shape)
        Y = rn_data_all['y'].values
        print("Y.shape ", Y.shape)
        C = rn_data_all['c'].values
        print("C.shape ", C.shape)
x_trn, x_tst, y_trn, y_tst, c_trn, c_tst = \
train_test_split(X, Y, C, test_size=80, random_state=7)
model = rn_ae.train_RNs(x_trn, y_trn, rn_args)
rn_predict = model.predict(x_tst)
rn_score = cindex(c_tst, y_tst, rn_predict)
print "[", cancer_type,"]"
print rn_args
print rn_score
return rn_score
if __name__ == "__main__":
    results = []
    cancer_type = 'LUAD'   # assumed: tag for the results file (run_rn itself loops over both types)
    feature_type = 'rn'    # assumed: free-form tag, never defined in the original script
    bo_dict = {
"g_mlp_layers_0" : (1, 3),
#"g_mlp_layers_1"
"f_mlp_layers_0" : (1, 3),
# "f_mlp_layers_1"
"rn_dim" : (500, 1500)
}
v1BO = BayesianOptimization(run_rn, bo_dict,verbose=True)
v1BO.explore({
"g_mlp_layers_0" : (1,1, 3),
#"g_mlp_layers_1"
"f_mlp_layers_0" : (1,1, 3),
# "f_mlp_layers_1"
"rn_dim" : (500, 500, 1500)
})
gp_params = {"alpha": 1e-5}
v1BO.maximize(init_points = 2, n_iter=30, acq='ucb', kappa=5)
print('Final Results')
#print('max %f' % v1BO.res['max']['max_val'])
#print('***<max>****')
#print(v1BO.res['max'])
#print('***<all>***')
#print(v1BO.res['all'])
results.append(v1BO.res['all'])
#print(results)
print(v1BO.res)
    with open('./rn/BO_Result_' + cancer_type + '.txt', 'at') as f:
        params = v1BO.res['all']['params']
        values = v1BO.res['all']['values']
        keys = params[0].keys()
        for i in range(2):
            line = [cancer_type, feature_type]
            for k in keys:
                line.append(str(params[i][k]))
            line.append(str(values[i]))
            f.write('\t'.join(line) + '\n')
| [
"[email protected]"
]
| |
62f589b3a1d1afe7c7c16ad3f3034d502934f87a | f9392b6e2def41d2c2dc2f50611237cd65ecb444 | /nthToLast.py | e2615074efdebd60b468855777f1d2752dd4f58c | []
| no_license | chloevo26/Python-Data-Structures | f648c63bba6b298c45b428fbb315f741d8f25d53 | 198f915bcac4a89cc1d2f92c9be4f7999d3041cf | refs/heads/master | 2022-04-12T17:50:49.628270 | 2020-03-26T18:15:32 | 2020-03-26T18:16:41 | 250,172,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | class Node(object):
def __init__(self, data):
self.data = data
self.next = None
def nth_to_last_node(n, head):
left_pointer = head
right_pointer = head
    for i in range(n - 1):
        # walk the right pointer n-1 nodes ahead of the left pointer
        if not right_pointer.next:
            raise LookupError('Error: n is larger than the linked list')
        right_pointer = right_pointer.next
    # advance both pointers until the right one sits on the last node
    while right_pointer.next:
        left_pointer = left_pointer.next
        right_pointer = right_pointer.next
return left_pointer
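# Minimal usage sketch (not part of the original file): build a five-node
# list a -> b -> c -> d -> e; the 2nd-to-last node is "d".
if __name__ == '__main__':
    nodes = [Node(ch) for ch in 'abcde']
    for first, second in zip(nodes, nodes[1:]):
        first.next = second
    print(nth_to_last_node(2, nodes[0]).data)  # expected output: d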
| [
"[email protected]"
]
| |
a177e253ea50e040cf4d5e07336b7f3ab00906d1 | dff94b33103d645a2a7022200ea77b691ef7fbe9 | /back/migrations/0001_initial.py | c6a8150a8cd9ddd7d1cb5fda71bb5ad97c37fabc | []
| no_license | chrispsk/dw | 07f4babd503108807b233bc19a9474e2ef089486 | d464a26fcb6f43f79120f9751b9aee1bc0a1d6df | refs/heads/main | 2023-03-28T00:53:22.701182 | 2021-04-03T18:06:03 | 2021-04-03T18:06:03 | 349,007,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,194 | py | # Generated by Django 2.2.5 on 2021-03-18 12:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Vulnerability',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('vul_name', models.CharField(max_length=200)),
('summary', models.TextField()),
('severity', models.CharField(max_length=20)),
],
options={
'db_table': 'vulnerabilities',
},
),
migrations.CreateModel(
name='Date',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('publish_date', models.DateTimeField()),
('vul_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='back.Vulnerability')),
],
options={
'db_table': 'dates',
},
),
]
| [
"[email protected]"
]
| |
b78dd6f1233d96d74d8b8bfd51465d2c4ae27fe7 | f25ebcee8c6656ec182f7b5c7775a5deebccdc47 | /flask_example_venv/bin/tree-cli | d7f717a71f163acb8ea765e939b52e640c2845c2 | []
| no_license | ThourayaBchir/flask_app_example | e2efeb60e20a983f183c2a3473b4d1b7f0091929 | 4c515ee0470a696917e894fd18d2f0def3c7ea10 | refs/heads/master | 2021-02-12T13:56:52.902798 | 2020-03-03T09:43:05 | 2020-03-03T09:43:05 | 244,597,078 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | #!/Users/thy/Documents/flask_app_example/flask_example_venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from Tree.cli import create_tree
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(create_tree())
| [
"[email protected]"
]
| ||
c1bc4002b45701e9ddcbdae5fbd9f337effbe930 | 587dbdf730b6cc3e693efc5dca5d83d1dd35ee1a | /leetcode/1501-1800/1785.py | d13fff05ea446efdd48f2dcbc0f23ee12d81b53b | []
| no_license | Rivarrl/leetcode_python | 8db2a15646d68e4d84ab263d8c3b6e38d8e3ea99 | dbe8eb449e5b112a71bc1cd4eabfd138304de4a3 | refs/heads/master | 2021-06-17T15:21:28.321280 | 2021-03-11T07:28:19 | 2021-03-11T07:28:19 | 179,452,345 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | # -*- coding: utf-8 -*-
# ======================================
# @File : 1785
# @Time : 2021/3/8 12:20
# @Author : Rivarrl
# ======================================
from algorithm_utils import *
class Solution:
"""
[1785. 构成特定和需要添加的最少元素](https://leetcode-cn.com/problems/minimum-elements-to-add-to-form-a-given-sum/)
"""
@timeit
def minElements(self, nums: List[int], limit: int, goal: int) -> int:
return (abs(sum(nums) - goal) + limit - 1) // limit
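    # Worked check (illustration): each added element can contribute at most
    # `limit` in absolute value, so covering diff = goal - sum(nums) needs
    # ceil(|diff| / limit) elements. For nums=[1,-1,1], limit=3, goal=-4:
    # diff = -5, ceil(5/3) = 2, e.g. add -3 and -2.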
if __name__ == '__main__':
a = Solution()
a.minElements(nums = [1,-1,1], limit = 3, goal = -4)
a.minElements(nums = [1,-10,9,1], limit = 100, goal = 0) | [
"[email protected]"
]
| |
6f4f236a04b08ff986588d8d74bf27e19b3776ce | a9958f7c7887a92ec9fc48b02ed8a5cb75a03311 | /db.py | 1b274e58f4478c7f209d2e9b19cf25ce7d613166 | []
| no_license | ahmedfadhil/Dynamically-Weighted-Bandwidth- | 816c18777b49f3520433e65accf9e179f64e0836 | 1c8821aec73f32e704d12cebffcda01d1319bc80 | refs/heads/master | 2021-01-02T09:39:58.814463 | 2017-08-03T21:55:26 | 2017-08-03T21:55:26 | 99,271,051 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,374 | py | import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
style.use('ggplot')
X, y = make_blobs(n_samples=15, centers=3, n_features=2)
##X = np.array([[1, 2],
## [1.5, 1.8],
## [5, 8],
## [8, 8],
## [1, 0.6],
## [9, 11],
## [8, 2],
## [10, 2],
## [9, 3]])
##plt.scatter(X[:, 0],X[:, 1], marker = "x", s=150, linewidths = 5, zorder = 10)
##plt.show()
'''
1. Start at every datapoint as a cluster center
2. take mean of radius around cluster, setting that as new cluster center
3. Repeat #2 until convergence.
'''
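# Implementation notes (descriptive, added for clarity): when no radius is
# given, fit() estimates a bandwidth dynamically as ||mean(data)|| divided by
# radius_norm_step. Each point then pulls on a centroid with weight w**2,
# where w falls linearly from radius_norm_step - 1 for the nearest points
# down to 0 at the edge of the window, so closer points count far more.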
class Mean_Shift:
def __init__(self, radius=None, radius_norm_step=100):
self.radius = radius
self.radius_norm_step = radius_norm_step
def fit(self, data):
if self.radius == None:
all_data_centroid = np.average(data, axis=0)
all_data_norm = np.linalg.norm(all_data_centroid)
self.radius = all_data_norm / self.radius_norm_step
print(self.radius)
centroids = {}
for i in range(len(data)):
centroids[i] = data[i]
weights = [i for i in range(self.radius_norm_step)][::-1]
while True:
new_centroids = []
for i in centroids:
in_bandwidth = []
centroid = centroids[i]
for featureset in data:
distance = np.linalg.norm(featureset - centroid)
if distance == 0:
distance = 0.00000000001
weight_index = int(distance / self.radius)
if weight_index > self.radius_norm_step - 1:
weight_index = self.radius_norm_step - 1
to_add = (weights[weight_index] ** 2) * [featureset]
in_bandwidth += to_add
new_centroid = np.average(in_bandwidth, axis=0)
new_centroids.append(tuple(new_centroid))
uniques = sorted(list(set(new_centroids)))
to_pop = []
for i in uniques:
                for ii in list(uniques):
if i == ii:
pass
elif np.linalg.norm(np.array(i) - np.array(ii)) <= self.radius:
# print(np.array(i), np.array(ii))
to_pop.append(ii)
break
for i in to_pop:
try:
uniques.remove(i)
except:
pass
prev_centroids = dict(centroids)
centroids = {}
for i in range(len(uniques)):
centroids[i] = np.array(uniques[i])
optimized = True
for i in centroids:
if not np.array_equal(centroids[i], prev_centroids[i]):
optimized = False
if optimized:
break
self.centroids = centroids
self.classifications = {}
for i in range(len(self.centroids)):
self.classifications[i] = []
for featureset in data:
# compare distance to either centroid
distances = [np.linalg.norm(featureset - self.centroids[centroid]) for centroid in self.centroids]
# print(distances)
classification = (distances.index(min(distances)))
# featureset that belongs to that cluster
self.classifications[classification].append(featureset)
def predict(self, data):
# compare distance to either centroid
distances = [np.linalg.norm(data - self.centroids[centroid]) for centroid in self.centroids]
classification = (distances.index(min(distances)))
return classification
clf = Mean_Shift()
clf.fit(X)
centroids = clf.centroids
print(centroids)
colors = 10 * ['r', 'g', 'b', 'c', 'k', 'y']
for classification in clf.classifications:
color = colors[classification]
for featureset in clf.classifications[classification]:
plt.scatter(featureset[0], featureset[1], marker="x", color=color, s=150, linewidths=5, zorder=10)
for c in centroids:
plt.scatter(centroids[c][0], centroids[c][1], color='k', marker="*", s=150, linewidths=5)
plt.show() | [
"[email protected]"
]
| |
24cd0e7ef358d89c027efe1f498eef298d992f52 | f7d2dff2aac6af35e7a3ef1dfbafdb15c1ab735a | /Source/MonthlyNicsFirearmBackgroundCheck.py | ff3ff7ea003ca8b1596ed1702c5b03339eb1d06b | []
| no_license | santhoshsundar/NICS-Firearm-Background-Check | 5114cb9963073c89685d10877e0640999040875a | 73d0d6fa28c0e2297137d459c22b0aaa11b30f9d | refs/heads/master | 2021-09-08T10:59:30.801178 | 2018-03-09T11:24:18 | 2018-03-09T11:24:18 | 124,420,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | import matplotlib
matplotlib.use('TkAgg')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sb
sb.set()
df = pd.read_csv('../Data/nics-firearm-background-checks.csv')
totals_monthly = df.groupby('month')['totals'].sum()
totals_monthly = totals_monthly[2:]
ticks = np.arange(2, len(totals_monthly), 12)
ax = plt.subplot()
ax.figure.set_facecolor("#FFFFFF")
ax = totals_monthly.plot(kind="area", figsize=(12, 8), color="grey", alpha=0.75)
ax.set_title("Monthly NICS Background Check Totals Since 1999", fontsize=24)
ax.set_xticks(ticks)
ax.set_xticklabels([ totals_monthly.index[i].split("-")[0] for i in ticks])
ax.set_yticklabels([ "{0:,.0f}".format(y) for y in ax.get_yticks() ], fontsize=12)
plt.margins(x=0)
plt.savefig('../Charts/month_nics_firearm_background_check.png') | [
"[email protected]"
]
| |
5feb057978dcef04f7941a69cf4669689c34a76d | ca7da6ff26b5bd739c2fe15acb5651c5a59823cb | /campaigns/migrations/0001_initial.py | 7a86f8d62b6ea0752372258680b70f7147986ce2 | []
| no_license | mihi-tr/cntrct | b015da328d186daa4e4a7ea47eb4048ac1dfb148 | d205f3c43df1e0e0f8d2a693274700d4511083fe | refs/heads/master | 2016-08-03T00:25:56.321637 | 2014-08-19T06:36:56 | 2014-08-19T06:36:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,866 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Campain'
db.create_table(u'campaigns_campain', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('change_id', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=512)),
('url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('overview', self.gf('django.db.models.fields.TextField')()),
('signature_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
('image_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
))
db.send_create_signal(u'campaigns', ['Campain'])
def backwards(self, orm):
# Deleting model 'Campain'
db.delete_table(u'campaigns_campain')
models = {
u'campaigns.campain': {
'Meta': {'object_name': 'Campain'},
'change_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'overview': ('django.db.models.fields.TextField', [], {}),
'signature_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
}
}
complete_apps = ['campaigns']
| [
"[email protected]"
]
| |
58e81d09ba3bc33e083459c4867d677e429e25f0 | f2137b5f8be137c990f73ad36adb2a9e405b49bf | /main/views.py | 4d10e3e1a3ff3f706c45bb2ec6866ad45c209d20 | []
| no_license | adrianmrit/timedbase | 0d100f2263f45058a7d22e64d2b71bef76a2ca9c | 49b85139f0c5bfa93133780457adf07e7fb27c0a | refs/heads/master | 2022-04-10T16:53:11.739659 | 2020-03-26T06:33:16 | 2020-03-26T06:33:16 | 244,293,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,981 | py | from django.shortcuts import render, get_object_or_404
from .models import Brand, Watch, Price, Store
from django.db.models.functions import Substr
from django.db.models import Count
import string
from django.views import View
from django.db.models import OuterRef, Subquery, Exists
from django.forms.models import model_to_dict
# Create your views here.
class IndexView(View):
def get(self, request):
# TODO: Better home page
return render(request, 'main/index.html')
class BrandsView(View):
    def get(self, request, letter=None):
count_letters_queryset = []
if not letter:
brands = Brand.objects.order_by('?')[0:10]
else:
brands = Brand.objects.filter(cleaned_name__startswith=letter)
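        # distinct first letters across all brands (via the Substr annotation),
        # so the template can mark which letters of the A-Z nav have entries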
available_alphabet = Brand.objects.all().annotate(first_letter=Substr('cleaned_name', 1, 1)).values_list("first_letter", flat=True).distinct()
# available_brands = count_letters_queryset[0].union(*count_letters_queryset[1:])
return render(request, 'main/brands.html', {'brands': brands, 'available_alphabet': available_alphabet, 'alphabet': string.ascii_lowercase, 'active_letter': letter})
class BrandView(View):
def get(self, request, id):
brand = get_object_or_404(Brand, pk=id)
return render(request, 'main/brand.html', {'brand': brand})
class WatchView(View):
def get(self, request, id):
watch = get_object_or_404(Watch, pk=id)
watch_details = model_to_dict(watch)
del watch_details['name']
del watch_details['url']
del watch_details['image']
del watch_details['description']
del watch_details['id']
del watch_details['cleaned_reference']
del watch_details['brand']
prices = Price.objects.filter(watch=watch).order_by('store', '-timestamp').distinct('store')
return render(request, 'main/watch.html', {'watch': watch, 'prices': prices, 'watch_details': watch_details}) | [
"[email protected]"
]
| |
eecd8d7e1cac6970d97109c32dbd46b80d850aa1 | c7cc67f02e23baf64309c800ba9d2d98c8b93455 | /CARTRegressor.py | 4ca3734a0a25ae9a458f03f27ad3813fb6019820 | []
| no_license | struggl/MachineLearning | fbbbd16d64a507d398bc47a3cf4f916903fdcf35 | e5f2b7da88fb4eed8aaf6e842b7ab1a8bbe0677a | refs/heads/master | 2020-07-23T17:59:57.494656 | 2019-12-29T09:12:54 | 2019-12-29T09:12:54 | 207,659,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,883 | py | '''CART decision tree for regression'''
import numpy as np
import collections
from MLBase import DecisionTreeRegressorBase
class CARTRegressor(DecisionTreeRegressorBase):
    '''CART decision tree regressor. Differences from the C4.5 decision tree classifier implementation:
    1. _chooseBestFeatureToSplit uses the sum of squared errors (_calSSE) instead of information gain (_calInformationGain)
    2. the default paths of the save_model and load_model methods
    '''
def __init__(self,dataDir,reader=None):
super().__init__(dataDir,reader)
def _calScore(self,xtrain,ytrain,feat,splitVal):
        '''Overrides the parent method of the same name, using the sum of squared errors (SSE).
        Unlike CART classification, the SSE after the split is not weighted in CART regression.
        First split the dataset in two by feature feat and split point splitVal, then compute the criterion gain.'''
xtrain = self._assert_xdata(xtrain)
ytrain = self._assert_ydata(ytrain)
left_data,right_data = self._splitDataSet(xtrain,ytrain,feat,splitVal)
        #gain of the SSE criterion: dataset SSE before the split minus after
baseSSE = self._calSSE(ytrain)
newSSE = 0
newSSE += self._calSSE(left_data[1]) + self._calSSE(right_data[1])
SSEGain = baseSSE - newSSE
return SSEGain
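    #Editorial note (illustrative, not from the original author): the SSE gain computed above is
    #    gain = SSE(D) - (SSE(D_left) + SSE(D_right)),  where SSE(D) = sum_i (y_i - mean(y))**2.
    #For example, y = [1, 2, 10, 11] has SSE 82.0; splitting at threshold 6.0 leaves
    #0.5 + 0.5 = 1.0, so the gain is 81.0.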
def _chooseBestFeatureToSplit(self,xtrain,ytrain,epsion=0):
        '''Choose the best split feature by the sum of squared errors (SSE). Returns None when the
        dataset has no features or the criterion gain of the best split does not exceed the threshold epsion.
        Args:
            epsion: minimum decrease of the loss function required for each node split, 0 by default
        '''
        if epsion < 0:
            raise ValueError('The node-split threshold epsion cannot be negative!')
numFeat = len(xtrain[0])
if numFeat < 1:
return None
bestGain = epsion
bestFeat = None
bestSplitVal = None
for feat in range(numFeat):
splitValList = sorted(list(set(xtrain[:,feat])))
for i in range(len(splitValList)-1):
splitVal = (splitValList[i]+splitValList[i+1]) / 2.0
                #criterion gain of the split: gain ratio for C4.5, SSE reduction for the CART regression tree
curGain = self._calScore(xtrain,ytrain,feat,splitVal)
if curGain > bestGain:
bestFeat = feat
bestSplitVal = splitVal
bestGain = curGain
if bestFeat != None:
return bestFeat,bestSplitVal,bestGain
def _fixdata(self):
self._reader._xtrain = np.asarray(self._reader._xtrain,dtype='float64')
self._reader._xtest = np.asarray(self._reader._xtest,dtype='float64')
self._reader._xeval = np.asarray(self._reader._xeval,dtype='float64')
def _get_examples(self,node):
        '''Return the samples stored on node'''
self._cur_model._validate(node)
return node._examples
def _splitDataSet(self,xdata,ydata,bestFeat,bestSplitVal):
        '''Split the dataset in two at the best feature's best split point'''
xdata = self._assert_xdata(xdata)
ydata = self._assert_ydata(ydata)
left_xdata = []
left_ydata = []
right_xdata = []
right_ydata = []
for i in range(len(xdata)):
if xdata[i][bestFeat] <= bestSplitVal:
left_xdata.append(xdata[i])
left_ydata.append(ydata[i])
else:
right_xdata.append(xdata[i])
right_ydata.append(ydata[i])
left_xdata = np.asarray(left_xdata,dtype=xdata.dtype)
left_ydata = np.asarray(left_ydata,dtype=ydata.dtype)
right_xdata = np.asarray(right_xdata,dtype=xdata.dtype)
right_ydata = np.asarray(right_ydata,dtype=ydata.dtype)
return (left_xdata,left_ydata),(right_xdata,right_ydata)
def _fit(self,xtrain,ytrain,examples,depth,max_depth,epsion):
        '''Train the CART regression tree.
        Core recursive procedure:
            for every candidate feature:
                sort all of its (continuous) values; the midpoints of adjacent values form the candidate split thresholds
                try a binary split at every candidate threshold and evaluate the criterion before and after the split
            the feature and threshold with the best criterion gain are the ones to use
        Args:
            epsion: float, 0 by default. Minimum required change of the split criterion (e.g.
                information gain, Gini index) before vs. after splitting the current node when
                choosing the best feature and split point.
        '''
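        #Illustrative note (not in the original source): feature values [1, 3, 7] yield the
        #candidate thresholds [(1+3)/2, (3+7)/2] = [2.0, 5.0].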
        if max_depth is not None and depth > max_depth: #recursion exit 1: the tree reached the maximum depth, return None
return
freq = 0
for lb in ytrain:
if lb == ytrain[0]:
freq += 1
        if freq == len(ytrain): #recursion exit 2: all samples share the same target value, return a leaf node
cur_node = self.Node(value=ytrain[0],
examples=examples,
depth=depth)
cur_node._loss = 0
if self._cur_model._root is None:
self._cur_model.add_root(cur_node)
return cur_node
        #choose the best split feature and the best split threshold
cur_loss = self._calSSE(ytrain)
res = self._chooseBestFeatureToSplit(xtrain,ytrain,epsion=epsion)
        if res is None: #recursion exit 3: no feature can be split any further, return a leaf node
cur_node = self.Node(value=np.mean(ytrain),
examples=examples,
loss=cur_loss,
depth=depth)
if self._cur_model._root is None:
self._cur_model.add_root(cur_node)
return cur_node
bestFeat,bestSplitVal,gain = res
resNode = self.Node(feature=bestFeat,
splitVal=bestSplitVal,
gain=gain,
loss=cur_loss,
examples=examples,
depth=depth)
if self._cur_model._root is None:
self._cur_model.add_root(resNode)
        #non-root nodes are counted once by their parent via "_size += 2" below, avoiding double counting
        #only split the current node while its depth is below the limit max_depth
if max_depth is None or depth < max_depth:
            #split the dataset for the left and right children by the best feature and split point;
            #the returned left_dataSet is (left_xtrain, left_ytrain), and right_dataSet likewise
left_dataSet,right_dataSet = self._splitDataSet(xtrain,ytrain,bestFeat,bestSplitVal)
left = self._fit(xtrain=left_dataSet[0],
ytrain=left_dataSet[1],
examples=left_dataSet,
depth=resNode._depth+1,
max_depth=max_depth,
epsion=epsion)
right = self._fit(xtrain=right_dataSet[0],
ytrain=right_dataSet[1],
examples=right_dataSet,
depth=resNode._depth+1,
max_depth=max_depth,
epsion=epsion)
            #point the parent node at its children
resNode._left = left
resNode._right = right
            #maintain the children's _parent attribute
left._parent = resNode
right._parent = resNode
            #maintain the children's _parent_split_feature_val attribute
left._parent_split_feature_val = (bestFeat,bestSplitVal,'left')
right._parent_split_feature_val = (bestFeat,bestSplitVal,'right')
            #maintain the node count of the tree; the build process guarantees that a node that splits always has two children
self._cur_model._size += 2
        #if the current node was not split (the depth limit was reached), turn it into a leaf node
if self._cur_model.num_children(resNode) == 0:
resNode._feature = None
resNode._value = np.mean(ytrain)
return resNode
    #-------------------------------------------------public interface------------------------------------------------
def fit(self,xtrain=None,
ytrain=None,
examples=None,
depth=None,
max_depth=None,
alpha_leaf=0,
bool_prune=False,
epsion=0.0):
"""模型拟合的公开接口。若训练数据集未直接提供,则使用self._reader读取训练数据集
Args:
alpha_leaf:后剪枝对叶结点的正则化超参数,有效取值大于等于0.
epsion:float.默认为0.选取最优特征和最优分割点时,当前结点分裂前后指标(例如信息增益,基尼指数)
变化量的最小阈值。
"""
if xtrain is None or ytrain is None:
self._fixdata()
self._cur_model = self.DecisionTree()
self._fit(xtrain=self._reader._xtrain,
ytrain=self._reader._ytrain,
examples=(self._reader._xtrain,self._reader._ytrain),
depth=1,
max_depth=max_depth,
epsion=epsion)
else:
xtrain = self._assert_xdata(xtrain)
ytrain = self._assert_ydata(ytrain)
self._cur_model = self.DecisionTree()
            self._fit(xtrain=xtrain,
                ytrain=ytrain,
                examples=(xtrain,ytrain),
depth=1,
max_depth=max_depth,
epsion=epsion)
if bool_prune:
self._prune(alpha_leaf=alpha_leaf)
def bool_not_trained(self,tree=None):
        '''Check whether the decision tree has been trained by inspecting only the root node;
        the _fit and _prune update procedures are assumed to keep the other nodes consistent'''
if tree is None:
tree = self._cur_model
if tree is None or tree._root is None:
return True
if tree._root._left is None and tree._root._right is None and tree._root._value is None:
return True
return False
def predict(self,xtest=None,bool_use_stored_model=False):
        '''Public interface for prediction'''
if bool_use_stored_model:
use_model = self._stored_model
else:
use_model = self._cur_model
if self.bool_not_trained(use_model):
            raise self.NotTrainedError('Cannot predict: the decision tree regressor has not been trained!')
if xtest is None:
            cur_xtest = self._reader._xtest
else:
cur_xtest = self._assert_xdata(xtest)
if use_model.is_leaf(use_model._root):
preds = [use_model._root._value] * len(cur_xtest)
return np.asarray(preds)
preds = [None] * len(cur_xtest)
for i in range(len(cur_xtest)):
node = use_model._root
while not use_model.is_leaf(node):
if cur_xtest[i][node._feature] <= node._splitVal:
node = node._left
else:
node = node._right
preds[i] = node._value
return np.asarray(preds)
def eval(self,bool_use_stored_model=False,method=None):
preds = self.predict(self._reader._xeval,bool_use_stored_model)
return preds,self._evaluator.eval(preds,self._reader._yeval,method)
def save_model(self,path=None):
        '''Serialize the decision tree regressor'''
if self.bool_not_trained():
            raise self.NotTrainedError('Cannot serialize the model: the decision tree regressor has not been trained!')
if path is None:
cur_path = self._reader._dataDir + '/CARTRegressor.pkl'
else:
cur_path = path
import pickle
with open(cur_path,'wb') as f:
pickle.dump(self._cur_model,f)
print('save_model done!')
def load_model(self,path=None):
        '''Load a previously saved model'''
if path is None:
cur_path = self._reader._dataDir + '/CARTRegressor.pkl'
else:
cur_path = path
import pickle
with open(cur_path,'rb') as f:
self._stored_model = pickle.load(f)
print('load_model done!')
def print_tree(self):
        '''Print the tree nodes and their key attributes in level order'''
if self._cur_model is None:
use_model = self._stored_model
else:
use_model = self._cur_model
Q = collections.deque()
Q.append(use_model._root)
while len(Q) != 0:
node = Q.popleft()
node.showAttributes()
print('---\n')
for child in use_model.children(node):
Q.append(child)
class Node(DecisionTreeRegressorBase.Node):
        '''Node class of the decision tree'''
        __slots__ = '_feature','_value','_left','_right','_parent','_depth',\
            '_examples','_parent_split_feature_val','_splitVal','_gain','_loss'
def __init__(self,feature=None,
value=None,
left=None,
right=None,
parent=None,
depth=None,
examples=None,
splitVal=None,
loss=None,
gain=None):
            '''Because C4.5 splits continuous attributes with binary partitions, the node attributes differ from those of ID3.
            Args:
                feature: split feature of the current node
                value: for a leaf node, _value stores the mean of the ydata owned by that leaf, otherwise None
                left: left child of the current node
                right: right child of the current node
                parent: parent node, None for the root
                depth: depth of the node; the root has depth 1
                examples: tuple. Each node stores the xtrain and ytrain it owns
                splitVal: float. Split threshold of this node's split feature
                gain: criterion gain of the chosen best split (typical criteria: information gain, gain ratio, Gini index)
                loss: sum of squared errors (SSE) of this node's ydata
            '''
self._feature = feature
self._value = value
self._left = left
self._right = right
self._parent = parent
self._depth = depth
self._examples = examples
self._splitVal = splitVal
self._loss = loss
self._gain = gain
            #stores the parent's split feature and the split-point value
            #format: (bestFeat, bestSplitVal, 'right') or (bestFeat, bestSplitVal, 'left')
self._parent_split_feature_val = None
def showAttributes(self):
print('_depth:'+repr(self._depth))
            print('parent split feature/value _parent_split_feature_val:'+repr(self._parent_split_feature_val))
            print('split feature _feature:'+repr(self._feature))
            print('split threshold _splitVal:'+repr(self._splitVal))
print('_gain:'+repr(self._gain))
print('_loss:'+repr(self._loss))
print('_value:'+repr(self._value))
#print('_examples:'+repr(self._examples))
class DecisionTree(DecisionTreeRegressorBase.DecisionTree):
        '''Decision tree data structure'''
def __init__(self):
self._size = 0
self._root = None
def __len__(self):
return self._size
def _validate(self,node):
if not isinstance(node,DecisionTreeRegressorBase.Node):
raise TypeError
def is_leaf(self,node):
self._validate(node)
return node._left is None and node._right is None and node._value != None
def is_root(self,node):
self._validate(node)
return self._root is node
        #----------------------------- accessors -----------------------------
def preOrder(self,node=None):
            '''Pre-order traversal starting at node (at the root when node is None); returns an iterator'''
if node is None:
node = self._root
if isinstance(node,DecisionTreeRegressorBase.Node):
yield node
for child in self.children(node):
for nd in self.preOrder(child):
yield nd
def parent(self,node):
            '''Return the parent of the given node'''
self._validate(node)
return node._parent
def children(self,node):
            '''Return an iterator over the children of the given node'''
self._validate(node)
if node._left != None:
yield node._left
if node._right != None:
yield node._right
def sibling(self,node):
            '''Return the sibling of the given node'''
self._validate(node)
if node is node._parent._left:
return node._parent._right
else:
return node._parent._left
def num_children(self,node):
self._validate(node)
num = 0
if node._left != None:
num += 1
if node._right != None:
num += 1
return num
        #----------------------------- mutators ------------------------------
def add_root(self,node):
            '''Add node as the root of the tree; the root's depth is set to 1'''
self._root = node
node._depth = 1
self._size = 1
if __name__ == '__main__':
obj = CARTRegressor(dataDir='/home/michael/data/GIT/MachineLearning/data/forCART/Regressor')
#print(obj._reader._xtrain)
#obj._fixdata()
#print(obj._reader._xtrain)
obj.fit(max_depth=7,bool_prune=False)
#obj.print_tree()
#obj.save_model()
#obj.load_model()
obj.print_tree()
#print('*************')
#print(obj._cur_model)
#print('*************')
#obj._cur_model._root.showAttributes()
#print(obj._stored_model)
    #predictions on the validation set
#print(obj.eval(bool_use_stored_model=False)[0])
#print(obj.eval(bool_use_stored_model=False)[1])
#print('---')
#for node in obj._cur_model.preOrder():
#print(node)
# node.showAttributes()
#print(obj.eval(bool_use_stored_model=False)[0])
#print(obj.eval(bool_use_stored_model=False)[1])
#print(obj.eval(bool_use_stored_model=True)[0])
#print(obj.eval(bool_use_stored_model=True)[1])
    #evaluation results on the validation set
#print(obj.eval(bool_use_stored_model=True,method='f1-score')[1])
    #run predictions
#print(obj.predict([[0,0,0,0,0,0]]))
#print(obj.predict([[10,10,10,10,10,10]]))
#print(obj.predict([[1,1,1,1,1,0]],True))
#obj.save_model()
#obj.fit(alpha_leaf=0,max_depth=3,bool_prune=True)
#obj.fit(alpha_leaf=0,bool_prune=False)
#obj.print_tree()
print(obj.eval(bool_use_stored_model=False)[0])
print(obj.eval(bool_use_stored_model=False)[1])
print(len(obj._cur_model))
| [
"[email protected]"
]
| |
68734f0871f16a8b272e9587dcb828ec191bbac6 | 28aec93a7a4c850de4fca40c89660d85e5529b5d | /afieldio/settings.py | fea2d792d602b37aa94180251f5c9a5d1934d82b | []
| no_license | afieldio/django-portfolio-boilerplate | 85e2682507f8476f64e6a7a11f67b686e8d30be7 | ea7974dabd6654b0bc509ce34771fa3fb34fc12e | refs/heads/master | 2021-05-04T10:40:50.558858 | 2019-03-09T12:25:48 | 2019-03-09T12:25:48 | 51,506,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,518 | py | """
Django settings for afieldio project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ozd8wr38gcuruca2h*&h)wyvqb9)utv%49shtu+kay!%+!$e99'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['localhost', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'portfolio',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'afieldio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'afieldio.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DEFAULT_FROM_EMAIL = '[email protected]'
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
EMAIL_PORT = 1025
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
GOOGLE_RECAPTCHA_SECRET_KEY = '6LdvSSkUAAAAABZYSY_WgM6JwRMGmFL-MuXrhG80'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
]
| |
d1ceddd5578256f0263eb983b6a49351a31c6bcb | 7546e55f860b7701cdeae4f965c40a6a8f3378c4 | /main/migrations/0001_initial.py | 0b2d306436d8ac4255aeae1c7a2e970b3c7db734 | []
| no_license | ave-78/django_db | 7503c6e6b86b6eb2e054bf300e9aec16575d071d | 1574c14b217bc641d4072f883275bf6d637dff46 | refs/heads/master | 2020-07-12T09:07:48.528959 | 2019-08-27T19:34:31 | 2019-08-27T19:34:31 | 204,775,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 533 | py | # Generated by Django 2.2.4 on 2019-08-17 16:02
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
('age', models.IntegerField()),
],
),
]
| [
"[email protected]"
]
| |
f6b28e44289b1bbd336d17842f7005fa46b1940f | 0033cac5a4a233b00230912f2efd4a6b7971a5ea | /restapi/config/urls.py | fa6a19e7a44d4c4613ce2ed5ff6f0223c6d32991 | []
| no_license | RamyaSaba/restapi | c2460008bdd7018c99eb32195b9bd86bd75fe0fa | 594f99532746fdde84e1d132eaea4f68fe95134c | refs/heads/main | 2023-07-29T03:52:38.114933 | 2021-09-07T11:18:35 | 2021-09-07T11:18:35 | 403,935,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 803 | py | """config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api/', include('api.urls')),
]
| [
"[email protected]"
]
| |
c69b72f0144463de20a2f5dabb19dfaa40619e36 | 83376d2150f5a062fad469f38861bc9a966a846d | /soft/views.py | ac8cc6f3bf7431f786bd70819c81f9e1ed224d7a | []
| no_license | jramirezminery/soft_eun | bd15c1595cbddd53d1aae3c5dd95ad53b122d3f6 | 9fcdaf14d5af4ad31329ba3a493fb1e02846f348 | refs/heads/main | 2023-06-16T18:09:34.698049 | 2021-07-13T18:02:03 | 2021-07-13T18:02:03 | 385,690,466 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,940 | py | from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse
from datetime import datetime, timedelta
from dateutil import tz, parser
from soft.auth_helper import get_sign_in_flow, get_token_from_code, store_user, remove_user_and_token, get_token
from soft.graph_helper import *
from soft.sql_helper import get_all_levels, get_all_titles, get_all_titles_with_levels
# <HomeViewSnippet>
def home(request):
context = initialize_context(request)
return render(request, 'views/home.html', context)
# </HomeViewSnippet>
# <InitializeContextSnippet>
def initialize_context(request):
context = {}
# Check for any errors in the session
error = request.session.pop('flash_error', None)
if error != None:
context['errors'] = []
context['errors'].append(error)
# Check for user in the session
context['user'] = request.session.get('user', {'is_authenticated': False})
return context
# </InitializeContextSnippet>
# <SignInViewSnippet>
def sign_in(request):
# Get the sign-in flow
flow = get_sign_in_flow()
# Save the expected flow so we can use it in the callback
try:
request.session['auth_flow'] = flow
except Exception as e:
print(e)
# Redirect to the Azure sign-in page
return HttpResponseRedirect(flow['auth_uri'])
# </SignInViewSnippet>
# <SignOutViewSnippet>
def sign_out(request):
# Clear out the user and token
remove_user_and_token(request)
return HttpResponseRedirect(reverse('home'))
# </SignOutViewSnippet>
# <CallbackViewSnippet>
def callback(request):
# Make the token request
result = get_token_from_code(request)
#Get the user's profile
user = get_user(result['access_token'])
# Store user
store_user(request, user)
return HttpResponseRedirect(reverse('home'))
# </CallbackViewSnippet>
# <DrivesViewSnippet>
def drives(request):
context = initialize_context(request)
user = context['user']
token = get_token(request)
# drivesId = get_drives_ids(token, get_my_organization_drives(token, user['timeZone'])["id"], user['timeZone'])
drivesId = get_drives_ids(token, 'solucionesroots.sharepoint.com,1dd1c895-fe68-457c-a4ba-8c20fc92b8c0,403d5c33-99ca-44a4-a2cb-7a4e1ebdd14f', user['timeZone'])
context['drives'] = drivesId['value']
return render(request, 'views/drives.html', context)
# </DrivesViewSnippet>
# <DrivesListViewSnippet>
def list_drives(request, id):
context = initialize_context(request)
user = context['user']
token = get_token(request)
drivesId = get_list_drive(token, id , user['timeZone'])
context['driveslist'] = drivesId['value']
return render(request, 'views/drives_list.html', context)
# </DrivesListViewSnippet>
# <DrivesFolderListViewSnippet>
def list_drives_with_folder(request, id):
context = initialize_context(request)
user = context['user']
token = get_token(request)
drivesId = get_list_drive_folder(token, 'b!lcjRHWj-fEWkuowg_JK4wDNcPUDKmaREost6Th690U-yZsnQo2CEQr8gFH8_fVC-', id , user['timeZone'])
context['driveslistfolder'] = drivesId['value']
return render(request, 'views/drives_folder_list.html', context)
# </DrivesFolderListViewSnippet>
# <CalendarViewSnippet>
def calendar(request):
context = initialize_context(request)
user = context['user']
# Load the user's time zone
# Microsoft Graph can return the user's time zone as either
# a Windows time zone name or an IANA time zone identifier
# Python datetime requires IANA, so convert Windows to IANA
time_zone = get_iana_from_windows(user['timeZone'])
tz_info = tz.gettz(time_zone)
# Get midnight today in user's time zone
today = datetime.now(tz_info).replace(
hour=0,
minute=0,
second=0,
microsecond=0)
# Based on today, get the start of the week (Sunday)
if (today.weekday() != 6):
start = today - timedelta(days=today.isoweekday())
else:
start = today
end = start + timedelta(days=7)
token = get_token(request)
events = get_calendar_events(
token,
start.isoformat(timespec='seconds'),
end.isoformat(timespec='seconds'),
user['timeZone'])
if events:
# Convert the ISO 8601 date times to a datetime object
# This allows the Django template to format the value nicely
for event in events['value']:
event['start']['dateTime'] = parser.parse(event['start']['dateTime'])
event['end']['dateTime'] = parser.parse(event['end']['dateTime'])
context['events'] = events['value']
return render(request, 'views/calendar.html', context)
# </CalendarViewSnippet>
# <NewEventViewSnippet>
def newevent(request):
context = initialize_context(request)
user = context['user']
if request.method == 'POST':
# Validate the form values
# Required values
if (not request.POST['ev-subject']) or \
(not request.POST['ev-start']) or \
(not request.POST['ev-end']):
context['errors'] = [
{ 'message': 'Invalid values', 'debug': 'The subject, start, and end fields are required.'}
]
return render(request, 'views/newevent.html', context)
attendees = None
if request.POST['ev-attendees']:
attendees = request.POST['ev-attendees'].split(';')
body = request.POST['ev-body']
# Create the event
token = get_token(request)
create_event(
token,
request.POST['ev-subject'],
request.POST['ev-start'],
request.POST['ev-end'],
attendees,
request.POST['ev-body'],
user['timeZone'])
# Redirect back to calendar view
return HttpResponseRedirect(reverse('calendar'))
else:
# Render the form
return render(request, 'views/newevent.html', context)
# </NewEventViewSnippet>
# <LevelListViewSnippet>
def level(request):
context = initialize_context(request)
for p in get_all_titles_with_levels(str(2)):
print(p)
return render(request, 'views/login/level.html', context)
# </LevelListViewSnippet> | [
"[email protected]"
]
| |
e2726c0d4cfcd0845bf0b5d7398d5d88d7a2f7ea | c5460b21b473c61606aef92613a36fa429da9c37 | /Web-Scraping-and-Document-Databases/scrape_mars.py | 988bd539c7adae10c3d793b180faac707cda7910 | []
| no_license | chitraderrob/NU-Homework | ce5461780147cf78d38b3b4962e8f11b5000419e | 93cc49a3b3dbab94eb1a29f66a2cb193453814fd | refs/heads/master | 2020-06-04T04:01:58.515125 | 2019-10-23T20:13:07 | 2019-10-23T20:13:07 | 191,865,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,431 | py | #!/usr/bin/env python
# coding: utf-8
# In[68]:
from splinter import Browser
from bs4 import BeautifulSoup as bs
import requests
import time
import pandas as pd
from pprint import pprint
# In[72]:
# Define function to initialize browser
def init_browser():
executable_path = {"executable_path":"C:\Program Files\chromedriver.exe"}
return Browser("chrome", **executable_path, headless = False)
# # NASA Mars News
# In[73]:
# Define function to scrape Mars news
def mars_news():
browser = init_browser()
#Visit URL
url = "https://mars.nasa.gov/news/"
browser.visit(url)
# Scrape page into soup
html = browser.html
soup = bs(html,"html.parser")
# Find news title and paragraph
mars_title = soup.find("div",class_="content_title").text
mars_paragraph = soup.find("div", class_="article_teaser_body").text
#Create an empty news list and append news dict
news=[]
news_dict={'Title': mars_title,
'Description': mars_paragraph
}
news.append(news_dict)
browser.quit()
return news
# # JPL Mars Space Images - Featured Image
# In[75]:
# Define function to scrape Mars featured image
def mars_image():
browser = init_browser()
# Visit URL
jpl_url ="https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
browser.visit(jpl_url)
# Scrape page into soup
html = browser.html
soup = bs(html, 'html.parser')
# Find image URL and format accordingly
featured_img_url_raw = soup.find("div", class_="carousel_items").find("article")["style"]
featured_img_url_raw = featured_img_url_raw.split("'")[1]
base_url= "https://www.jpl.nasa.gov"
featured_img_url= base_url + featured_img_url_raw
browser.quit()
return featured_img_url
# # Mars Weather
# In[77]:
# Define function to scrape Mars weather
def mars_weather():
browser = init_browser()
# Visit URL
twitter_url ="https://twitter.com/marswxreport?lang=en"
browser.visit(twitter_url)
# Scrape page into soup
html = browser.html
soup = bs(html, 'html.parser')
# Get the Mars weather tweet text
mars_weather_data = (soup.find(class_="tweet-text")).get_text()
mars_weather_data = mars_weather_data.replace('\n', ' ').replace('pic',',').split(",")[0]
browser.quit()
return mars_weather_data
# # Mars Facts
# In[81]:
# Define a function to scrape Mars facts
def mars_facts():
browser = init_browser()
facts_url="https://space-facts.com/mars/"
# Scrape table into pandas
table = pd.read_html(facts_url)
stats_table=table[1]
    # Convert table into HTML
stats_html=stats_table.to_html(header=False, index=False).replace('\n', '')
browser.quit()
return stats_html
# # Mars Hemispheres
# In[83]:
# Define function to find Mars hemispheres info
def mars_hemispheres():
browser = init_browser()
# Visit URL
hemisphere_url ="https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
browser.visit(hemisphere_url)
# Scrape page into soup
html = browser.html
soup = bs(html, 'html.parser')
# Create a list to hold hemisphere names and append names to list
hemi_names=[]
links=soup.find_all('h3')
for hemi in links:
hemi_names.append(hemi.text)
# Create a list to hold hemisphere names and URL's
hemi_urls=[]
# Visit each hemisphere site and append to the dict...append dict to list
for hemi in hemi_names:
hemi_dict ={}
browser.click_link_by_partial_text(hemi)
hemi_dict['title'] = hemi
hemi_dict['img_url'] = browser.find_by_text('Sample')['href']
hemi_urls.append(hemi_dict)
browser.back()
browser.quit()
return hemi_urls
# In[87]:
# Define a function that scrapes all Mars info
def scrape():
mars_news_scrape=mars_news()
mars_image_scrape=mars_image()
mars_weather_scrape=mars_weather()
mars_facts_scrape=mars_facts()
mars_hemispheres_scrape=mars_hemispheres()
# Define a mars_info dict to hold all information from the scrape
mars_info={'Mars_News': mars_news_scrape,
'Featured_Image': mars_image_scrape,
'Mars_Weather': mars_weather_scrape,
'Mars_Facts': mars_facts_scrape,
'Mars_Hemispheres': mars_hemispheres_scrape
}
return mars_info
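# Example usage (editorial sketch, not part of the original homework file); it assumes
# chromedriver is installed at the path configured in init_browser().
if __name__ == "__main__":
    pprint(scrape())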
| [
"[email protected]"
]
| |
72eeefb22e0a53df2a57c4dfef7d17c8dee8d8e5 | 6c2b42003dc9ae3c1eb71a2bbe88d291d5830c77 | /api/migrations/0002_competitor_positions.py | 7c233bdecf8090e507b22fd20a049ee557e08ca9 | []
| no_license | TIY-Bracket/bracket_api | 8d91641c7d31f3ac502374237ba465fe10325fe0 | 18d1d3c397aa2d39fedc3eef27a903b9cc63ab66 | refs/heads/master | 2023-01-07T04:54:59.744999 | 2015-12-17T20:00:44 | 2015-12-17T20:00:44 | 45,284,518 | 2 | 3 | null | 2022-12-26T19:54:55 | 2015-10-31T01:33:22 | CSS | UTF-8 | Python | false | false | 1,180 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('api', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Competitor',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
('title', models.CharField(max_length=255)),
('user_id', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Positions',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
('position', models.IntegerField()),
('parent', models.CharField(max_length=255)),
('bracket_id', models.ForeignKey(to='api.Bracket')),
('competitor_id', models.ForeignKey(to='api.Competitor')),
],
),
]
| [
"[email protected]"
]
| |
bca17f6f16c5c7b53f36b1772c1609844002a2d0 | 45a61af9028a1805c08b6f7638c7aebe8140bd2d | /Groundwater/mf6/autotest/test_z01_nightly_build_examples.py | 271101a6d317d1429ed62e0954a5f125c5a0fd18 | []
| no_license | gumilar19/Personal | 1c1fac036af3a4b9d4d425b7c8cb604271b94fd2 | c666b07c5184006aca8e6ad946cc98ef72dfe9fe | refs/heads/master | 2023-08-14T20:39:07.164849 | 2021-09-29T11:19:10 | 2021-09-29T11:19:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,397 | py | import os
import sys
import subprocess
import pathlib
try:
import pymake
except ImportError:
msg = 'Error. Pymake package is not available.\n'
msg += 'Try installing using the following command:\n'
msg += ' pip install https://github.com/modflowpy/pymake/zipball/master'
raise Exception(msg)
try:
import flopy
except ImportError:
msg = 'Error. FloPy package is not available.\n'
msg += 'Try installing using the following command:\n'
msg += ' pip install flopy'
raise Exception(msg)
from simulation import Simulation
def get_example_directory(base, fdir, subdir='mf6'):
exdir = None
for root, dirs, files in os.walk(base):
for d in dirs:
if d.startswith(fdir):
exdir = os.path.abspath(os.path.join(root, d, subdir))
break
if exdir is not None:
break
return exdir
# find path to modflow6-testmodels or modflow6-testmodels.git directory
home = os.path.expanduser('~')
print('$HOME={}'.format(home))
fdir = 'modflow6-testmodels'
exdir = get_example_directory(home, fdir, subdir='mf6')
if exdir is None:
p = pathlib.Path(os.getcwd())
home = os.path.abspath(pathlib.Path(*p.parts[:2]))
print('$HOME={}'.format(home))
exdir = get_example_directory(home, fdir, subdir='mf6')
if exdir is not None:
assert os.path.isdir(exdir)
def get_branch():
    branch = None
    try:
        # run git status to determine the current branch
b = subprocess.Popen(("git", "status"),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).communicate()[0]
if isinstance(b, bytes):
b = b.decode('utf-8')
        # parse the branch name from the output
for line in b.splitlines():
if 'On branch' in line:
branch = line.replace('On branch ', '').rstrip()
except:
branch = None
return branch
def get_mf6_models():
"""
Get a list of test models
"""
# determine if running on travis
is_travis = 'TRAVIS' in os.environ
is_github_action = 'CI' in os.environ
# get current branch
is_CI = False
if is_travis:
is_CI = True
branch = os.environ['BRANCH']
elif is_github_action:
is_CI = True
branch = os.path.basename(os.environ['GITHUB_REF'])
else:
branch = get_branch()
print('On branch {}'.format(branch))
# tuple of example files to exclude
exclude = (None,)
# update exclude
if is_CI:
exclude_CI = ('test022_MNW2_Fig28',
'test007_751x751_confined')
exclude = exclude + exclude_CI
exclude = list(exclude)
# write a summary of the files to exclude
print('list of tests to exclude:')
for idx, ex in enumerate(exclude):
print(' {}: {}'.format(idx + 1, ex))
# build list of directories with valid example files
if exdir is not None:
dirs = [d for d in os.listdir(exdir)
if 'test' in d and d not in exclude]
else:
dirs = []
# exclude dev examples on master or release branches
if 'master' in branch.lower() or 'release' in branch.lower():
drmv = []
for d in dirs:
if '_dev' in d.lower():
drmv.append(d)
for d in drmv:
dirs.remove(d)
# sort in numerical order for case sensitive os
if len(dirs) > 0:
dirs = sorted(dirs, key=lambda v: (v.upper(), v[0].islower()))
# determine if only a selection of models should be run
select_dirs = None
select_packages = None
for idx, arg in enumerate(sys.argv):
if arg.lower() == '--sim':
if len(sys.argv) > idx + 1:
select_dirs = sys.argv[idx + 1:]
break
elif arg.lower() == '--pak':
if len(sys.argv) > idx + 1:
select_packages = sys.argv[idx + 1:]
select_packages = [item.upper() for item in select_packages]
break
elif arg.lower() == '--match':
if len(sys.argv) > idx + 1:
like = sys.argv[idx + 1]
dirs = [item for item in dirs if like in item]
break
# determine if the selection of model is in the test models to evaluate
if select_dirs is not None:
found_dirs = []
for d in select_dirs:
if d in dirs:
found_dirs.append(d)
dirs = found_dirs
if len(dirs) < 1:
msg = 'Selected models not available in test'
print(msg)
# determine if the specified package(s) is in the test models to evaluate
if select_packages is not None:
found_dirs = []
for d in dirs:
pth = os.path.join(exdir, d)
namefiles = pymake.get_namefiles(pth)
ftypes = []
for namefile in namefiles:
ftype = pymake.get_mf6_ftypes(namefile, select_packages)
if ftype not in ftypes:
ftypes += ftype
if len(ftypes) > 0:
ftypes = [item.upper() for item in ftypes]
for pak in select_packages:
if pak in ftypes:
found_dirs.append(d)
break
dirs = found_dirs
if len(dirs) < 1:
msg = 'Selected packages not available ['
for pak in select_packages:
msg += ' {}'.format(pak)
msg += ']'
print(msg)
return dirs
def get_htol(dir):
htol = None
if dir == 'test059_mvlake_laksfr_tr':
if sys.platform.lower() == 'darwin':
htol = 0.002
return htol
def run_mf6(sim):
"""
Run the MODFLOW 6 simulation and compare to existing head file or
appropriate MODFLOW-2005, MODFLOW-NWT, MODFLOW-USG, or MODFLOW-LGR run.
"""
print(os.getcwd())
src = os.path.join(exdir, sim.name)
dst = os.path.join('temp', sim.name)
sim.setup(src, dst)
sim.run()
sim.compare()
sim.teardown()
def test_mf6model():
# determine if test directory exists
dirtest = dir_avail()
if not dirtest:
return
# get a list of test models to run
dirs = get_mf6_models()
# run the test models
for dir in dirs:
yield run_mf6, Simulation(dir, htol=get_htol(dir))
return
def dir_avail():
avail = False
if exdir is not None:
avail = os.path.isdir(exdir)
if not avail:
print('"{}" does not exist'.format(exdir))
print('no need to run {}'.format(os.path.basename(__file__)))
return avail
def main():
# write message
tnam = os.path.splitext(os.path.basename(__file__))[0]
msg = 'Running {} test'.format(tnam)
print(msg)
# determine if test directory exists
dirtest = dir_avail()
if not dirtest:
return
# get a list of test models to run
dirs = get_mf6_models()
# run the test models
for dir in dirs:
sim = Simulation(dir, htol=get_htol(dir))
run_mf6(sim)
return
if __name__ == "__main__":
print('standalone run of {}'.format(os.path.basename(__file__)))
delFiles = True
for idx, arg in enumerate(sys.argv):
if arg.lower() == '--keep':
if len(sys.argv) > idx + 1:
delFiles = False
break
# run main routine
main()
| [
"[email protected]"
]
| |
087c4004b3983645a25c445cf140502e90ea48cb | 5bd28b96831fe60aced347d5b6c2de71689fcfd7 | /CENG114_HW2_250201073/250201073_HW2.py | e6c79d688140a77e1cb883f234eaae659d79383f | []
| no_license | kturk/Probability-and-Statistics-Assignments | 11e0053c1a8c6f9a9211b61c64a10a5313b9e707 | 95d0e29d442ae76e0abb341b645fc14d90ee1cdf | refs/heads/master | 2022-10-21T23:50:25.778275 | 2020-06-12T08:40:02 | 2020-06-12T08:40:02 | 271,749,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,197 | py | """
ID = 250201073
"""
import numpy as np
from matplotlib import pyplot as plt
# Part a (Inverse Transform Method)
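# Illustrative derivation (editorial note): the target pdf is f(x) = 2x on [0, 1], so the CDF is
# F(x) = x**2 and the inverse-transform sample is X = F^{-1}(U) = sqrt(U), hence x = u ** (1/2) below.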
U = []
Xa = []
av_Xa = []
vr_Xa = []
counterA = 0
varianceSumA = 0
# Populate the given arrays.
while counterA < 50000:
u = np.random.rand()
U.append(u)
x = u ** (1/2)
Xa.append(x)
if len(av_Xa) == 0:
av_Xa.append(x) # If list is empty average = first number
else:
av_Xa.append((av_Xa[counterA-1] * len(av_Xa) + x) / (len(av_Xa) + 1) ) # Calculating new average and adding it to the list
counterA = counterA + 1
for i in range(len(Xa)):
varianceSumA = varianceSumA + ((Xa[i] - av_Xa[i]) ** 2)
vr_Xa.append(varianceSumA / (i+1)) # Adding the variance to the list
# Inspect the following plots.
plt.figure()
for i in range(len(Xa)):
plt.plot([Xa[i],U[i]],[1,1.2])
plt.figure()
hU = plt.hist(U,100,alpha=0.5,density=True) # 'density' replaces the 'normed' argument removed from newer matplotlib
hXa = plt.hist(Xa,100,alpha=0.5,density=True)
plt.figure()
plt.plot(np.cumsum(hU[0]))
plt.plot(np.cumsum(hXa[0]))
# Plot the average and variance values.
plt.figure()
plt.plot(av_Xa)
plt.title("Figure 4")
plt.figure()
plt.plot(vr_Xa)
plt.title("Figure 5")
# Part b (Rejection Method)
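# Illustrative derivation (editorial note): the proposal is Uniform(0,1) with envelope constant
# c = 2, so c*g(x) = 2 >= f(x) = 2x on [0, 1]; a draw xB is accepted when 2*y <= f(xB) = 2*xB,
# i.e. with probability xB.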
Xb = []
av_Xb = []
vr_Xb = []
counterB = 0
varianceSumB = 0
pdfX = 0
# Populate the given arrays.
while counterB < 50000:
xB = np.random.rand()
y = np.random.rand()
pdfX = xB * 2
if 2 * y <= pdfX: # Accepting the value
Xb.append(xB)
if len(av_Xb) == 0:
av_Xb.append(xB) # If list is empty average = first number
else:
av_Xb.append((av_Xb[counterB-1] * len(av_Xb) + xB) / (len(av_Xb) + 1) ) # Calculating new average and adding it to the list
counterB = counterB + 1
for i in range(len(Xb)):
varianceSumB = varianceSumB + ((Xb[i] - av_Xb[i]) ** 2)
vr_Xb.append(varianceSumB / (i+1)) # Adding the variance to the list
# Inspect the following plots.
plt.figure()
hXb = plt.hist(Xb,100,density=True)
plt.figure()
plt.plot(np.cumsum(hXb[0]))
# Plot the average and variance values.
plt.figure()
plt.plot(av_Xb)
plt.title("Figure 8")
plt.figure()
plt.plot(vr_Xb)
plt.title("Figure 9")
| [
"[email protected]"
]
| |
cc954fa88f2ef80d6bbed34f913c5ce7504a0730 | 049c70af1725af2ed64696769b10cb0b4e8a9083 | /swiper/wsgi.py | c79b6d0e659c63f4648c0d3bbe4368ff2b4f049c | []
| no_license | luoxunchao/PM | a7756a50ff9f1c78f97c360131e53c8632dd8f95 | 70c14c5601a5dcfb5fdb5bec448257aa7075bd34 | refs/heads/master | 2022-04-20T03:12:33.424962 | 2020-04-21T04:02:18 | 2020-04-21T04:02:18 | 256,093,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | """
WSGI config for swiper project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'swiper.settings')
application = get_wsgi_application()
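# Usage note (editorial, illustrative): a WSGI server imports this module's `application`
# object, e.g. `gunicorn swiper.wsgi:application`.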
| [
"[email protected]"
]
| |
0b086d7aa26f41565a6e64ed16dc3a0147df5f7b | 9e8ee26fdab9313df81a22dae700f8417ed60722 | /slideshow.py | ad1248130c4428f50a6a64d5acbd7038a07f9826 | []
| no_license | neelneelpurk/slideshow | fe80b812225f8f51f3bc7006b12a94f9834b03d0 | 5248134943fed11349184a008fef37c1e0baaedc | refs/heads/master | 2021-01-19T13:37:30.582857 | 2017-02-18T18:50:09 | 2017-02-18T18:50:09 | 82,403,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | import numpy as np
import cv2
import imutils
import glob
print "Give your file location:"
string = str(raw_input())
images = glob.glob(string + '*.jpg')
ch_img = cv2.imread(images[0])
ch_img = imutils.resize(ch_img, width=640, height = 540)
for fn in images:
img = cv2.imread(fn)
gray = imutils.resize(img, width=640, height = 540)
for i in range(10) :
j = i/(10.0)
dst = cv2.addWeighted(gray,j,ch_img,(1-j),0)
cv2.imshow('Slideshow',dst)
if cv2.waitKey(150) & 0xFF == ord('q'):
break
ch_img = cv2.imread(fn)
ch_img = imutils.resize(ch_img, width=640, height = 540)
cv2.destroyAllWindows()
| [
"[email protected]"
]
| |
09dc07a8917962e0450d3ab3dd894432d39d5a7e | 2b85d44c6a00b73787ff350a7f79415d9ad67a9b | /base.py | 1d99ebd958946f477c6c96d237c513dfb737c5b3 | []
| no_license | wg4568/MultiplayerServer | dbaf81e072f0760c3fcc29bd9720c3ca22a8cdcc | 694d0b8c2564716c21234aec717512a518b9555b | refs/heads/master | 2021-04-30T18:24:00.617647 | 2017-01-29T17:40:39 | 2017-01-29T17:40:39 | 80,360,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,060 | py | import pygame, time
pygame.M_1 = 323
pygame.M_2 = 324
pygame.M_3 = 325
def control_check():
keys = list(pygame.key.get_pressed())
mouse = pygame.mouse.get_pressed()
keys.append(mouse[0])
keys.append(mouse[1])
keys.append(mouse[2])
return keys
class Game:
def __init__(self):
pygame.init()
self.pygame = pygame
self.title = "Template Game"
self.rate = 60
self.size = [500, 500]
self.background = (0, 0, 0)
self.show_fps = True
self.font = pygame.font.Font('freesansbold.ttf', 12)
self.events = None
self.running = False
self.frame = 0
self.clock = pygame.time.Clock()
self.draw_rect = self.pygame.draw.rect
self.draw_ellipse = self.pygame.draw.ellipse
def _control(self):
keys = control_check()
mouse = pygame.mouse.get_pos()
try: self.control(keys, mouse)
except AttributeError: pass
def _draw(self):
self.screen.fill(self.background)
if self.show_fps:
text = self.font.render("%iFPS" % self.fps, True, (255, 255, 255))
self.screen.blit(text, (10, 10))
try: self.draw()
except AttributeError: pass
def _logic(self):
try: self.logic()
except AttributeError: pass
def text(self, txt, posn, col):
text = self.font.render(txt, True, col)
self.screen.blit(text, posn)
def r(self, d, p):
if d == "x":
return self.size[0] - p
else:
return self.size[1] - p
def stop(self):
self.running = False
try: self.on_stop()
except AttributeError: pass
def run(self):
self.screen = pygame.display.set_mode(self.size)
pygame.display.set_caption(self.title)
self.running = True
self.fps = 0
fps_time_counter = time.time()
fps_counter = 0
while self.running:
fps_counter += 1
if time.time()-fps_time_counter >= 0.5:
fps_time_counter = time.time()
self.fps = fps_counter*2
fps_counter = 0
self.events = self.pygame.event.get()
self.frame += 1
for event in self.events:
if event.type == pygame.QUIT:
self.stop()
self._logic()
self._control()
self._draw()
pygame.display.update()
self.clock.tick(self.rate) | [
"[email protected]"
]
| |
07dce7326e1fd86aed16c354812c720a6127c7bc | c12b09ecd3c519cdb0828f309ce1599917720a2e | /ET/migrations/0004_auto_20161025_1923.py | cc617095d06a7a0f49be17d17446b409407ecb3a | [
"MIT"
]
| permissive | Noisyfox/EaTogether | bd9653a12aa97944bcbd98885a1724248029652b | d5b114881162fc34e71873bcacaacbe6b8da3a79 | refs/heads/master | 2023-06-07T22:46:23.262391 | 2016-10-28T10:25:08 | 2016-10-28T10:25:08 | 381,741,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,036 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-25 08:23
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('ET', '0003_auto_20161024_0239'),
]
operations = [
migrations.AddField(
model_name='grouporder',
name='status',
field=models.CharField(choices=[('W', 'Waiting'), ('A', 'Accepted'), ('D', 'Delivering'), ('F', 'Finished')], default='W', max_length=1),
),
migrations.AddField(
model_name='personalorder',
name='status',
field=models.CharField(choices=[('W', 'Waiting'), ('D', 'Delivering'), ('F', 'Finished')], default='W', max_length=1),
),
migrations.AlterField(
model_name='grouporder',
name='courier',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='ET.Courier'),
),
]
| [
"[email protected]"
]
| |
77153b8fad70acaa18583bba626fc2ae4c272948 | 2897b6dcb7512ff875df949c49e15d122a7134f1 | /ez_health_card/urls.py | 13bae7ca5ab0806957c463e1b907a36034d5ad51 | []
| no_license | RomanGodMode/django-pills | 30662bcbb3ef9f9717068e0ceb3357ad5ba0ae0c | 6e9866ce0c743a73faef6fa215ed1763ec86cced | refs/heads/master | 2023-07-15T11:28:40.312612 | 2021-08-19T23:26:29 | 2021-08-19T23:26:29 | 396,949,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,210 | py | """ez_health_card URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
)
from ez_health_card import settings
urlpatterns = [
path('admin/', admin.site.urls),
path('login/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
path('refresh/', TokenRefreshView.as_view(), name='token_refresh'),
path('', include('pills_taking.urls'))
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
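# Usage note (editorial, illustrative): with rest_framework_simplejwt, POSTing credentials to
# /login/ returns an {"access", "refresh"} token pair, and POSTing the refresh token to
# /refresh/ returns a new access token.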
| [
"Poza228337"
]
| Poza228337 |
955d9213fda02b98b578d9b7922b9a937028d2ac | 22a83feb9f3e6679606204e40d01191e76ad2255 | /JSONIFY_data.py | 264c0e7191e3d0acb4031842f96597e4ed79a52d | []
| no_license | stevenhuynh17/UVSA_Norcal_Summit_App | 1be18512095592f7f6d2fa66ce3e8188d11d93ed | 01d28f06ce230f501c6d3b54f6c5a0ec499f15de | refs/heads/master | 2022-12-11T16:51:21.860429 | 2019-10-04T23:47:21 | 2019-10-04T23:47:21 | 200,340,921 | 0 | 0 | null | 2022-12-09T20:59:25 | 2019-08-03T06:30:44 | JavaScript | UTF-8 | Python | false | false | 296 | py | from analysis.writeAgenda import writeAgenda
from analysis.extractFamily import extractFamily
from analysis.extractWkshp import extractWkshp
famMembers = 'data/attendees/captainCrunch.csv'
def assembleJSON():
extractFamily(famMembers)
writeAgenda()
extractWkshp()
assembleJSON()
| [
"[email protected]"
]
| |
2c03a8f00dba9649920449327717fc93a359c4e8 | 6de98ab5e761f4bb1f65db0993b0aa35ed26ca3d | /lambda/hello_world/hello_world/__init__.py | f195925ac52a6ea6e3d07bb0b01812c2f5b3154c | []
| no_license | jiixing/fab_aws | a79cdae643872af4b9db85e637471d188a7e5834 | 72916a46c74d1831d1dc9ab292c2aa0e34efe66b | refs/heads/master | 2021-07-07T21:58:21.115986 | 2017-10-01T05:12:02 | 2017-10-01T05:12:02 | 105,414,485 | 0 | 0 | null | 2017-10-01T02:32:22 | 2017-10-01T02:32:22 | null | UTF-8 | Python | false | false | 38 | py | from localcontext import LocalContext
| [
"[email protected]"
]
| |
552175db6cfacb1baf32881be918574025485a07 | fee6e33f28f267c207874f7300b2e1ef5a580d21 | /GSM_model/prot_prod.py | feb606c85c6927a5a714e6b7edbcd43c59d01dff | [
"MIT"
]
| permissive | TCAndersen/DTU_Biobuilders_2021 | 4c23782ef317083ef380060ab7762dcfeea2188f | 5a7648e7cd75c486f2a5218e7863d069f0b2c993 | refs/heads/main | 2023-06-10T10:05:25.305758 | 2021-07-01T22:03:20 | 2021-07-01T22:03:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,502 | py | import sys
# define the molecular weight for each nucleotide, ribonucleotide and aa (obtained from excel)
dna_MW = {'A': 313.2, 'T': 304.2, 'G': 329.2, 'C': 289.2}
rna_MW = {'A': 329.2, 'U': 306.2, 'G': 345.2, 'C': 305.2}
aa_MW = {'A': 71.078, 'R': 156.186, 'N': 114.103, 'D': 115.087, 'C': 103.143, 'E': 128.129, 'Q': 129.114, 'G': 57.051,
'H': 137.139, 'I': 113.158, 'L': 113.158, 'K': 128.172, 'M': 131.196, 'F': 147.174, 'P': 97.115, 'S': 87.077,
'T': 101.104, 'W': 186.21, 'Y': 163.173, 'V': 99.131}
aa_sequence = sys.argv[1]
protein_len = len(aa_sequence)
aa_counts = {}
# Step 1: Get count for each amino acid
for aa in aa_sequence:
if aa in aa_counts:
aa_counts[aa] += 1
else:
aa_counts[aa] = 1
# Step 2 (could be simplified with 3): Get amino acid percentage presence in the protein
aa_perc = {}
for aa in aa_counts:
aa_perc[aa] = aa_counts[aa] / protein_len * 100
# Step 3: Get gr/mol of protein for each amino acid
gr_mol = {}
for aa in aa_counts:
gr_mol[aa] = aa_MW[aa] * aa_perc[aa] / 100
# Step 4: Get mmol/ gr of protein for each amino acid
mmol_gr = {}
for aa in aa_counts:
mmol_gr[aa] = round(gr_mol[aa] / sum(gr_mol.values()) / aa_MW[aa] * 1000, 5)
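# Editorial worked example (illustrative, not in the original script): for the dipeptide 'AG',
# each residue is 50%, so gr_mol = {'A': 35.539, 'G': 28.5255} (64.0645 g/mol per residue in
# total) and each amino acid maps to 0.5 / 64.0645 * 1000 ~= 7.805 mmol of residue per gram of protein.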
atp = sum(mmol_gr.values()) * 4.3
sbml_aa_nom = {'A': 153 , 'R': 158, 'N': 161 , 'D': 156, 'C': 331, 'E': 154, 'Q': 163, 'G': 210,
'H': 490, 'I': 239, 'L': 227 , 'K': 203, 'M': 343, 'F': 272, 'P': 185, 'S': 279,
'T': 305, 'W': 280, 'Y': 274, 'V': 222}
print(sbml_aa_nom)
print(mmol_gr) | [
"[email protected]"
]
| |
637a199f53ea8c73496a92bc9bfdb4cf51269691 | ef59fd73399600e4997ff058c69ef0fc1bebecf5 | /buttonTime.py | 00166d1cabda6b65a24e590d181e3187b402fd9b | []
| no_license | jimTheSTEAMClown/Python-Code | cfae6d5e1fde5229d7bca7cbd9ef5973845c24e1 | e7bde6b09b951d24d66ad09a305b9a8d240f1d45 | refs/heads/master | 2022-05-16T18:20:49.150597 | 2022-05-02T00:51:55 | 2022-05-02T00:51:55 | 80,686,095 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,611 | py | # Button & Time
# 3.3v = 1,17, 5.0v =2,4 GND = 6,9,14,20,25,30,34,39
# I/O = 3,5,7,8,10,11,12,13,15,16,18,19,21,22,23,24,
# More I/O =26,27,28,29,31,32,33,35,36,37,38,40
import RPi.GPIO as GPIO
import time
from time import sleep
GPIO.setmode(GPIO.BOARD)
timeButton = 18
gotTimeLED = 5
GPIO.setup(gotTimeLED, GPIO.OUT)
GPIO.setup(timeButton, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
# Your Code Here
# init states
timeButtonState = False
print('starting to check Button pressed and print time in milliseconds')
startTimeMilliSeconds = int(round(time.time() * 1000))
print('Start time = ',startTimeMilliSeconds)
# Infinite Loop
while True:
# reset Button check
print('checking if Time button is pushed')
while timeButtonState == False:
timeButtonState = GPIO.input(timeButton)
#print(resetButtonState)
if timeButtonState == True:
print('Time Button Pressed')
    # Ask for the current time in milliseconds
    currentMilliSeconds = int(round(time.time() * 1000))
    print('Button Pushed at ',currentMilliSeconds)
    timeDifference = currentMilliSeconds - startTimeMilliSeconds
    print('Start to Button Pushed difference = ',timeDifference)
if timeDifference > 10000 :
print('----------------- Times up ---------------')
        print('starting to check Button pressed and print time in milliseconds')
startTimeMilliSeconds = int(round(time.time() * 1000))
print('Start time = ',startTimeMilliSeconds)
sleep(.05)
timeButtonState = False
| [
"[email protected]"
]
| |
6c4d831a16742e2736ba85c002e8ce061fa04f55 | 6d8a241829d4632af665210b27a3a4cd05e2e77f | /snippets/migrations/0002_facedata.py | 9e04225a52466f40ca29450b589072f03508c239 | []
| no_license | wangyu0426/openfaceApi | 25252f7ea64c6a6f969827427443fe1d69e521ab | 2c356d925a9d565d2a5f46ccaa3598dfffee9837 | refs/heads/master | 2021-05-08T10:36:44.548521 | 2018-02-05T16:31:27 | 2018-02-05T16:31:27 | 119,849,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-25 07:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('snippets', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='FaceData',
fields=[
('identity', models.IntegerField()),
('rep', models.TextField()),
('phash', models.TextField(primary_key=True, serialize=False)),
('image', models.ImageField(default='pic_folder/None/no-img.jpg', upload_to='pic_folder/')),
],
),
]
| [
"[email protected]"
]
| |
c5c40df7b9b89b1510c0c329855e78d180775dd2 | bcae3a575f8705dc2f8c5aedbaf6c7138f4713bf | /1030A.py | 3d123c7cfd105d953a1da4405448da8100d27d6b | []
| no_license | raghavbiyani19/CodeForces | 50635299a79d9dd04e4a3cb23d8dff073aad8074 | 7365b67feea59ab4b520529535ae2f107f4c714b | refs/heads/master | 2020-12-21T04:15:27.893593 | 2020-07-01T12:59:10 | 2020-07-01T12:59:10 | 236,303,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | n = int(input())
answers = list(map(int,input().split(" ")))[:n]
flag = 0
for item in answers:
if item==1:
        flag = 1
if flag == 1:
print("HARD")
else:
print("EASY") | [
"[email protected]"
]
| |
d835a6c951beb5c578bf0721b074f492301e078a | 747a43e9e8e69f870d8d693214a89f4da6859176 | /examples/lsc/wikikg90m/dgl-ke-ogb-lsc/python/dglke/dataloader/ensemble_dataset.py | 72e0b8c58e739ffd226d7b02c1d7d81eb35c1e55 | [
"MIT",
"Apache-2.0"
]
| permissive | AprLie/ogb | 32a8ae331e8ebfa287b81015d88ab996b6ddb9b3 | 7e4f25bbc013e76c8f04990e1d9d659a67f5f491 | refs/heads/master | 2023-06-04T14:05:55.669928 | 2021-06-16T02:30:51 | 2021-06-16T02:30:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,277 | py | from ogb.lsc import WikiKG90MDataset
from .KGDataset import KGDataset
import numpy as np
import os.path as osp
class WikiKG90MDatasetEnsemble(WikiKG90MDataset):
def __init__(self, root: str = 'dataset'):
super(WikiKG90MDatasetEnsemble, self).__init__(root)
self._other_entity_feat = None
self._other_nfeat_valid = None
self._other_nfeat_test = None
self._train_val_hrt = None
self._train_fewer_hrt = None
self._train_upsample_hrt = None
self._train_hrt = np.concatenate((self._train_hrt, np.load(osp.join(self.processed_dir, 'trian_val_topk_add_h.npy'))))
@property
def train_val_hrt(self) -> np.ndarray:
'''
        '''Training triples augmented with validation triples and their upsampled copies.'''
path2 = osp.join(self.processed_dir, 'val_hrt_wyk.npy')
path3 = osp.join(self.processed_dir, 'upsample_on_val_wyk.npy')
self._train_val_hrt = np.concatenate((self._train_hrt, np.load(path2), np.load(path3)))
print("Training dataset with validation have %d samples" % self._train_val_hrt.shape[0])
return self._train_val_hrt
@property
def train_upsample_hrt(self) -> np.ndarray:
'''
using upsample train data for training
'''
if self._train_upsample_hrt is None:
self._train_upsample_hrt = self._train_hrt
print("Training dataset with filter have %d samples" % self._train_upsample_hrt.shape[0])
return self._train_upsample_hrt
@property
def num_feat_dims(self) -> int:
'''
Dimensionality of relation and entity features obtained by roberta
'''
return 200
@property
def entity_feat(self) -> np.ndarray:
'''
Entity feature
- np.ndarray of shape (num_entities, num_feat_dims)
i-th row stores the feature of i-th entity
* Loading everything into memory at once
* saved in np.float16
'''
if self._entity_feat is None:
path = osp.join(self.processed_dir, 'entity_feat.npy')
self._entity_feat = np.load(path, mmap_mode='r')
return self._entity_feat
@property
def other_entity_feat(self) -> np.ndarray:
if self._other_entity_feat is None:
path = osp.join(self.processed_dir, 'entity_feat.npy')
self._other_entity_feat = np.load(path, mmap_mode='r')
return self._other_entity_feat
@property
def other_nfeat_valid(self) -> np.ndarray:
if self._other_nfeat_valid is None:
path = osp.join(self.processed_dir, 'val_cand_occur_feat2.npy')
self._other_nfeat_valid = np.load(path, mmap_mode='r')
return self._other_nfeat_valid
@property
def other_nfeat_test(self) -> np.ndarray:
if self._other_nfeat_test is None:
path = osp.join(self.processed_dir, 'test_cand_occur_feat.npy')
self._other_nfeat_test = np.load(path, mmap_mode='r')
return self._other_nfeat_test
@property
def other_nfeat_train(self) -> np.ndarray:
        if getattr(self, '_other_nfeat_train', None) is None:
            path = osp.join(self.processed_dir, 'train_cand_occur_feat.npy')
            self._other_nfeat_train = np.load(path, mmap_mode='r')
        return self._other_nfeat_train
@property
def all_entity_feat(self) -> np.ndarray:
if self._all_entity_feat is None:
path = osp.join(self.original_root, 'entity_feat.npy')
self._all_entity_feat = np.load(path)
return self._all_entity_feat
class WikiKG90MDatasetEnsembleTrainNFeat(WikiKG90MDataset):
def __init__(self, root: str = 'dataset'):
super(WikiKG90MDatasetEnsembleTrainNFeat, self).__init__(root)
self._other_entity_feat = None
self._other_nfeat_valid = None
        self._other_nfeat_test = None
        self._other_nfeat_train = None
@property
def num_feat_dims(self) -> int:
'''
Dimensionality of relation and entity features obtained by roberta
'''
return 200
@property
def entity_feat(self) -> np.ndarray:
'''
Entity feature
- np.ndarray of shape (num_entities, num_feat_dims)
i-th row stores the feature of i-th entity
* Loading everything into memory at once
* saved in np.float16
'''
if self._entity_feat is None:
path = osp.join(self.processed_dir, 'entity_feat.npy')
self._entity_feat = np.load(path, mmap_mode='r')
return self._entity_feat
@property
def other_entity_feat(self) -> np.ndarray:
if self._other_entity_feat is None:
path = osp.join(self.processed_dir, 'entity_feat.npy')
self._other_entity_feat = np.load(path, mmap_mode='r')
return self._other_entity_feat
@property
def other_nfeat_valid(self) -> np.ndarray:
if self._other_nfeat_valid is None:
path = osp.join(self.processed_dir, 'valid_nfeat.npy')
self._other_nfeat_valid = np.load(path, mmap_mode='r')
return self._other_nfeat_valid
@property
def other_nfeat_test(self) -> np.ndarray:
if self._other_nfeat_test is None:
path = osp.join(self.processed_dir, 'test_nfeat.npy')
self._other_nfeat_test = np.load(path, mmap_mode='r')
return self._other_nfeat_test
@property
def other_nfeat_train(self) -> np.ndarray:
        if self._other_nfeat_train is None:
            path = osp.join(self.processed_dir, 'train_nfeat.npy')
            self._other_nfeat_train = np.load(path, mmap_mode='r')
        return self._other_nfeat_train
@property
def all_entity_feat(self) -> np.ndarray:
if self._all_entity_feat is None:
path = osp.join(self.original_root, 'entity_feat.npy')
self._all_entity_feat = np.load(path)
return self._all_entity_feat
class KGDatasetWikiEnsembleNFeat(KGDataset):
    '''Load the OGB WikiKG90M knowledge graph with extra node features.
    Training triples come from WikiKG90MDatasetEnsembleTrainNFeat, and
    occurrence-based node features are attached for the train, valid and
    test candidate sets.
    The triples are stored as (head_nid, relation_id, tail_nid).
    '''
def __init__(self, sys_args, name='wikikg90m'):
self.name = name
path = "/disk4/ogb/link_level/dataset/"
self.dataset = WikiKG90MDatasetEnsembleTrainNFeat(path)
self.train = self.dataset.train_hrt.T
self.n_entities = self.dataset.num_entities
self.n_relations = self.dataset.num_relations
self.valid = None
self.test = None
self.valid_dict = self.dataset.valid_dict
self.test_dict = self.dataset.test_dict
self.entity_feat = self.dataset.entity_feat
self.relation_feat = self.dataset.relation_feat
# self.other_entity_feat_train = self.dataset.other_entity_feat_train
self.other_nfeat_train = self.dataset.other_nfeat_train
self.other_nfeat_valid = self.dataset.other_nfeat_valid
self.other_nfeat_test = self.dataset.other_nfeat_test
print(f'sys_args.use_valid_nfeat: {sys_args.use_valid_nfeat}, sys_args.train_mode: {sys_args.train_mode}')
if 't,r->h' in self.valid_dict:
del self.valid_dict['t,r->h']
        if 't,r->h' in self.test_dict:
            del self.test_dict['t,r->h']
@property
def emap_fname(self):
return None
@property
def rmap_fname(self):
return None
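# hedged usage sketch (added): the KGDataset wrappers in this module take an
# argparse-style namespace; only the attributes read in __init__ are required, e.g.
#   from argparse import Namespace
#   sys_args = Namespace(use_valid_nfeat=True, train_mode='valid')
#   dataset = KGDatasetWikiEnsembleNFeat(sys_args)
#   print(dataset.n_entities, dataset.n_relations, dataset.train.shape)
# note the dataset root path is hard-coded above, so this only runs on that host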
class KGDatasetWikiEnsemble(KGDataset):
    '''Load the OGB WikiKG90M knowledge graph for ensemble training.
    Depending on sys_args, training uses the plain triples, the triples
    augmented with validation data (train_with_val), or the upsampled
    triples (train_upsample).
    The triples are stored as (head_nid, relation_id, tail_nid).
    '''
def __init__(self, sys_args, name='wikikg90m'):
self.name = name
path = "/disk4/ogb/link_level/dataset/"
self.dataset = WikiKG90MDatasetEnsemble(path)
if sys_args.train_with_val:
self.train = self.dataset.train_val_hrt.T
elif sys_args.train_upsample:
self.train = self.dataset.train_upsample_hrt.T
else:
self.train = self.dataset.train_hrt.T
self.n_entities = self.dataset.num_entities
self.n_relations = self.dataset.num_relations
self.valid = None
self.test = None
self.valid_dict = self.dataset.valid_dict
self.test_dict = self.dataset.test_dict
self.entity_feat = self.dataset.entity_feat
self.relation_feat = self.dataset.relation_feat
self.other_entity_feat = self.dataset.other_entity_feat
print(f'sys_args.use_valid_nfeat: {sys_args.use_valid_nfeat}, sys_args.train_mode: {sys_args.train_mode}')
if sys_args.use_valid_nfeat:
if sys_args.train_mode == 'valid':
print('use features on validation')
self.other_nfeat_valid = self.dataset.other_nfeat_valid
else:
print('use features on test')
self.other_nfeat_valid = self.dataset.other_nfeat_test
else:
self.other_nfeat_valid = None
if 't,r->h' in self.valid_dict:
del self.valid_dict['t,r->h']
        if 't,r->h' in self.test_dict:
            del self.test_dict['t,r->h']
@property
def emap_fname(self):
return None
@property
def rmap_fname(self):
return None
| [
"[email protected]"
]
| |
7c7cb40880177ef4808f0f7f1831e2f43f0c5b62 | 26d023e21346700a71bae8e477da0ea39d38b345 | /meetup_integration/management/commands/get_group.py | 9213cd91b1b7024cf0e46048ff9986fd4aeb555c | []
| no_license | NejcZupec/meetup-basket | b6ee4232dc4b4f610a3fa8e55fd23a39f3df9ab2 | 1d1cc84db5be4a1357f97b8156b6750fe25f51f8 | refs/heads/master | 2020-12-25T17:24:06.157240 | 2020-05-02T15:07:02 | 2020-05-02T15:07:02 | 25,652,805 | 1 | 1 | null | 2017-01-23T20:39:37 | 2014-10-23T18:53:07 | JavaScript | UTF-8 | Python | false | false | 837 | py | import json
from django.core.management.base import BaseCommand
from meetup_integration.models import Group
from meetup_integration.utils import MeetupAPI
class Command(BaseCommand):
args = "group_urlname"
help = "Get group by urlname."
def handle(self, *args, **options):
try:
group_urlname = args[0]
group_json = MeetupAPI("2/groups", group_urlname=group_urlname).get()["results"][0]
print json.dumps(group_json, indent=4)
Group.objects.get_or_create(
id=group_json["id"],
name=group_json["name"],
link=group_json["link"],
url_name=group_json["urlname"],
timezone=group_json["timezone"],
)
except IndexError:
print self.help, "Args:", self.args
| [
"[email protected]"
]
| |
0f7cae39e2933783f96903bf67c4b59e190f3428 | a451997e7d5830c694e7d06c7fe3452867a98838 | /helper/shared.py | 52c714ef78a3c26a544677abfa20f85912e61fce | []
| no_license | Mshardul/marketplace_app | 7d46f9ba0f9e998df8780f662bce409d23a98515 | d5056384fc6c540c6acaa57fc9d4423ad4213c4d | refs/heads/main | 2023-06-23T11:40:42.629537 | 2021-07-16T20:10:32 | 2021-07-16T20:10:32 | 386,751,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | import sqlite3
errorCodes = [-1, -2]
def execute(connection, command):
try:
cursor = connection.cursor()
cursor.execute(command)
connection.commit()
return 1
except sqlite3.Error as er:
print('sql error!!!')
return -1
except Exception as e:
        print('some error occurred')
return -2
def executeAndGetResult(connection, command):
try:
cursor = connection.cursor()
cursor.execute(command)
results = cursor.fetchall()
connection.commit()
return results
except sqlite3.Error as er:
print('sql error!!!')
return -1
except Exception as e:
        print('some error occurred')
return -2
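# hedged usage sketch (added): exercises the helpers against an in-memory
# database; the table and rows below are illustrative assumptions
if __name__ == '__main__':
    connection = sqlite3.connect(':memory:')
    execute(connection, 'CREATE TABLE items (id INTEGER PRIMARY KEY, name TEXT)')
    execute(connection, "INSERT INTO items (name) VALUES ('widget')")
    rows = executeAndGetResult(connection, 'SELECT * FROM items')
    if rows not in errorCodes:
        print(rows)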
| [
"[email protected]"
]
| |
17a9404fe58bde1a11ae138c1925ab4fe91f325f | 002010b7cf7bf0c674c081f5d86b84dc67122048 | /sales/migrations/0015_installationpaymentreceipt_pointofsalesreceipt.py | c10fc9d16e8468e79b6d0706eb21a5f88a6fed6e | []
| no_license | Afotey-AntyGravity/Receipt-number | 824e2f2099cb458aaf54ad25c973849bed7543eb | 1f33694d9bdfe2bbdd1e0fc68af37dbf71708f3f | refs/heads/main | 2023-05-27T11:14:02.960247 | 2021-06-10T00:46:10 | 2021-06-10T00:46:10 | 375,331,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,439 | py | # Generated by Django 3.1.3 on 2021-04-22 19:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tools', '0007_tax'),
('sales', '0014_auto_20210422_1741'),
]
operations = [
migrations.CreateModel(
name='PointofSalesReceipt',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('reference_Number', models.CharField(max_length=200, null=True)),
('date', models.DateTimeField(auto_now_add=True)),
('discount_Rate', models.FloatField(null=True)),
('unit_Price', models.FloatField(null=True)),
('quantity', models.PositiveIntegerField(default=0)),
('goods_Pending', models.BooleanField()),
('currency', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='tools.currency')),
('customer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='tools.customer')),
('material_Color', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='tools.materialcolourinformation')),
('product', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='tools.product')),
('sales_Officer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='tools.salesmaninformation')),
],
),
migrations.CreateModel(
name='InstallationPaymentReceipt',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('reference_Number', models.CharField(max_length=200, null=True)),
('installation_Amount', models.FloatField(null=True)),
('amount_Paid', models.FloatField(null=True)),
('exfactory_Amount', models.FloatField(null=True)),
('PFI_Number', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='sales.proformareceipt')),
('customer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='tools.customer')),
],
),
]
| [
"[email protected]"
]
| |
ae378a7e6c6616477c225aaa5251e45a2a44e7c1 | 6985d11d175d184686f92bfa02aad78d01e09d72 | /study5 list/main.py | 1cfd4498460550f7672cc8e76b53c5f2a9050a3f | []
| no_license | lianhuo-yiyu/python-study | eaf34f60e330065af7ee51c764e10614f029c179 | a7045143115d6a2e92578dd9cf1edc07e45bb1c0 | refs/heads/main | 2023-04-20T22:24:30.744987 | 2021-05-16T04:46:35 | 2021-05-16T04:46:35 | 367,785,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,331 | py | #a Python list is similar to an array; lists are written with square brackets []
#creating lists
lst = ['hello', 100, 'hello', 100, 500, 4, 5]
lst1 = list([98, 46, 45])
print(lst1)
print(lst)
#unlike arrays, lists are indexed 0,1,2,3 from the left and -1,-2,-3 from the right; elements may have different types
print(lst1[1])
print(lst1[-2])
#index() returns the position of a given element in the list
print(lst.index('hello')) #with duplicate elements, only the index of the first occurrence is returned
print('the first index of 100 is '+str(lst.index(100)))
#look up an element's index within a given range of the list
#print(lst.index(500,0,4)) the range includes start but excludes stop
print(lst.index(500,0,6))
#get a single element by index
lst2 = ['hello', 'das', 45, 78, 8, 85646, 50, 6, 456, 78]
print(lst2[5])
print(lst2[-1])
#getting several elements requires slicing: list_name[start:stop:step]; a slice is a copy of that segment of the list; start defaults to 0, stop defaults to the end, step defaults to 1; start is included while stop is excluded (a half-open interval)
lst3 = lst2[1:4:1]
print(lst3)
print(lst2[1:8:2])
print(lst2)
#negative step
print(lst2[::-1]) #a negative step outputs the list in reverse order
print(lst2[::-3])
#check whether an element is in the list
print(45 in lst2)
print('das' not in lst2 )
#iterating over list elements
lst4 = [1, 2, 2, 456, 45, 100]
for i in lst4:
    # print(i , end= '\t')
    print(i)
#adding, removing and modifying list elements
#append adds a single element at the end of the list
lst4.append(99)
print(lst4[-1::-3])
#adding one or more elements at the end of the list
lst4.append(lst2)
print(lst4)
lst4.extend(lst2)
print(lst4)
print(lst4[7])
#insert adds a single element at any position in the list
lst4.insert(0, 3.1415)
print(lst4)
#adding several elements at any position: slice assignment
lst4[1::1] = lst
print(lst4)
#removing list elements: remove deletes the first matching element
lst5 = [10, 20, 30, 40, 50, 60, 10, 20, 30, 46, 798, 9, 8, 7, 4, 56]
lst5.remove(20)
print(lst5)
#pop removes an element by index, again one at a time; with no index it removes the last element by default
lst5.pop(0)
print(lst5)
#removing several elements, again with slicing
a = lst5[1:5]
print(a)
lst5 = lst5[1:5]
print(lst5)
#clear the list
lst5.clear()
print(lst5)
#del removes the lst5 name itself, effectively freeing its memory
#del lst5
#print(lst5)
#list comprehensions
lst6 = [i for i in range(6)]
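#one more illustration (added): a comprehension with a filtering condition
lst7 = [i * i for i in range(10) if i % 2 == 0]
print(lst7)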
print(lst6) | [
"[email protected]"
]
| |
c591850521ec3385a522112a5789fc7abcb51c61 | 0f91f86a1ca75669242030dfb03e3ac3efa3461f | /Spam Classification and Elasticsearch/train_classifier.py | 7af7aa5b61e1bfe36c5c53673f13f509312b5ca6 | []
| no_license | apanimesh061/InformationRetrieval | d2b434b165a59d64d66fad3d7d1d1eb7f7791dce | 9008d47a70624872d5bc01af02c72be757d2df10 | refs/heads/master | 2021-01-19T04:20:32.223938 | 2016-08-02T03:30:16 | 2016-08-02T03:30:16 | 64,720,997 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,309 | py | # -------------------------------------------------------------------------------
# Name: train_classifier
# Purpose: train the SPAM classifier
#
# Author: Animesh Pandey
#
# Created: 29/04/2015
# Copyright: (c) Animesh Pandey 2015
# -------------------------------------------------------------------------------
from sklearn import tree
from sklearn import linear_model
import csv
import numpy as np
import cPickle
##clf = tree.DecisionTreeClassifier()
clf = linear_model.LinearRegression()
feature_matrix = np.empty((0, 78), dtype='int')
label_array = np.array([], dtype='int')
def csv_reader(file_obj):
global feature_matrix
global label_array
reader = csv.reader(file_obj)
for row in reader:
features = row[1:-1]
feature_matrix = np.vstack((feature_matrix, np.array(features)))
label = row[-1]
label_array = np.append(label_array, label)
if __name__ == "__main__":
csv_path = "new_spam_features.csv"
with open(csv_path, "rb") as f_obj:
f_obj.next()
csv_reader(f_obj)
feature_matrix = feature_matrix.astype(int)
label_array = label_array.astype(int)
clf.fit(feature_matrix, label_array)
s = cPickle.dump(clf, open("lin_reg_SPAM.pkl", "wb"))
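    # hedged sketch (added): reload the pickled model and score the training set;
    # thresholding the linear-regression output at 0.5 is an assumption
    loaded = cPickle.load(open("lin_reg_SPAM.pkl", "rb"))
    preds = (loaded.predict(feature_matrix) > 0.5).astype(int)
    print preds[:10]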
| [
"[email protected]"
]
| |
6230c89fbf90c5fe08760c737ce41aeee110b049 | fde10302616f4bbba5a67a33decb65e47765e268 | /misc/v1/reconstruction/meshroom_to_log.py | 798f63beb6c87bdb1d5544ec8dea13d120d761ec | []
| no_license | laurelkeys/ff | b1f562f2e3caf2cd0616ca93fff4fb3872e55cdc | bac774e1f7b3131f559ee3ff1662836c424ebaa5 | refs/heads/master | 2023-02-23T17:46:49.011034 | 2022-01-21T20:31:59 | 2022-01-21T20:31:59 | 214,757,656 | 1 | 1 | null | 2023-02-11T00:30:56 | 2019-10-13T03:58:59 | Python | UTF-8 | Python | false | false | 4,269 | py | import os
import glob
import json
import argparse
import collections
import numpy as np
# ref.:
# [1] https://www.tanksandtemples.org/tutorial/
# [2] https://colmap.github.io/format.html#images-txt
# [3] https://github.com/colmap/colmap/blob/dev/src/estimators/pose.h#L125
# [4] https://github.com/alicevision/meshroom/wiki/Using-known-camera-positions
# [5] https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py
# [6] https://github.com/alicevision/meshroom/issues/787
# FIXME rename, so it's not confused with trajectory_io
class CameraPose:
def __init__(self, pose_id, image_path, log_matrix):
self.id = pose_id
self.image_path = image_path
self.log_matrix = log_matrix
def write_SfM_log(T, i_map, filename):
with open(filename, 'w') as f:
for i, traj in enumerate(T):
metadata = i_map[i]
pose = traj.tolist()
f.write(' '.join(map(str, metadata)) + '\n')
f.write('\n'.join(' '.join(
map('{0:.12f}'.format, pose[i])
) for i in range(4)))
f.write('\n')
def convert_Meshroom_to_log(filename, logfile_out, input_images, formatp):
input_images_list = glob.glob(f"{input_images}/*.{formatp}")
if len(input_images_list) == 0:
print("Warning: no images were found (try setting --formatp)")
input_images_list.sort()
n_of_images = len(input_images_list)
T, i_map, TF, i_mapF = [], [], [], []
views = {}
camera_poses = []
with open(filename, 'r') as sfm_file:
sfm_data = json.load(sfm_file)
for view in sfm_data['views']:
views[view['poseId']] = view['path'] # NOTE equal to the 'viewId'
for camera_pose in sfm_data['poses']:
pose_id = camera_pose['poseId']
pose_transform = camera_pose['pose']['transform']
# 3x3 (column-major) rotation matrix
rotation = np.array(
[float(_) for _ in pose_transform['rotation']]
).reshape((3, 3))
rotation[:, 1:] *= -1 # ref.: [2]
# camera center in world coordinates
center = np.array([float(_) for _ in pose_transform['center']])
# homogeneous transformation matrix
mat = np.identity(4)
mat[:3, :3] = rotation
mat[:3, 3] = center
camera_poses.append(CameraPose(pose_id, views[pose_id], mat))
for pose in camera_poses:
A = np.matrix(pose.log_matrix)
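        # invert the camera-to-world pose built above; the .log trajectory
        # apparently expects the inverse transform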
T.append(A.I)
image_name = os.path.basename(pose.image_path)
matching = [i for i, s in enumerate(input_images_list) if image_name in s]
i_map.append([pose.id, matching[0], 0])
for k in range(n_of_images):
try:
# find the k-th view id
view_id = [i for i, item in enumerate(i_map) if k == item[1]][0]
i_mapF.append(np.array([k, k, 0], dtype='int'))
TF.append(T[view_id])
except IndexError:
# assign the identity matrix to the k-th view id
# as the log file needs an entry for every image
i_mapF.append(np.array([k, -1, 0], dtype='int'))
TF.append(np.identity(4))
write_SfM_log(TF, i_mapF, logfile_out)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Convert Meshroom .sfm data into the Tanks and Temples .log file format"
)
parser.add_argument("in_sfm_fname", help="Input .sfm filename")
parser.add_argument("out_log_fname", help="Output .log filename")
parser.add_argument("images_folder", help="Input images folder path")
parser.add_argument("--formatp", default="jpg", help="Images format")
args = parser.parse_args()
# NOTE .sfm is actually a JSON
_, ext = os.path.splitext(args.in_sfm_fname)
assert ext.lower() in [".sfm", ".json"]
assert os.path.isfile(args.in_sfm_fname)
assert os.path.isdir(args.images_folder)
convert_Meshroom_to_log(
args.in_sfm_fname,
args.out_log_fname,
args.images_folder, args.formatp
)
# e.g.: python meshroom_to_log.py models\Monstree6\Meshroom\publish\cameras.json models\Monstree6\pointcloud\Monstree6_Meshroom_SfM.log models\Monstree6\images\
| [
"[email protected]"
]
| |
7a869242e62651332acf39ad8c78795f8e37978c | d51b1f998f89b3fbb6fbae17cfcb0892321324a9 | /main.py | ba3c5735adf69908a3bdc5a5c82319b17faddefd | []
| no_license | astrawmyer/covid | 54a0b7198ac8d57a1096cdf6ee2c3cb17c358682 | 7bf9ed8945b427606042157b6afbc8240cba11c0 | refs/heads/master | 2023-01-31T14:39:19.147284 | 2020-12-18T01:42:49 | 2020-12-18T01:42:49 | 256,884,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,425 | py | """ Reads US daily data from covidtracking.com and makes a graph of positive per day. """
import requests
import json
import matplotlib.pyplot as plt
import numpy as np
usaData = requests.get("https://covidtracking.com/api/us/daily")
jsonUSA = json.loads(usaData.content)
stateData = requests.get("https://covidtracking.com/api/v1/states/daily.json")
jsonStates = json.loads(stateData.content)
def USApos():
pos = []
for x in jsonUSA:
pos.insert(0,x['positive'])
plt.plot(pos)
plt.show()
def USANewPos():
pos = []
for x in jsonUSA:
pos.insert(0,x['positiveIncrease'])
plt.plot(pos)
plt.show()
def USANewDeath():
pos = []
for x in jsonUSA:
pos.insert(0,x['deathIncrease'])
plt.plot(pos)
plt.show()
def USAdeath():
pos = []
for x in jsonUSA:
pos.insert(0,x['death'])
plt.plot(pos)
plt.show()
def StatePos():
while True:
state = input("What State?\n")
state = state.upper()
pos = []
for x in jsonStates:
if x['state'] == state:
pos.insert(0,x['positive'])
plt.plot(pos)
plt.ylabel("Total Positive Cases")
plt.xlabel("Days")
plt.suptitle(state)
plt.show()
leave = input("Exit to main menu?(Y/N)\n")
leave = leave.upper()
if leave == "Y":
break
def StateNewPos():
while True:
state = input("What State?\n")
state = state.upper()
pos = []
for x in jsonStates:
if x['state'] == state:
pos.insert(0,x['positiveIncrease'])
plt.plot(pos)
plt.ylabel("New Positive Cases")
plt.xlabel("Days")
plt.suptitle(state)
plt.show()
leave = input("Exit to main menu?(Y/N)\n")
leave = leave.upper()
if leave == "Y":
break
def StateDeath():
while True:
state = input("What State?\n")
state = state.upper()
pos = []
for x in jsonStates:
if x['state'] == state:
                if x['death'] is not None:  # some records report no death count, which previously caused an error
                    pos.insert(0, x['death'])
plt.plot(pos)
plt.ylabel("Total Deaths")
plt.xlabel("Days")
plt.suptitle(state)
plt.show()
leave = input("Exit to main menu?(Y/N)\n")
leave = leave.upper()
if leave == "Y":
break
def StateNewDeath():
while True:
state = input("What State?\n")
state = state.upper()
pos = []
for x in jsonStates:
if x['state'] == state:
pos.insert(0,x['deathIncrease'])
plt.plot(pos)
plt.ylabel("New Deaths")
plt.xlabel("Days")
plt.suptitle(state)
plt.show()
leave = input("Exit to main menu?(Y/N)\n")
leave = leave.upper()
if leave == "Y":
break
if __name__ == "__main__":
main_switch_function = {"1": USApos, "2": USANewPos, "3": USAdeath, "4": USANewDeath, "5": StatePos, "6": StateNewPos, "7": StateDeath, "8": StateNewDeath, "9": exit}
while True:
print("What do you want to do?")
response = input(
"1. USA Cumulative Positive\n2. State Cumulative Positive\"3. State New Positive\n4. USA Cumulative Death\n5. Quit:\n")
try:
main_switch_function.get(response)()
except TypeError:
print("Not a valid input.")
| [
"[email protected]"
]
| |
bfff0b51dad9c1f7c57317b42ae7678d470157f9 | 5a977855c32226303bdec2585dc7d159a801b12f | /material/codigo/pygame-camera/macroscope/macroscope.py | d3adce138aba14217294e7b8d57667288bf88297 | []
| no_license | javacasm/RaspberryAvanzado | 5eecd62c3598b2e36bc4ee91c3e96b33734903bf | bbccc7b8af8c2c9b5e2e298b3e5c063d9aa056c1 | refs/heads/master | 2021-09-11T16:45:18.263481 | 2018-04-09T22:06:11 | 2018-04-09T22:06:11 | 108,258,204 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,612 | py | #!/usr/bin/env python
# RPi Macroscope by [email protected], December 2017
# https://www.raspberrypi.org/learning/push-button-stop-motion/worksheet/
# https://raspberrypi.stackexchange.com/questions/28302/python-script-with-loop-that-detects-keyboard-input
import os, sys, time
from picamera import PiCamera
from time import sleep
from datetime import datetime, timedelta
from gpiozero import Button, LED
import pygame
import RPi.GPIO as GPIO
from PIL import Image
# Pin assignments
zoomOutButton = 22
zoomInButton = 27
redButton = 17
LEDring = 18
greenLED = 4
# Flags / counters
zoomOutPressed = False
zoomInPressed = False
redButtonPressed = False
redPressCount = 0
LEDringOn = True
OUT = True
IN = False
timeLapseSeconds = 5
msgPostTime = datetime.now()
msgShowTime = 5
helpScreen = False
# GPIO setup
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(zoomOutButton, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(zoomInButton, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(redButton, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(LEDring, GPIO.OUT)
GPIO.setup(greenLED, GPIO.OUT)
GPIO.output(LEDring, GPIO.HIGH)
GPIO.output(greenLED, GPIO.LOW)
# Pygame / camera setup
pygame.init()
pygame.display.set_mode((1,1))
camera = PiCamera()
zoomfactor = 0
camera.start_preview()
camera.vflip = True
camera.hflip = True
os.chdir('/boot') # save folders to /boot directory
def zoom(direction):
global zoomfactor
if direction == IN:
zoomfactor = zoomfactor + 10
if zoomfactor > 40:
zoomfactor = 40
if direction == OUT:
zoomfactor = zoomfactor - 10
if zoomfactor < 0:
zoomfactor = 0
zoom1 = zoomfactor / 100.0
zoom2 = 1.0 - zoom1 * 2
camera.zoom = (zoom1, zoom1, zoom2, zoom2)
print(camera.zoom, zoomfactor)
def getFileName():
last_date = time.strftime("%Y%m%d", time.localtime())
img_count = 0
ds = time.strftime("%Y%m%d", time.localtime())
#Figure out if USB drive attached
dirList = os.listdir('/media/pi')
if dirList:
os.chdir('/media/pi/%s' % dirList[0])
else:
os.chdir('./')
if not os.path.isdir(ds):
os.mkdir(ds)
print("%s directory created." % ds)
else: # find highest number
dir_list = os.listdir(os.path.join(os.getcwd(), ds))
max_count = 0
for file_name in dir_list:
try:
count = int(file_name.split("_")[0])
except ValueError:
count = 0
if count >= max_count:
max_count = count + 1
img_count = max_count
print("img_count = %s" % img_count)
start_time = time.localtime()
ds = time.strftime("%Y%m%d", start_time)
ts = time.strftime("%H%M", start_time)
if last_date != ds and os.path.isdir(last_date):
img_count = 0
last_date = time.strftime("%Y%m%d", time.localtime())
if not os.path.isdir(ds):
os.mkdir(ds)
        print("%s directory created." % ds)
img_count = 0
new_name = '%s/%04d_%s_%s' % (ds, img_count, ds, ts)
return new_name
def takePicture():
global msgPostTime
new_name = getFileName()
GPIO.output(greenLED, GPIO.HIGH)
camera.annotate_text = ''
camera.capture('%s.jpg' % new_name, use_video_port=True)
print('capture %s/%s.jpg' % (os.getcwd(), new_name))
camera.annotate_text = 'Saved %s/%s.jpg' % (os.getcwd(), new_name)
msgPostTime = datetime.now()
GPIO.output(greenLED, GPIO.LOW)
def takeVideo():
global msgPostTime
new_name = getFileName()
    o = camera.add_overlay(red_dot.tobytes(), size=red_dot.size, layer=3)
camera.annotate_text = ''
camera.start_recording('%s.h264' % new_name)
print('recording')
GPIO.output(greenLED, GPIO.HIGH)
while camera.recording:
camera.wait_recording(1)
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
camera.stop_recording()
GPIO.output(greenLED, GPIO.LOW)
print('video %s.h264' % new_name)
camera.annotate_text = 'Saved %s.h264' % new_name
camera.remove_overlay(o)
if event.key == pygame.K_q:
GPIO.output(LEDring, GPIO.LOW)
sys.exit()
os.system('sudo MP4Box -add %s.h264 %s.mp4' % (new_name, new_name))
camera.annotate_text = 'Converted to %s.mp4' % new_name
msgPostTime = datetime.now()
def takeSequence(seconds):
o = camera.add_overlay(green_dot.tobytes(), size=green_dot.size, layer=3)
print('starting sequence')
while True:
camera.annotate_text = ''
takePicture()
nextShot = (datetime.now() + timedelta(seconds=seconds)).replace(microsecond=0)
while datetime.now() < nextShot:
sleep(1)
txt = str(int((nextShot- datetime.now()).total_seconds()))
camera.annotate_text = txt
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
camera.remove_overlay(o)
camera.annotate_text = ''
if event.key == pygame.K_q:
GPIO.output(LEDring, GPIO.LOW)
sys.exit()
else:
print('end sequence')
return
red_dot = Image.open('red_dot.png')
green_dot = Image.open(os.path.join('macroscope', 'green_dot.png'))
help = Image.open(os.path.join('macroscope', 'help.png'))
while True:
# button handling
if(not GPIO.input(zoomInButton)):
if not zoomInPressed:
zoomInPressed = True
zoom(IN)
else:
zoomInPressed = False
if(not GPIO.input(zoomOutButton)):
if not zoomOutPressed:
zoomOutPressed = True
zoom(OUT)
else:
zoomOutPressed = False
# Clear message
if (datetime.now() - timedelta(seconds=msgShowTime)) < msgPostTime:
pass
else:
camera.annotate_text = ''
if(not GPIO.input(redButton)):
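        # (added note) short press: take a photo; ~1 s hold (10 polls of 0.1 s):
        # toggle the LED ring; ~2 s hold: shut down the Pi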
while (not GPIO.input(redButton)): # button still pressed
redPressCount += 1
if redPressCount == 10:
if LEDringOn:
GPIO.output(LEDring, GPIO.LOW)
LEDringOn = False
else:
GPIO.output(LEDring, GPIO.HIGH)
LEDringOn = True
if redPressCount > 20:
camera.stop_preview()
GPIO.output(LEDring, GPIO.LOW)
os.system('sudo shutdown now')
sleep(0.1)
if redPressCount < 10:
takePicture()
else:
redPressCount = 0
# key handling
for event in pygame.event.get():
if event.type == pygame.QUIT:
            GPIO.output(LEDring, GPIO.LOW)
pygame.quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_h:
if not helpScreen:
o = camera.add_overlay(help.tobytes(), size=help.size, layer=3)
helpScreen = True
else:
camera.remove_overlay(o)
helpScreen = False
if event.key == pygame.K_SPACE:
takePicture()
if event.key == pygame.K_v:
takeVideo()
if event.key == pygame.K_t:
takeSequence(timeLapseSeconds)
if event.key == pygame.K_a:
print('camera.resolution: ', camera.resolution)
print('camera.iso: ', camera.iso)
print('camera.exposure_speed: ', camera.exposure_speed)
print('camera.awb_gains: ', camera.awb_gains)
print('camera.awb_mode: ', camera.awb_mode)
if event.key == pygame.K_q:
camera.stop_preview()
                GPIO.output(LEDring, GPIO.LOW)
dirList = os.listdir('/media/pi')
if dirList:
os.system('sync')
os.system('sudo umount /media/pi/%s' % dirList[0])
pygame.quit()
sys.exit()
if event.key == pygame.K_MINUS or event.key == pygame.K_EQUALS:
if event.key == pygame.K_EQUALS:
zoom(IN)
if event.key == pygame.K_MINUS:
zoom(OUT)
if event.key == pygame.K_f:
camera.hflip = not(camera.hflip)
camera.vflip = not(camera.vflip)
print("flip!")
| [
"[email protected]"
]
| |
2c84b88b2248d07bc3fdaaa1c84bb232af9210d9 | 6558766df338730772d02a318e65bfa46cff40b6 | /apps/openprofession/migrations/0037_simulizatordata.py | 856c0742ca3f953cf907bf71e7a9709af76ae251 | []
| no_license | ITOO-UrFU/openedu | 02dc265872e2de1d74b1e8eca0c6596c7860841a | 7c6507d671653fc0ccf35b5305f960eb32e7159f | refs/heads/master | 2021-01-20T21:16:39.987744 | 2019-08-07T10:02:12 | 2019-08-07T10:02:12 | 101,761,728 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,403 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-03-13 14:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('openprofession', '0036_pdavailable'),
]
operations = [
migrations.CreateModel(
name='SimulizatorData',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fio', models.CharField(max_length=2048, verbose_name='ФИО')),
('email', models.EmailField(max_length=254, verbose_name='Email')),
('phone', models.CharField(max_length=255, verbose_name='Телефон')),
('username', models.CharField(blank=True, max_length=255, null=True, verbose_name='username')),
('password', models.CharField(blank=True, max_length=255, null=True, verbose_name='password')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
],
options={
'verbose_name': 'заявка на участие в симуляторе',
'verbose_name_plural': 'заявки на участие в симуляторе',
},
),
]
| [
"[email protected]"
]
| |
96f3e80f04bc682d09494ac4303b866f15fbe926 | c2dbcf00d4e1f5730e3088822d3abcc8ad5f8e90 | /restaurants/migrations/0004_auto_20190720_0834.py | bebf6c08b7e31789b8e58f399a28487be894c392 | []
| no_license | sfares85/task_10 | af3f6045c961cfd715067f7e8b1732eda4665a91 | 452d9abc498cc0b9f92f16f878ad98b32ca9539b | refs/heads/master | 2020-06-22T03:21:13.801780 | 2019-07-20T13:26:58 | 2019-07-20T13:26:58 | 197,619,834 | 0 | 0 | null | 2019-07-18T16:12:43 | 2019-07-18T16:12:42 | null | UTF-8 | Python | false | false | 1,221 | py | # Generated by Django 2.1.5 on 2019-07-20 08:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('restaurants', '0003_auto_20180417_0853'),
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=120)),
('price', models.DecimalField(decimal_places=3, max_digits=10)),
('description', models.TextField()),
],
),
migrations.AddField(
model_name='restaurant',
name='owner',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='item',
name='restaurant',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='restaurants.Restaurant'),
),
]
| [
"[email protected]"
]
| |
8641472ef5dfd1cb9a2a6258a62732f29e906a23 | 14bfecfc9854dcbb77f42c1b387ae51875fe5d4f | /doorMotion.py | 44291bc8d50909effb756fc4c873b1ff48a3b211 | []
| no_license | rcreddy06/Hack-A-Door | 5a64a15d59b865a44ab59536ab213d3b238ea97c | 598b9f4883165aac40c84ed3da004449ea51100b | refs/heads/master | 2020-05-18T03:22:44.456478 | 2015-07-15T18:07:23 | 2015-07-15T18:07:23 | 39,152,394 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | # Reference https://www.modmypi.com/blog/raspberry-pi-gpio-sensing-motion-detection
# coding: utf-8
import RPi.GPIO as GPIO
import time
import sys
GPIO.setmode(GPIO.BCM)
PIR_PIN = 21
GPIO.setup(PIR_PIN, GPIO.IN)
def MOTION(PIR_PIN):
print "Door Open!"
execfile('ledtest.py')
print "Wait for Door to Open"
time.sleep(2)
print "Ready"
try:
GPIO.add_event_detect(PIR_PIN, GPIO.RISING, callback=MOTION)
while 1:
time.sleep(2)
except KeyboardInterrupt:
GPIO.cleanup()
| [
"[email protected]"
]
| |
52d1846ef9a4c6284f0c539b5d764e7dfc000969 | 32433810ef0421e98dcce1c8d6bd2f7a22cc12dc | /U_net/evaluation.py | 7064db6a9be567e3f629d5a1dde96abcfdecb18a | []
| no_license | stabling/projects | 927ba90868c49ededc41b540fde307b17302b9f3 | aefebf988a2b9652ab694cf5e9339fa12cdbd7cd | refs/heads/master | 2020-07-11T03:07:25.522426 | 2019-10-22T00:33:57 | 2019-10-22T00:33:57 | 204,432,604 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,048 | py | import torch
# SR : Segmentation Result
# GT : Ground Truth
def get_accuracy(SR, GT, threshold=0.5):
SR = SR > threshold
GT = GT == torch.max(GT)
corr = torch.sum(SR == GT)
tensor_size = SR.size(0) * SR.size(1) * SR.size(2) * SR.size(3)
acc = float(corr) / float(tensor_size)
return acc
def get_sensitivity(SR, GT, threshold=0.5):
# Sensitivity == sensitive == Recall
SR = SR > threshold
GT = GT == torch.max(GT)
# TP : True Positive
# FN : False Negative
TP = ((SR == 1) + (GT == 1)) == 2
FN = ((SR == 0) + (GT == 1)) == 2
SE = float(torch.sum(TP)) / (float(torch.sum(TP + FN)) + 1e-6)
return SE
def get_specificity(SR, GT, threshold=0.5):
    # Specificity == True Negative Rate
SR = SR > threshold
GT = GT == torch.max(GT)
# TN : True Negative
# FP : False Positive
TN = ((SR == 0) + (GT == 0)) == 2
FP = ((SR == 1) + (GT == 0)) == 2
SP = float(torch.sum(TN)) / (float(torch.sum(TN + FP)) + 1e-6)
return SP
def get_precision(SR, GT, threshold=0.5):
SR = SR > threshold
GT = GT == torch.max(GT)
# TP : True Positive
# FP : False Positive
TP = ((SR == 1) + (GT == 1)) == 2
FP = ((SR == 1) + (GT == 0)) == 2
PC = float(torch.sum(TP)) / (float(torch.sum(TP + FP)) + 1e-6)
return PC
def get_F1(SR, GT, threshold=0.5):
# Sensitivity == Recall
SE = get_sensitivity(SR, GT, threshold=threshold)
PC = get_precision(SR, GT, threshold=threshold)
F1 = 2 * SE * PC / (SE + PC + 1e-6)
return F1
def get_JS(SR, GT, threshold=0.5):
# JS : Jaccard similarity
SR = SR > threshold
GT = GT == torch.max(GT)
Inter = torch.sum((SR + GT) == 2)
Union = torch.sum((SR + GT) >= 1)
JS = float(Inter) / (float(Union) + 1e-6)
return JS
def get_DC(SR, GT, threshold=0.5):
# DC : Dice Coefficient
SR = SR > threshold
GT = GT == torch.max(GT)
Inter = torch.sum((SR + GT) == 2)
DC = float(2 * Inter) / (float(torch.sum(SR) + torch.sum(GT)) + 1e-6)
return DC
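# hedged usage sketch (added): all metrics expect 4-D tensors of shape (N, C, H, W);
# note the TP/FN arithmetic above assumes the uint8 comparison semantics of the
# older PyTorch versions this code was written for
if __name__ == '__main__':
    SR = torch.rand(2, 1, 8, 8)                    # predicted probabilities
    GT = (torch.rand(2, 1, 8, 8) > 0.5).float()    # binary ground truth
    print(get_accuracy(SR, GT))
    # the remaining metrics follow the same call pattern, e.g. get_F1(SR, GT)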
| [
"[email protected]"
]
| |
2e18eb6be957873c745d543ad85b0034148f4db7 | a305d9a74d2332fdfaacc1d041a5ec8a22dae2db | /square.py | ab50b0bb35101b31be278505919126ef1b588e57 | []
| no_license | CodArtist/Fractals | d340fd124ec70b12f3dc63f93d086614278869c6 | 2c1e98db3414c8fb7830efa88973de43a49f6315 | refs/heads/main | 2022-12-27T00:36:58.890657 | 2020-10-12T05:15:31 | 2020-10-12T05:15:31 | 303,283,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | import turtle
bob =turtle.Turtle()
bob.speed(100000)
bob.penup()
bob.forward(-150)
bob.pendown()
color = ["green","blue","red"]
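# draws a square, recursively nesting a half-size square at each corner
# until the side length is 10 or less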
def star(turtle,size,col):
if size <=10:
return
else:
turtle.fillcolor(col)
turtle.begin_fill()
for i in range(4):
turtle.forward(size)
star(bob,size/2,col)
turtle.left(90)
turtle.end_fill()
star(bob,250,color[0])
turtle.done() | [
"[email protected]"
]
| |
98f212d0387b519b346a5a4365cc5f62ecaf13bd | 4596e3f0097723e402c8fe21933f016d99cdb08b | /two_strings_equal.py | ae0844187de26d825498e9b5244b34bb1d530c1c | []
| no_license | Harshavardhanteja7/Python-Assignments | 3b0af25d088e42c24407be53ccd78064df5244db | cae8657618ea44ff62268e315e26adb655c9cbbf | refs/heads/master | 2022-11-09T06:19:37.462857 | 2020-06-27T17:25:30 | 2020-06-27T17:25:30 | 273,303,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | # -*- coding: utf-8 -*-
"""two_strings_equal.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1tWdMpgZwyMC_Sbp5iSaa9SRuARW-oXWx
"""
str_1=str(input("Enter the 1st string: "))
str_2=str(input("Enter the 2nd string: "))
if str_1==str_2:
print("both strings are equal")
else:
print("both strings are not equal") | [
"[email protected]"
]
| |
2670d4a865a34c6b12557710f3b157b604b6bf68 | 148cb99e0f23679c20243470ad62dc4155aa5252 | /baseinfo/migrations/0016_auto_20191206_0806.py | 8d98453f8d32b5509559f2cb37242495b58c3609 | []
| no_license | Hamidnet220/tax | 46060f24b55a4f348194599d59247ff9435f4379 | 000051be5df6a98f679d13a94e37b9ee30efd5a9 | refs/heads/master | 2020-06-19T09:41:20.998214 | 2019-12-10T01:01:17 | 2019-12-10T01:01:17 | 196,666,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | # Generated by Django 2.1.7 on 2019-12-06 08:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('baseinfo', '0015_guarantee_guarantee_file'),
]
operations = [
migrations.AlterField(
model_name='guarantee',
name='guarantee_type',
field=models.IntegerField(choices=[(1, ' ضمانت نامه شرکت در مناقصه'), (2, ' ضمانت نامه پیش پرداخت'), (3, ' ضمانت نامه حسن انجام کار')], verbose_name='عنوان ضمانت نامه'),
),
]
| [
"[email protected]"
]
| |
96cd03528b16f7d81aceb85fe867c042d368eee5 | 94ebceffc6e946ac64b606e0ab68a8921ae300de | /python/generate_candidate.py | f9ad44e7abc217fcea3414cf592f3fac74fef586 | []
| no_license | chaozc/poselet_code | 026d7cbc62211eae8a1ebf4e27e7b9b2263a5ca4 | 7e24f343d07bd8589cba974e387d4ab20319de81 | refs/heads/master | 2016-09-05T15:53:25.939303 | 2015-02-16T12:26:57 | 2015-02-16T12:26:57 | 29,904,855 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | import cv2
import os
import numpy as np
from sys import argv
from MyRandint import MyRandint
from poseletFunc import *
if __name__ == "__main__":
#argv[1]: num of bin
conf = config()
flist = os.listdir(conf['img_dir'])
noFile = len(flist)
cntImg = 0
totPatch = 0
rand = MyRandint()
bsz = int(argv[1])
for f in flist:
fname = conf['img_dir']+'/'+f
if fname.find('.jpg') > 0:
imgID = f[:-4]
img = cv2.imread(fname)
h, w, d = img.shape
xbin = w/bsz
ybin = h/bsz
cntPatch = 0
ouf = open(conf['candidate_dir']+'/'+f[:-4], 'w')
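            # sample up to 500 random candidate patches in each cell of the bsz x bsz grid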
for i in range(bsz):
for j in range(bsz):
for k in range(500):
out_line = random_patch(imgID, [i*xbin, (i+1)*xbin], [j*ybin, (j+1)*ybin], [4, 15], w, h, conf['json_dir'])
if out_line != False:
cntPatch += 1
ouf.write(out_line+'\n')
ouf.close()
cntImg += 1
totPatch += cntPatch
print cntImg, '/', noFile, 'img finished, Patches for img:', cntPatch, 'Total Patches:', totPatch | [
"[email protected]"
]
| |
4a93f895c4f634e938a00892439e5aa761ecf1b5 | 3d61fe0f49f5d344fc32a6faa799f0a46deec9a5 | /2017/AoC-2017-13v2.py | 290341cd10f0980f65f036c7d6c15a02ddab3382 | []
| no_license | sbeaumont/AoC | 558296fd26cd5272e33d3cb9113c09e4945c98ac | 406eda614d8434d8feb71fe1262f1fda54972a12 | refs/heads/master | 2022-12-13T07:38:36.089775 | 2022-12-04T21:11:49 | 2022-12-04T21:11:49 | 75,467,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | PUZZLE_INPUT_FILE_NAME = "AoC-2017-13-input.txt"
# Parse firewall layers: depth -> scanner range
with open(PUZZLE_INPUT_FILE_NAME) as puzzle_input_file:
firewall = {int(line.split(":")[0]): int(line.split(":")[1]) for line in puzzle_input_file.readlines()}
max_depth = max(firewall, key=firewall.get)
def check_layers(wait_time):
severity = 0
for d, r in firewall.iteritems():
at_layer_time = wait_time + d
if at_layer_time % (2*r-2) == 0:
severity += d * r
return severity
print(check_layers(0))
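# part two, a hedged completion of the commented stub (added): find the smallest
# delay such that no scanner is at the top of its layer when we pass through;
# severity alone is not enough because being caught at layer 0 scores 0
def caught(wait_time):
    return any((wait_time + d) % (2 * r - 2) == 0 for d, r in firewall.iteritems())

delay = 0
while caught(delay):
    delay += 1
print(delay)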
| [
"[email protected]"
]
| |
40cd6567e784c1030fabefe70f52924dc26380f0 | f1c2578a7774a4873932badf4359ea0344ccdecd | /DSM FW Pack/deployable/NodeFolders/node_modules/jquery/node_modules/contextify/build/config.gypi | 7f412d1edefa168a37d4d0767cc8aec9c6f76670 | [
"MIT"
]
| permissive | heppy83/DSMFramework | 0f30d0bb282a14680755a03a8d342b4ffab90bb9 | 84aa26c93d1e811f10f3c6f6916184abef6f46b7 | refs/heads/master | 2020-03-29T13:21:17.365084 | 2013-05-19T10:09:20 | 2013-05-19T10:09:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,738 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 42,
"host_arch": "x64",
"node_install_npm": "true",
"node_install_waf": "true",
"node_prefix": "out/dist-osx/usr/local",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_openssl": "true",
"target_arch": "x64",
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "false",
"nodedir": "/Users/stesoi/.node-gyp/0.8.17",
"copy_dev_lib": "true",
"save_dev": "",
"browser": "",
"viewer": "man",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"shell": "/bin/bash",
"parseable": "",
"userignorefile": "/Users/stesoi/.npmignore",
"cache_max": "null",
"init_author_email": "",
"sign_git_tag": "",
"ignore": "",
"long": "",
"registry": "https://registry.npmjs.org/",
"fetch_retries": "2",
"npat": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"cache_lock_retries": "10",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"coverage": "",
"json": "",
"pre": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/stesoi/.npm-init.js",
"userconfig": "/Users/stesoi/.npmrc",
"npaturl": "http://npat.npmjs.org/",
"node_version": "v0.8.17",
"user": "501",
"editor": "vi",
"save": "",
"tag": "latest",
"global": "",
"optional": "true",
"username": "",
"force": "",
"searchopts": "",
"depth": "null",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"yes": "",
"fetch_retry_maxtimeout": "60000",
"strict_ssl": "true",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "",
"cache": "/Users/stesoi/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"user_agent": "node/v0.8.17",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"init_version": "0.0.0",
"umask": "18",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/Users/stesoi/tmp",
"unsafe_perm": "",
"link": "",
"prefix": "/usr/local"
}
}
| [
"[email protected]"
]
| |
0a46787003d24c0133f2ba04614a8d583391bd69 | 419c703dd00a6f2219a8f81408364d7f4fa9e3db | /cgi-bin/model.py | 52d5878dce25f269db670d1cac2e35f35a2b0963 | []
| no_license | borhanreo/digit_predict | 3897663f1a2689e915551c94194592baade81ec4 | 81a322b39c60e9793c3df1f857112651b2eb5f5e | refs/heads/master | 2020-04-20T06:26:53.609897 | 2019-02-01T11:10:03 | 2019-02-01T11:10:03 | 168,684,380 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,199 | py | """
Define Convolutional Neural Network model for MNIST input
"""
from tflearn import DNN
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
# Building convolutional network
network = input_data(shape=[None, 28, 28, 1], name='input')
network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = fully_connected(network, 128, activation='tanh')
network = dropout(network, 0.8)
network = fully_connected(network, 256, activation='tanh')
network = dropout(network, 0.8)
network = fully_connected(network, 10, activation='softmax')
network = regression(network, optimizer='adam', learning_rate=0.01,
loss='categorical_crossentropy', name='target')
# Define model
model = DNN(network, tensorboard_verbose=0)
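# hedged usage sketch (added, commented out so importing this module stays
# side-effect free); follows the standard tflearn MNIST example
# import tflearn.datasets.mnist as mnist
# X, Y, testX, testY = mnist.load_data(one_hot=True)
# X = X.reshape([-1, 28, 28, 1])
# testX = testX.reshape([-1, 28, 28, 1])
# model.fit({'input': X}, {'target': Y}, n_epoch=1,
#           validation_set=({'input': testX}, {'target': testY}),
#           show_metric=True, run_id='convnet_mnist')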
| [
"[email protected]"
]
| |
327af865bfb543690b70b52f2def73ab01b9997d | 0c759cd37338db689ce36c22944d4a6f8f9fe74e | /app/web/urls.py | f34b140f798fd05d83f4dac018e2c881404c359d | []
| no_license | mkramb/acegit | 4dee079719c82fa9f95e9c5d59ac6af72a211f7a | 76c8383496a5531461aab59ceb16daff7a3c406a | refs/heads/master | 2021-01-10T06:43:54.496288 | 2016-02-24T17:39:16 | 2016-02-24T17:39:16 | 52,461,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | from django.conf.urls import patterns, url, include
from django.views.generic import TemplateView
from app.web.views import HookView
urlpatterns = patterns(
'',
url(
r'^$', TemplateView.as_view(template_name='web/home.html'), name='web'
),
url(r'^hook/(?P<repo_id>.+)$', HookView.as_view(), name='web_hook'),
url(r'', include('social.apps.django_app.urls', namespace='social')),
)
| [
"[email protected]"
]
| |
8a5e4ac253eace3e40205da364061599b73b25be | fdb2263be1f531b19f68159a47bc80e92b04e594 | /resnet.py | 9741e0a64ff5f4b6f2c00d3b5e23a5ca633b40ee | []
| no_license | YutoNishimura-v2/ResNet_alpha | 8e3e5276b030123a0d15fc81d416aa3806ce16f1 | 209253d59d750982c2bc8f92629ff778fc653583 | refs/heads/main | 2023-03-02T17:01:16.273459 | 2021-02-10T12:57:59 | 2021-02-10T12:57:59 | 337,329,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,334 | py | import torch
from torch import Tensor
import torch.nn as nn
from torch.hub import load_state_dict_from_url
from typing import Type, Any, Callable, Union, List, Optional
"""
Reference: https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
"""
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion: int = 1
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion: int = 4
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(
self,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
num_classes: int = 1000,
zero_init_residual: bool = False,
groups: int = 1,
width_per_group: int = 64,
replace_stride_with_dilation: Optional[List[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0) # type: ignore[arg-type]
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0) # type: ignore[arg-type]
def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,
stride: int = 1, dilate: bool = False) -> nn.Sequential:
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x: Tensor) -> Tensor:
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
def _resnet(
arch: str,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
pretrained: bool,
progress: bool,
**kwargs: Any
) -> ResNet:
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
class Bottleneck_alpha(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion: int = 4
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
downsample4y: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super(Bottleneck_alpha, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.downsample4y = downsample4y
self.stride = stride
def forward(self, x: tuple) -> tuple:
x, y = x
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if (self.downsample is not None) and (out.size() != x.size()):
identity = self.downsample(x)
if (self.downsample4y is not None) and (out.size() != y.size()):
y = self.downsample4y(y)
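        # (added note) after the residual addition below, the output is blended with
        # the previous block's input y; the 3/2 and -1/2 weights resemble a two-step
        # linear multistep (Adams-Bashforth-like) update, presumably the "alpha" variant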
out += identity
out = self.relu(out)
return (1.5*out - 0.5*y, x)
class ResNet_alpha(nn.Module):
def __init__(
self,
block: Type[Union[BasicBlock, Bottleneck_alpha]],
layers: List[int],
num_classes: int = 1000,
zero_init_residual: bool = False,
groups: int = 1,
width_per_group: int = 64,
replace_stride_with_dilation: Optional[List[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super(ResNet_alpha, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], layer_num=0)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0], layer_num=1)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1], layer_num=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2], layer_num=3)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck_alpha):
nn.init.constant_(m.bn3.weight, 0) # type: ignore[arg-type]
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0) # type: ignore[arg-type]
def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck_alpha]], planes: int, blocks: int,
stride: int = 1, dilate: bool = False, layer_num: int = None) -> nn.Sequential:
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
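        # Hard-coded (in_channels, out_channels, stride) tables for projecting
        # the auxiliary branch y: the first table is used by each layer's
        # first block, the second by the remaining blocks.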
        layer_channels = [(64, 256, 1), (64, 512, 2), (256, 1024, 4), (512, 2048, 4)]
downsample4y_1 = nn.Sequential(
conv1x1(layer_channels[layer_num][0], layer_channels[layer_num][1], layer_channels[layer_num][2]),
norm_layer(layer_channels[layer_num][1]),
)
        layer_channels = [(64, 256, 1), (256, 512, 2), (512, 1024, 2), (1024, 2048, 2)]
downsample4y_2 = nn.Sequential(
conv1x1(layer_channels[layer_num][0], layer_channels[layer_num][1], layer_channels[layer_num][2]),
norm_layer(layer_channels[layer_num][1]),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, downsample4y_1,self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, downsample4y=downsample4y_2, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x: Tensor) -> Tensor:
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x_1 = self.maxpool(x)
        x_2 = self.layer1((x_1, x_1))  # output is a tuple (out, x)
x_3 = self.layer2((x_2[0], x_1))
x_4 = self.layer3((x_3[0], x_2[0]))
x_5 = self.layer4((x_4[0], x_3[0]))
x = self.avgpool(x_5[0])
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
def _resnet_alpha(
arch: str,
block: Type[Union[BasicBlock, Bottleneck_alpha]],
layers: List[int],
pretrained: bool,
progress: bool,
**kwargs: Any
) -> ResNet_alpha:
model = ResNet_alpha(block, layers, **kwargs)
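    # Note: strict loading of the stock ResNet-50 checkpoint would fail here,
    # since Bottleneck_alpha registers extra downsample4y parameters that the
    # checkpoint lacks; load_state_dict(state_dict, strict=False) would be
    # needed to load only the matching subset of weights.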
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def resnet_alpha50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet_alpha:
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet_alpha('resnet50', Bottleneck_alpha, [3, 4, 6, 3], pretrained, progress,
**kwargs) | [
"[email protected]"
]
| |
902119212ac3b5b8562ee82d7fa00a5c7d370c0f | d204cc764f4cbbb4755c3b630c0790fee53628ee | /subscriptions/views.py | cc6fa370f2b47f0412b08233bbd0170aac8ab8a1 | []
| no_license | jace3k/satellite-backend | 9e0f73e389da10126d5a38f36db9503b470adda2 | bec7f19234a03134482d918fa68cd2674e14a7a1 | refs/heads/master | 2020-05-04T13:25:32.501293 | 2019-04-04T17:11:36 | 2019-04-04T17:11:36 | 179,159,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from subscriptions.models import Subscription
from subscriptions.serializers import SubscriptionSerializerDetail
class SubscriptionViewSet(viewsets.ModelViewSet):
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated, )
serializer_class = SubscriptionSerializerDetail
queryset = Subscription.objects.all()
| [
"[email protected]"
]
| |
acc3a59ce841b1ef522f84e87223ee9a40cfc144 | 419df78283bfd823e78242c67cd092fde5796843 | /4/accessing_last_item_in_a_list.py | 16aaa25c4f5bf3e2d4514c5bd958ce2d1db9899d | []
| no_license | SandeshChinchole/python-programs | 4f27c8a4fea010ef928d2177e4d582165feb798a | 04c0b7fe281810771c7965a1ae11df6868a11052 | refs/heads/master | 2023-06-11T07:33:17.361490 | 2021-06-27T23:41:43 | 2021-06-27T23:41:43 | 310,763,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py |
# accessing last item using index
smoothies = ['coconut', 'strawberry', 'banana', 'pineapple', 'acai berry']
length = len(smoothies)
last = smoothies[length-1]
print('Last item using index: ', last)
# accessing last item using negative index (take the last three smoothies on our list and print them)
smoothies = ['coconut', 'strawberry', 'banana', 'pineapple', 'acai berry']
last = smoothies[-1]
second_last = smoothies[-2]
third_last = smoothies[-3]
print('last item: ', last + ', ' + 'second last: ' + second_last + ', ' + 'third_last: ' + third_last)
| [
"[email protected]"
]
| |
63050037ff16f231d2e413b0f7308febc154a77d | ddbb862a813d28154547f46bf3f9af9297e355f7 | /Monte Carlo1.1.py | 39f1e4ee233fdf5e16fbaff8e7998b2b5a73bd50 | []
| no_license | YizhuoLu/EE511project4 | 23aa001f18ec63ed3762d843eed4a9437769ba15 | 3761e43418399d513afac53e690628cd6e20fc07 | refs/heads/master | 2020-03-23T11:58:24.831547 | 2018-07-19T05:39:14 | 2018-07-19T05:39:14 | 141,529,251 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 936 | py | import numpy as np
import matplotlib.pyplot as plt
import math
s = []
N = 100
def uniformSample(N):
for i in range(N):
x = np.random.uniform(0, 1)
y = np.random.uniform(0, 1)
s.append([x, y])
return s
z = np.array(uniformSample(N))
count = 0
for i in range(len(z)):
if math.sqrt(1-z[i,0]**2) >= z[i,1]:
count = count + 1
# print('The number of samples that fall within the quarter unit-circle is:', count)
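# The fraction of samples inside the quarter circle estimates its area,
# which is pi/4 for a unit radius, hence pi is approximately 4 * area.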
area = count / N
print("The estimated area of the inscribed quarter circle is:", area)
pi = 4 * area
print('The estimated value of pi is:', pi)
fig = plt.figure(1)
ax = fig.add_subplot(1, 1, 1)
circ = plt.Circle((0, 0), radius=1, edgecolor='r', facecolor='white')
sca = plt.scatter(z[:, 0], z[:, 1], s=7, c='b')
ax.add_artist(circ)
ax.add_artist(sca)
plt.title('scatter plot of 100 uniformly distributed samples')
plt.xlabel('X')
plt.ylabel('Y')
plt.show() | [
"[email protected]"
]
| |
a698324d7311e2d9e04a2ca8aeeb1e61b0da888f | b1a7dcee012b5ceff3da580d3f7b3129a4b561af | /learn/settings.py | 6954595b8cd11d3c30e8b79ed24822e29a0b7c62 | []
| no_license | azamjon180/learn | 730f3555beb33f4127f6f5fe23f4c6a19295373b | c54172019a2146dfb4c500137a7dd5e232d5fd4e | refs/heads/master | 2022-12-14T03:13:39.070059 | 2020-09-17T08:27:02 | 2020-09-17T08:27:02 | 294,693,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,405 | py | """
Django settings for learn project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'n%*qenxt8y75-3+^)x_%+n-0^ld+f*yxy60$a9nkjz_1dly31#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'home',
'base',
'mptt',
'ckeditor',
'ckeditor_uploader',
]
CKEDITOR_UPLOAD_PATH = 'ckupload/'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'learn.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'template')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'home.context_processors.menu',
],
},
},
]
WSGI_APPLICATION = 'learn.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
MEDIA_URL = '/media/'
MEDIA_ROOT = (os.path.join(BASE_DIR, 'media'))
| [
"[email protected]"
]
| |
2bc555da0fa56b4908c116b59adf873b9464b273 | a508eddc00e29a64b1ee7e201617d4a7d4544056 | /manage.py | 18f71d68c9ab139a98120b6182d8567fc6f359ca | []
| no_license | Dishanth-G/ASD_detection | f7e572e548e04bedceeda566577ee5a2bd38b602 | c4a9ca651fbc85f04e411e038f838e4def3f5699 | refs/heads/master | 2023-01-21T22:03:22.582557 | 2020-12-02T17:52:00 | 2020-12-02T17:52:00 | 312,204,551 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DeployModel.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
61bf434ef9c9cd94c19c4eee92ececa6952d9ec4 | 1a78a2d3e71c26998c0022ac487fa6545e5d2e79 | /programs/remove_mtdna.py | f4e1eb669d70f1e9d11842bde2e8b5851a5d489a | []
| no_license | ncc95/project3 | 5471cc92e26ecc3d9e192d07f4d2c651c8aee032 | 1055285f463e80ba2857bbf63fe42f5e532a4869 | refs/heads/master | 2021-09-19T03:23:56.371561 | 2018-07-22T12:39:05 | 2018-07-22T12:39:05 | 141,892,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | def remove_mtdna_from_sam(input,output):
with open(input) as f:
with open(output, "w") as g:
for line in f:
if line.rsplit("\t")[2] != "chrM":
g.write(line)
remove_mtdna_from_sam("aln.sam", "aln_no_mtdna.sam")
| [
"[email protected]"
]
| |
5c2e5d83b929a6609dc21da6bbf6211579490049 | 9f0acd43cb381e4945565d591001516d992fb031 | /tr_option/opt20005.py | daebd24819a039043ff7586b380cc2c5b818d96a | [
"MIT"
]
| permissive | atheling44/KiwoomTrader | ab10a404f5f879d00f0662eaad53c10b1aa8d931 | ae630513a738e495f9010b2671043220f07d22ed | refs/heads/master | 2023-03-16T10:07:39.252689 | 2019-12-26T11:36:36 | 2019-12-26T11:36:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,247 | py | from tr_option.base import KWTR
from copy import deepcopy
# [ opt20005 : 업종분봉조회요청 ]
class Opt20005(KWTR):
def __init__(self, core):
super().__init__(core)
self.rq_name = self.tr_code = 'opt20005'
self.record_name_single = '업종분봉차트'
self.header_single = [
'업종코드',
]
self.record_name_multiple = '업종준봉조회'
self.header_multiple = [
'현재가', '거래량', '체결시간', '시가', '고가', '저가', '대업종구분', '소업종구분', '종목정보', '전일종가',
]
def tr_opt(self, input0, tick_range, prev_next, screen_no):
        # Sector code = 001: Composite (KOSPI), 002: Large caps, 003: Mid caps, 004: Small caps,
        #               101: Composite (KOSDAQ), 201: KOSPI200, 302: KOSTAR, 701: KRX100; see the sector code reference for the rest
        # Tick range = 1: 1 tick, 3: 3 ticks, 5: 5 ticks, 10: 10 ticks, 30: 30 ticks
self.core.set_input_value('업종코드', input0)
self.core.set_input_value('틱범위', tick_range)
self.core.comm_rq_data(self.rq_name, self.tr_code, prev_next, screen_no)
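        # Deep-copy the handler buffer so that later requests on the same
        # screen number do not mutate the result returned here.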
self.tr_data = deepcopy(self.core.receive_tr_data_handler[self.tr_code][screen_no])
return self.tr_data
| [
"[email protected]"
]
| |
b90a0305484644a6728e50d68732ee9e6989bb14 | 478fad340a97fc14d365b95bbd6f8ac1dcc71953 | /121/Solution.py | d76a39e78ef9cadd8e4004cc32002f4a3d0d5986 | []
| no_license | sandyg05/leetcode | 93cca3b3ce4f38cf1ea1c6d3e8400d7b6b776c37 | e9d8036e2be6dbd1b8c958431e07dc35b88ebfa8 | refs/heads/master | 2022-07-16T10:03:59.529470 | 2020-05-13T05:35:49 | 2020-05-13T05:35:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 994 | py | """
Say you have an array for which the ith element is the price of a given stock on day i.
If you were only permitted to complete at most one transaction (i.e., buy one and sell one share of the stock), design an algorithm to find the maximum profit.
Note that you cannot sell a stock before you buy one.
Example 1:
Input: [7,1,5,3,6,4]
Output: 5
Explanation: Buy on day 2 (price = 1) and sell on day 5 (price = 6), profit = 6-1 = 5.
Not 7-1 = 6, as selling price needs to be larger than buying price.
Example 2:
Input: [7,6,4,3,1]
Output: 0
Explanation: In this case, no transaction is done, i.e. max profit = 0.
"""
class Solution:
def maxProfit(self, prices):
if not prices:
return 0
min_price = prices[0]
max_profit = 0
for num in prices:
if num < min_price:
min_price = num
if num - min_price > max_profit:
max_profit = num - min_price
return max_profit | [
"[email protected]"
]
| |
a5075c05b906fd9b22238fdec92901e48a23a4c7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02817/s121273903.py | c74297c5df6c42af00d7dd1b1408fea1fb86e8a6 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41 | py | x=list(input().split())
print(x[1]+x[0])
| [
"[email protected]"
]
| |
73811021c3a1796cb4a40a018b7f159349ac18de | 419667d83ba1500e241863a509f6ebd23c7dc8b6 | /addDatabase.py | c9ccfec8dcb3444dd12c8a4cec1ec3243d66b3e8 | []
| no_license | mahavasu/Item-Catalog | c0de566bbf9b5450d9c6dca02160aa1a059d1f99 | d2b3b6d5c935fdd65faff0a66f1ccacf0dc93a9e | refs/heads/master | 2021-01-19T10:13:54.677485 | 2015-10-05T16:59:29 | 2015-10-05T16:59:29 | 38,135,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,179 | py | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Category, Base, Item, User
engine = create_engine('sqlite:///catalog.db')
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# session.commit(). If you're not happy about the changes, you can
# revert all of them back to the last commit by calling
# session.rollback()
session = DBSession()
Fernandez = User(name="Fernandez", email="[email protected]", picture="fddf")
session.add(Fernandez)
Peter = User(name="Peter", email="[email protected]", picture="lkjljk")
session.add(Peter)
Basketball = Category(name="Basketball")
session.add(Basketball)
Baseball = Category(name="Baseball")
session.add(Baseball)
Frisbee = Category(name="Frisbee")
session.add(Frisbee)
Snowboarding = Category(name="Snowboarding")
session.add(Snowboarding)
Judo = Category(name="Judo")
session.add(Judo)
Cricket = Category(name="Cricket")
session.add(Cricket)
Hockey = Category(name="Hockey")
session.add(Hockey)
Googles = Item(name="Googles", category=Snowboarding, owner=Peter)
session.add(Googles)
Bat = Item(name="Bat", category=Baseball, owner=Peter)
session.add(Bat)
Stick = Item(name="Stick",
category=Hockey,
owner=Fernandez,description = "Stick to play the hockey game")
session.add(Stick)
Jersey = Item(name="Jersey", category=Hockey, owner=Fernandez)
session.add(Jersey)
Snowboard = Item(name="Snowboard", category=Snowboarding, owner=Peter)
session.add(Snowboard)
Ball = Item(name="Ball",
category=Cricket,
owner=Fernandez,description = "Ball to play the game")
session.add(Ball)
Glouse = Item(name="Glouse",
category=Cricket,
owner=Fernandez,description = "Glouse is to protect your hand")
session.add(Glouse)
session.commit()
| [
"[email protected]"
]
| |
2bc1fcc7b2f69fdf2a3224d4812bd611106212fd | ca3a49676cdf1016b2d729f0432b451d35b7a281 | /bad-solutions/add.py | 698f358d6ce95ac0d0d3832d8c44a19f39928fd9 | [
"MIT"
]
| permissive | SquareandCompass/code-align-evals-data | 3bb71b605316f56bb27466f23706a329f3fb4938 | 97446d992c3785d6605f1500b2c9b95d042e7b9c | refs/heads/main | 2023-06-19T12:47:56.277363 | 2021-07-21T00:22:56 | 2021-07-21T00:22:56 | 640,147,842 | 0 | 1 | null | 2023-05-13T06:22:30 | 2023-05-13T06:22:29 | null | UTF-8 | Python | false | false | 575 | py | def add(lst):
"""Given a non-empty list of integers lst. add the even elements that are at odd indices..
Examples:
add([4, 2, 6, 7]) ==> 2
"""
    return sum([lst[i] for i in range(1, len(lst), 2) if lst[i] % 2 == 0])
def check(candidate):
# Check some simple cases
assert candidate([4, 88]) == 88
assert candidate([4, 5, 6, 7, 2, 122]) == 122
assert candidate([4, 0, 6, 7]) == 0
assert candidate([4, 4, 6, 8]) == 12
# Check some edge cases that are easy to work out by hand.
if __name__ == "__main__":
check(add)
| [
"[email protected]"
]
| |
0ef9cb329b0021acd62f178d23501015177ba738 | 49e6d84b54d093f5ef318bd0a61cdc8d3f712cb8 | /digitalmanifesto/wsgi.py | 2ec366fad0959abeb16f02a6748a434e00d50fe5 | [
"MIT"
]
| permissive | gwhigs/digital-manifesto | deeed2f19eb5b5743d18a14b99fcc08aa6293e09 | e6dc01b9a228943f065709f1eef6a8266564dabd | refs/heads/master | 2021-01-24T10:47:43.210444 | 2018-12-29T21:22:55 | 2018-12-29T21:22:55 | 30,809,164 | 4 | 1 | null | 2015-06-14T20:42:03 | 2015-02-14T21:20:19 | Python | UTF-8 | Python | false | false | 407 | py | """
WSGI config for digitalmanifesto project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "digitalmanifesto.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| [
"[email protected]"
]
| |
f096bfa561df5e515ae7053d95cb1615b3355d9e | 4dbfd15a57f72cb2b4cd7b9585c1809891e57833 | /volunteer.na4u.ru/noneisnull.py | a095ed0b248ed7ba23f23ae363af530d29930e73 | []
| no_license | papalos/volunteerReg | 2959ad9c47915d69a711c775737a33883dd7e6f3 | 3a55bac19299c90438c9c9e3b041097eda139d4d | refs/heads/master | 2022-12-08T22:57:03.569832 | 2020-09-18T09:53:32 | 2020-09-18T09:53:32 | 296,578,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50 | py | def nulling(n):
return 0 if n is None else n
| [
"[email protected]"
]
| |
0e9a38665795bd642e825d58f2ad24a34ebb9439 | f8c3c677ba536fbf5a37ac4343c1f3f3acd4d9b6 | /ICA_SDK/test/test_instrument.py | aeb50805e0f9f55fb14b9f8cfa35dbca74de8c92 | []
| no_license | jsialar/integrated_IAP_SDK | 5e6999b0a9beabe4dfc4f2b6c8b0f45b1b2f33eb | c9ff7685ef0a27dc4af512adcff914f55ead0edd | refs/heads/main | 2023-08-25T04:16:27.219027 | 2021-10-26T16:06:09 | 2021-10-26T16:06:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,573 | py | # coding: utf-8
"""
IAP Services
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import ICA_SDK
from ICA_SDK.models.instrument import Instrument # noqa: E501
from ICA_SDK.rest import ApiException
class TestInstrument(unittest.TestCase):
"""Instrument unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test Instrument
        include_optional is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = ICA_SDK.models.instrument.Instrument() # noqa: E501
if include_optional :
return Instrument(
id = '0',
name = '0',
description = '0',
serial_number = '0',
control_software_version = '0',
operating_software_version = '0',
instrument_type = '0'
)
else :
return Instrument(
)
def testInstrument(self):
"""Test Instrument"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
c1497ab46aea616ebff8be15bec4cfafe17f1d6f | 74431a8edaa5b313a48ca501f737983c08fe3de6 | /aws_nag/utility.py | 4db3dbf2ccc32c8cb31612c7d39ebb2f156314ce | []
| no_license | dliggat/aws-resource-notifier | 0f2d73e197f7c518045cce9bae87c7587785a252 | 74f76eddddad6cb763bec9a6b444a9414e2eab61 | refs/heads/master | 2020-03-23T15:48:52.473607 | 2018-07-21T02:54:36 | 2018-07-21T02:54:46 | 141,776,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | import boto3
class Utility(object):
_aws_account_id = None
THRESHOLD = 11
@classmethod
def aws_account_id(cls):
"""Query for the current account ID by inspecting the default security group."""
if cls._aws_account_id is None:
cls._aws_account_id = int(boto3.client('ec2').describe_security_groups(
GroupNames=['default'])['SecurityGroups'][0]['OwnerId'])
return cls._aws_account_id
@classmethod
def format_for_display(cls, item):
"""Shortens a string for display."""
if len(item) < cls.THRESHOLD:
return item
else:
return item[0:cls.THRESHOLD] + '[...]'
| [
"[email protected]"
]
| |
9791ec8a8e82cb5b0b7f1957f907a4f4768882cc | 3b69b6bffacd1247eed2a1d5f074301b90ef976c | /config/settings.py | a87bf951d79027ab6e52c66969f8e80035a0c3f8 | [
"MIT"
]
| permissive | AdamSpannbauer/django_hello_world | 326aaf3cc9c43e2f6c6ecca78b826b976da97b34 | 610b02c932df41692fe557f3d96cb97e95e28242 | refs/heads/main | 2023-01-28T19:25:54.750254 | 2020-12-06T21:35:49 | 2020-12-06T21:35:49 | 318,765,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,071 | py | """
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "a_p99!4nw*eym+%&9&l#uusb98lh9_52y$^2n0ze4v#7unn(pt"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"pages",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "config.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [str(BASE_DIR.joinpath("templates"))],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "config.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = "/static/"
| [
"[email protected]"
]
| |
3d28100d3c9863e403e81490f9fc06870275da9c | 3af4d4cf1a53d460476d2e5f2f38761919a8a3b9 | /pyDLib/Core/controller.py | 506ee3faa08a25296f2bc592e6049c4b4416de6b | [
"MIT"
]
| permissive | lmoshood/abstractDataLibrary | 1b87affe6478d52989e09c3ab0cfc4630d1ff6d1 | 16be28e99837e40287a63803bbfdf67ac1806b7b | refs/heads/master | 2020-07-09T05:10:27.204031 | 2019-03-28T15:50:34 | 2019-03-28T15:50:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,188 | py | """Implements theory_main objects : interfaces wrapper by interInterfaces """
import json
import logging
import os
from typing import Dict, Type, List, Optional
from . import data_model, groups, formats, sql, threads, security
from . import init_all, StructureError
class Callbacks:
def __getattr__(self, key):
def f(*args, **kwargs):
logging.error(
f"No callback with name {key}, args {args} and kwargs {kwargs} registered !")
return f
class abstractInterface:
"""Base class for the main driver of the application. GUI parts will register callbacks through
set_callback, add_reset_function, add_update_function.
    Then, the interface updates the data and refreshes the rendering with update_rendering, reset_rendering.
    It's up to the interface to choose between a soft update and a hard one (with reset).
"""
ACCES: Type[data_model.abstractAcces] = data_model.abstractAcces
"""Default acces. Used in the convenient function get_acces"""
CALLBACKS: List[str] = []
"""Functions that a GUI module should provide.
Note : a callback update_toolbar should be also set (by the mai GUI application)"""
TABLE: Optional[str] = None
"""Default table used to build main collection (through get_all)"""
base: data_model.abstractBase
collection: groups.Collection
main: 'abstractInterInterfaces'
def __init__(self, main: 'abstractInterInterfaces', permission):
"""
        Constructor.
:param main: Abstract theory_main
:param permission: Integer coding permission for this module
"""
self.main = main
self.base = main.base
self.permission = permission
self.sortie_erreur_GUI = lambda s, wait=False: print(s)
self.sortie_standard_GUI = lambda s, wait=False: print(s)
self.updates = [] # graphiques updates
self.resets = [] # graphiques resets
self.callbacks = Callbacks() # Containers for callbacks
self.set_callback("update_toolbar", lambda: None)
self.threads = [] # Active threads
self.collection = self.get_all()
def reset(self):
self._reset_data()
self._reset_render()
def update(self):
self._update_data()
self._update_render()
def _reset_data(self):
self.collection = self.get_all()
def _update_data(self):
pass
def _reset_render(self):
for f in self.resets:
f()
self.callbacks.update_toolbar()
def _update_render(self):
for f in self.updates:
f()
self.callbacks.update_toolbar()
def add_reset_function(self, f):
self.resets.append(f)
def remove_reset_function(self, f):
try:
self.resets.remove(f)
except ValueError:
logging.exception("Unknown reset function !")
def add_update_function(self, f):
self.updates.append(f)
def remove_update_function(self, f):
try:
self.updates.remove(f)
except ValueError:
logging.exception("Unknown update function !")
def set_callback(self, name, function):
setattr(self.callbacks, name, function)
def get_acces(self, Id) -> data_model.abstractAcces:
return self.ACCES(self.base, Id)
def get_all(self) -> groups.Collection:
table = getattr(self.base, self.TABLE)
c = groups.Collection(self.ACCES(self.base, i) for i in table)
return c
def recherche(self, pattern, entete, in_all=False):
"""abstractSearch in fields of collection and reset rendering.
Returns number of results.
If in_all is True, call get_all before doing the search."""
if in_all:
self.collection = self.get_all()
self.collection.recherche(pattern, entete)
self._reset_render()
return len(self.collection)
def launch_background_job(self, job, on_error=None, on_success=None):
"""Launch the callable job in background thread.
Succes or failure are controlled by on_error and on_success
"""
if not self.main.mode_online:
self.sortie_erreur_GUI(
"Local mode activated. Can't run background task !")
self.reset()
return
on_error = on_error or self.sortie_erreur_GUI
on_success = on_success or self.sortie_standard_GUI
def thread_end(r):
on_success(r)
self.update()
def thread_error(r):
on_error(r)
self.reset()
logging.info(
f"Launching background task from interface {self.__class__.__name__} ...")
th = threads.worker(job, thread_error, thread_end)
self._add_thread(th)
def _add_thread(self, th):
self.threads.append(th)
th.done.connect(lambda: self.threads.remove(th))
th.error.connect(lambda: self.threads.remove(th))
def get_labels_stats(self):
"""Should return a list of labels describing the stats"""
raise NotImplementedError
def get_stats(self):
"""Should return a list of numbers, compliant to get_labels_stats"""
raise NotImplementedError
def get_actions_toolbar(self):
"""Return a list of toolbar constitution. One element has the form
( identifier , callback , tooltip , enabled ).
This function is called every time the toolbar updates"""
return []
@staticmethod
def filtre(liste_base, criteres) -> groups.Collection:
"""
        Return a filtered list, based on criteres
:param liste_base: Acces list
:param criteres: Criteria { `attribut`:[valeurs,...] }
"""
def choisi(ac):
for cat, li in criteres.items():
v = ac[cat]
if not (v in li):
return False
return True
return groups.Collection(a for a in liste_base if choisi(a))
def copy_to_clipboard(self, text):
self.main.callbacks.copy_to_clipboard(text)
class abstractInterInterfaces:
"""
Entry point of abstrat tasks.
Responsible of loading data, preferences, configuration,...
"""
PATH_PREFERENCES = "preferences.json"
DEBUG = {}
"""debug modules"""
BASE_CLASS = data_model.abstractBase
INTERFACES_MODULE = None
"""Modules containing all interfaces required"""
base: data_model.abstractBase
autolog: Dict
interfaces: Dict[str, abstractInterface]
def __init__(self):
self.base = None
self.users = {}
self.autolog = {}
self.modules = {} # Modules to load
self.interfaces = {}
self.preferences = self.load_preferences()
self.callbacks = Callbacks()
def load_preferences(self):
if not os.path.isfile(self.PATH_PREFERENCES):
logging.warning(
f"No user preferences file found in {os.path.abspath(self.PATH_PREFERENCES)} !")
return {}
with open(self.PATH_PREFERENCES, "r", encoding="utf8") as f:
try:
return json.load(f, object_hook=formats.date_decoder)
except json.JSONDecodeError:
logging.exception("User preferences file corrupted !")
return {}
def update_preferences(self, key, value):
if key is not None:
self.preferences[key] = value
with open(self.PATH_PREFERENCES, "w", encoding="utf8") as f:
json.dump(self.preferences, f, cls=formats.JsonEncoder)
logging.info(f"Preference {key} updated.")
def load_remote_data(self, callback_etat=print):
"""
        Load remote data. On success, build base.
On failure, raise :class:`~.Core.exceptions.StructureError`, :class:`~.Core.exceptions.ConnexionError`
:param callback_etat: State renderer str , int , int -> None
"""
callback_etat("Chargement des utilisateurs", 0, 1)
self._load_users()
self.base = self.BASE_CLASS.load_from_db(callback_etat=callback_etat)
def _load_users(self):
"""Default implentation requires users from DB.
Should setup `users` attribute"""
r = sql.abstractRequetesSQL.get_users()()
self.users = {d["id"]: dict(d) for d in r}
def reset_interfaces(self):
"""Reset data and rendering for all interfaces"""
for i in self.interfaces.values():
i.reset()
def set_callback(self, name, f):
"""Store a callback accessible from all interfaces"""
setattr(self.callbacks, name, f)
def load_modules(self):
"""Should instance interfaces and set them to interface, following `modules`"""
if self.INTERFACES_MODULE is None:
raise NotImplementedError("A module containing interfaces modules "
"should be setup in INTERFACES_MODULE !")
else:
for module, permission in self.modules.items():
i = getattr(self.INTERFACES_MODULE,
module).Interface(self, permission)
self.interfaces[module] = i
def export_data(self, bases, savedir):
"""Packs and zip asked bases (from base).
Saves archive in given savedir"""
raise NotImplementedError
def import_data(self, filepath):
"""Unziip archive. Chech integrity.
Overwrite current base"""
raise NotImplementedError
def init_modules(self, dev=False):
"""load_credences, load_configuration, init_modules from Core should be overridden"""
init_all(dev)
def update_credences(self, url):
"""Download and update credences file.
Modules should be re-initialized after"""
raise NotImplementedError
def update_configuration(self, monitor=print):
"""Download and update configuration files. Url is given in credences"""
raise NotImplementedError
def has_autolog(self, user_id):
"""
        Read auto-connection parameters and return the local password, or None
"""
try:
with open("local/init", "rb") as f:
s = f.read()
s = security.protege_data(s, False)
self.autolog = json.loads(s).get("autolog", {})
except FileNotFoundError:
return
mdp = self.autolog.get(user_id, None)
return mdp
def loggin(self, user_id, mdp, autolog):
"""Check mdp and return True it's ok"""
r = sql.abstractRequetesSQL.check_mdp_user(user_id, mdp)
if r():
# update auto-log params
self.autolog[user_id] = autolog and mdp or False
self.modules = self.users[user_id]["modules"] # load modules list
dic = {"autolog": self.autolog, "modules": self.modules}
s = json.dumps(dic, indent=4, ensure_ascii=False)
b = security.protege_data(s, True)
with open("local/init", "wb") as f:
f.write(b)
            self.mode_online = True  # authorization to execute background tasks
return True
else:
logging.debug("Bad password !")
def loggin_local(self):
try:
with open("local/init", "rb") as f:
s = f.read()
s = security.protege_data(s, False)
modules = json.loads(s)["modules"]
except (KeyError, FileNotFoundError) as e:
raise StructureError(
"Impossible des lire les derniers modules utilisés !")
else:
self.modules = {k: 0 for k in modules} # low permission
self.mode_online = False
self.base = self.BASE_CLASS.load_from_local()
def launch_debug(self, mode_online):
self.mode_online = mode_online
self.modules = self.DEBUG
self.base = self.BASE_CLASS.load_from_local()
self.load_modules()
def direct_load_remote_db(self):
tables = [t for t in sorted(self.base.TABLES)]
l = sql.abstractRequetesSQL.load_data(tables)()
self.base = self.BASE_CLASS(l)
| [
"[email protected]"
]
| |
54698d3df9e146bba7518dcc501a9dbbc6db33e5 | 4d2937e2c974b65939a5021b36bf98d1088d92c2 | /hw1/test.py | b278da97353896a1e46fc4eabec5bd2d0d666976 | []
| no_license | tommy413/ADLxMLDS2017 | 97d9ab35fdc56c0982a26ffc991bb5878d942981 | d613908ab9b3b3be9fa93b939d17cfde5908ca57 | refs/heads/master | 2021-03-27T12:28:44.149677 | 2018-01-16T09:58:51 | 2018-01-16T09:58:51 | 105,556,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,797 | py | # -*- coding: utf-8 -*-
import sys
import numpy as np
import pickle as pkl
import os
import csv
#os.environ["THEANO_FLAGS"] = "device=gpu0"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
from keras.models import load_model,model_from_json
import json
data_path = "feature/%s" % sys.argv[1]
model_name = sys.argv[2]
output_path = sys.argv[3]
dim_dict = {"mfcc" : 39, "fbank" : 69}
def trimming(seq):
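	# Heuristic post-processing of the framewise phone sequence: collapse runs
	# of identical phones, keep a phone only when its run lasts at least 2
	# frames, and trim leading/trailing silence ('L').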
flag = 0
last = ""
count = 0
new_seq = ""
for i in seq:
if i != last :
last = i
if flag == 0 :
if i != 'L' :
if count >= 2 :
new_seq = new_seq + i
flag = 1
elif flag == 1 :
if count >= 2 :
if new_seq[-1] != i:
new_seq = new_seq + i
count = 1
elif i == last :
count = count + 1
	while new_seq and new_seq[-1] == 'L':
new_seq = new_seq[:-1]
return new_seq
testId2Ix = pkl.load(open("%s/testId2Ix.pkl" % data_path,'rb'))
label2Ix = pkl.load(open("%s/label2Ix.pkl" % data_path,'rb'))
Xtest = pkl.load(open("%s/Xtest.pkl" % data_path,'rb'))
print("Files loaded.")
Ix2label = {}
for key in label2Ix.keys():
Ix2label[label2Ix[key]] = key
model = model_from_json(json.load(open("model/%s.json" % model_name)))
model.load_weights("model/%s_model_weight.hdf5" % model_name)
model.summary()
print('Model loaded.')
result = model.predict_classes(Xtest)
Ix2TestId = {}
for key in testId2Ix.keys():
Ix2TestId[testId2Ix[key]] = key
final_rst = []
for row in result:
rstStr = ""
for element in row :
rstStr = rstStr + str(Ix2label[element])
final_rst.append(rstStr)
trimed_rst = []
for row in final_rst:
trimed_rst.append( trimming(list(row)) )
result_file = open(output_path,'w')
wf = csv.writer(result_file)
wf.writerow(['id','phone_sequence'])
for i in range(0,len(trimed_rst)):
wf.writerow([Ix2TestId[i],trimed_rst[i]])
result_file.close()
| [
"[email protected]"
]
| |
68079fdbc941656a3155d91dfdbba596998dcc14 | 754435b83f377583c571773696367aa8827dae92 | /remover.py | 65b8177902c8403cbca198badcc86b764c3068c3 | []
| no_license | szyangming/emma_tools | 8a42a72049144af0177d17885f5ff282118cf1c4 | 2046754a985b66740ea613f0f8c006e7adb0e887 | refs/heads/master | 2023-04-26T20:22:01.491919 | 2021-05-19T02:10:37 | 2021-05-19T02:10:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,386 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/5/30 10:23 上午
# @Author : CrissChan
# @Site : https://blog.csdn.net/crisschan
# @File : remove.py
# @Intro : helpers to delete files and directories of all kinds
import os
import shutil
class Remover(object):
@classmethod
def dir(cls,rm_root):
'''
递归删除目录以及目录内的所有内容.
:param root:删除目录
:return:
'''
for root, dirs, files in os.walk(rm_root, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
os.removedirs(rm_root)
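        # Note: os.removedirs also prunes now-empty parent directories;
        # use os.rmdir(rm_root) to remove only this directory.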
@classmethod
def dir_under(cls,rm_root):
'''
:param rm_root:删除目录下所有的文件,目录不删除存在
:return:
'''
shutil.rmtree(rm_root)
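        # Note: shutil.rmtree removes rm_root itself as well, not only its
        # contents; recreate the directory afterwards if it must be kept.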
@classmethod
def file(cls,rm_file):
'''
删除文件
:param root:删除文件路径
:return:
'''
if os.path.exists(rm_file):
os.unlink(rm_file)
@classmethod
def dir_empty(cls,rm_root):
'''
递归删除目录,如果有一个目录非空则会抛出异常
:param rm_root: 删除目录
:return:
'''
if os.path.exists(rm_root):
os.removedirs(rm_root)
| [
"[email protected]"
]
|