blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f33e9739d5c9414b68e42ed8a44983cb9e5b2cad | 439868a2bdd6a13a299ed2d0becf877591102d2f | /semrep/semrep.py | e148ae422a3ce644f2a549165c33d10d939864e9 | [] | no_license | sariogonfer/Master-DecisionSupport-TFM | b4b1f6032715264f5b4541a0a3978826c71a7350 | 02b4faeec0be4d6356acb5e7c5eb19af9b388c76 | refs/heads/master | 2020-06-04T05:52:27.747886 | 2019-06-20T13:15:20 | 2019-06-20T13:15:20 | 191,895,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,063 | py |
from collections import ChainMap
from copy import deepcopy
from functools import partial
from itertools import chain, product
from tempfile import NamedTemporaryFile as ntf
from xml.dom import minidom
import os
import subprocess
import networkx as nx
import spacy
class NotProcessedException(Exception):
"""The text is not processed yet. Process it before to continue."""
pass
def entity2dict(e):
    """Map a SemRep <Entity> DOM node to {id: {'name': ..., 'semtypes': [...]}}."""
    attr = e.attributes
    name_attr = attr.get('name') or attr.get('text')
    semtypes_attr = attr.get('semtypes')
    return {
        attr.get('id').value: {
            'name': name_attr.value if name_attr is not None else '',
            'semtypes': semtypes_attr.value.split(',') if semtypes_attr is not None else []
        }
    }
class SemRepWrapper:
_doc = None
_dom = None
def __init__(self, text, lang='en'):
nlp = spacy.load(lang)
self._doc = nlp(text)
@property
def doc(self):
return self._doc
@property
def dom(self):
if not self._dom:
self._dom = minidom.parseString(self._raw_processed)
return self._dom
def _process_semrep(self, resolved_text):
cmd = '/opt/public_semrep/bin/'
cmd += 'semrep.v1.8 -L 2018 -Z 2018AA -X {in_} {out}'
with ntf(mode='w') as in_, ntf('r+') as out:
in_.write(resolved_text)
in_.seek(0)
cmd = cmd.format(in_=in_.name, out=out.name)
subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True)
            closing_tag = '</SemRepAnnotation>'
            self._raw_processed = out.read()
            # SemRep may emit truncated XML; append the closing tag if it is missing
            self._raw_processed += (
                closing_tag if closing_tag not in self._raw_processed else ''
            )
return
def process(self):
return self._process_semrep(self._doc.text)
@classmethod
def load(cls, path):
"""Intence an object from the a semrep output file."""
with open(path, 'r') as f_in:
obj = cls('')
obj._raw_processed = f_in.read()
return obj
@property
def _utterances(self):
for u in self.dom.getElementsByTagName('Utterance'):
yield u
@property
def utterances(self):
for u in self._utterances:
yield u
@property
def _entities(self):
for u in self._utterances:
yield u.getElementsByTagName('Entity')
@property
def entities(self):
for u in self._entities:
yield dict(ChainMap(*[entity2dict(e) for e in u]))
@property
def _predications(self):
for u in self._utterances:
yield u.getElementsByTagName('Predication')
@property
def predications(self):
def _entities_map(u):
return dict(ChainMap(*[entity2dict(e)
for e in u.getElementsByTagName('Entity')]))
for u in self._utterances:
predications = list()
ents = _entities_map(u)
for pr in u.getElementsByTagName('Predication'):
s = ents[pr.getElementsByTagName('Subject')[0].attributes.get(
'entityID').value]
p = pr.getElementsByTagName('Predicate')[0].attributes.get(
'type').value
o = ents[pr.getElementsByTagName('Object')[0].attributes.get(
'entityID').value]
predications.append({
'subject': s,
'predicate': p,
'object': o
})
if not predications:
continue
yield predications
def ent2node(G, e):
aux = dict(e)
name = aux.pop('name')
G.add_node(name, **aux)
return name
def _set_graph_attributes(G, edge_attrs={}):
for k, v in edge_attrs.items():
nx.set_edge_attributes(G, v, k)
class SemRepGraph(SemRepWrapper):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._G_e = nx.MultiDiGraph()
self._G_p = nx.MultiDiGraph()
def process(self):
super().process()
self.process_graphs()
def _process_entities_graph(self):
ents = chain.from_iterable(u.values() for u in self.entities)
for s, t in product(ents, repeat=2):
if s == t:
continue
self._G_e.add_edge(
ent2node(self._G_e, s),
ent2node(self._G_e, t)
)
def _process_predications_graph(self):
for p in chain.from_iterable(self.predications):
self._G_p.add_edge(
ent2node(self._G_p, p['subject']),
ent2node(self._G_p, p['object']),
type=p['predicate']
)
def process_graphs(self):
self._process_entities_graph()
self._process_predications_graph()
def G_entities(self, edge_attrs={}):
aux = deepcopy(self._G_e)
_set_graph_attributes(aux, edge_attrs)
return aux
def G_predications(self, edge_attrs={}):
aux = deepcopy(self._G_p)
_set_graph_attributes(aux, edge_attrs)
return aux
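# Usage sketch (not part of the original file): assumes a local SemRep install at
# the path hard-coded in _process_semrep and spaCy's 'en' model; the sentence
# below is illustrative only.
if __name__ == '__main__':
    sr = SemRepGraph('Aspirin reduces the risk of heart attack.')
    sr.process()                              # run SemRep, then build both graphs
    for preds in sr.predications:             # one list of predications per utterance
        for p in preds:
            print(p['subject']['name'], p['predicate'], p['object']['name'])
    G = sr.G_predications(edge_attrs={'weight': 1.0})  # annotated copy of the graph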
| ["[email protected]"] | |
83fbadd72e038adad5bad8df605e71ebda5b3319 | 99cb9304d2e7fc48721b3a63f8a3340d13ff1246 | /figures/forzado.py | 64924d5a696b4a03653f22409bac8ad77b283eb6 | [] | no_license | restrepo/Mecanica | 1e9ee12845330d47a223534a8328c02eeaee4996 | 8df79ae95ab5228c72d6e7d17fffa1111f098fcb | refs/heads/master | 2020-04-09T11:55:05.990581 | 2018-08-16T21:13:45 | 2018-08-16T21:13:45 | 5,415,048 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 446 | py |
import numpy as np
import matplotlib.pyplot as plt

# Response 1/(w0**2 - w**2) of a driven oscillator with w0 = 50; the two branches
# are plotted separately so no line is drawn across the singularity at w = w0.
w = np.arange(40, 49.9, 0.05)
plt.plot(w, 1. / (50.**2 - w**2), 'b-')
w = np.arange(50.1, 60, 0.05)
plt.plot(w, 1. / (50.**2 - w**2), 'b-')
plt.vlines(50, -0.06, 0.06, color='k', linestyles='dashed')  # resonance at w = w0
plt.hlines(0, 40, 60, color='k', lw=0.5)
plt.xticks([])
plt.yticks([])
plt.xlim(40, 60)
plt.ylim(-0.06, 0.06)
plt.xlabel(r'$\omega_0$', size=20)
plt.title(r'$\frac{1}{(\omega_0^2-\omega^2)}$', size=20, verticalalignment='bottom')
plt.savefig('forzado.pdf')
| ["[email protected]"] | |
10968168e7fad76f8f96c8b8334aafb9050f112f | 9e3cb463a24f0f7be711c0f182fb5fc502011750 | /Codes/coins.py | 6edb5ce682e5ffc11daee8661c4bc93c91d65076 | [] | no_license | rodrigobmedeiros/Udemy-Python-Programmer-Bootcamp | 0d1b732743eb1b7061d76c66c1398ae016c5ab3a | 5a16b0f48ef6cf6a775d288ccd3c399a8a12811d | refs/heads/master | 2021-02-15T07:50:04.051416 | 2020-03-20T21:03:38 | 2020-03-20T21:03:38 | 244,878,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 881 | py |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 17 18:53:19 2020
@author: Rodrigo Bernardo Medeiros
@function: coins
"""
def coins(n_coins):
    # Heads/tails is a binary (0-1) outcome, so a boolean represents each coin.
    isheads = True
    coins_list = []
    # n_init is the starting index and n_step the stride for each pass
    n_init = 1
n_step = 2
# Create a list with all coins in heads position
for i_coins in range(n_coins):
coins_list.append(isheads)
print(coins_list)
while n_init < n_coins:
for i in range(n_init,n_coins,n_step):
coins_list[i] = not coins_list[i]
n_init += 1
n_step += 1
print(coins_list)
total_heads = coins_list.count(True)
return total_heads
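# Smoke test (not in the original file): coin i is flipped once for every divisor
# (>= 2) of i + 1, so it ends heads exactly when i + 1 is a perfect square; the
# head count should therefore equal floor(sqrt(n_coins)).
for n in (4, 10, 100):
    assert coins(n) == int(n ** 0.5)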
| ["[email protected]"] | |
06385329021d4dfb1ba21644f6906dbaf727dd05 | a4d2e7a3585cff10b3ed52bc633dc9710ce68a87 | /CL/model/data_loader.py | 17c9f8a6d7cdbf1a385e491059b5be4ebbc2b915 | [] | no_license | runngezhang/deep-dereverb | 1bb58168ddcffd627990984a12916051e6bfe4d3 | f5867997f7fd3bab095c9b2afa836e87fd31db06 | refs/heads/main | 2023-08-25T00:23:01.710278 | 2021-10-15T19:10:38 | 2021-10-15T19:10:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,696 | py |
"""Alimentar con datos al bucle de entrenamiento"""
import sys
MAIN_PATH='/home/martin/deep-dereverb/model'
sys.path.append(MAIN_PATH) #Para poder importar archivos .py como librerias
from tensorflow.keras.utils import Sequence
import numpy as np
import os
import glob
import random
import librosa
import soundfile as sf
import pandas as pd
class DataGenerator(Sequence):
'Generates data for Keras'
def __init__(self, dataframe, list_IDs, batch_size=8, shuffle=False):
'Initialization'
self.dataframe = dataframe
self.list_IDs = list_IDs
self.batch_size = batch_size
self.shuffle = shuffle
self.on_epoch_end()
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.floor(len(self.list_IDs) / self.batch_size))
def __getitem__(self, index):
'Generate one batch of data'
#print(index)
# Generate indexes of the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Find list of IDs
list_IDs_temp = [self.list_IDs[k] for k in indexes]
#print(self.list_IDs)
# Generate data
X, y = self.__data_generation(list_IDs_temp)
return X, y
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(len(self.list_IDs))
#if self.shuffle == True:
# np.random.shuffle(self.indexes)
def __data_generation(self, list_IDs_temp):
'Generates data containing batch_size samples'
# Initialization
x_clean = np.empty((self.batch_size, 256, 256))
x_reverb = np.empty((self.batch_size, 256, 256))
#import pdb; pdb.set_trace()
# Generate data
for i, ID in enumerate(list_IDs_temp):
reverb, clean, tr = gen_stft(self.dataframe, ID)
#print(tr)
x_clean[i], x_reverb[i] = clean, reverb
return x_reverb, x_clean # [input, ground truth]
def build_generators(dataframe, batch, alpha=0.9):
    # random split into training and validation sets
audio_numbers = list(range(0, len(dataframe)))
random.shuffle(audio_numbers)
train_n = int(len(audio_numbers)*alpha)
validation_n = len(audio_numbers) - train_n
train_numbers = audio_numbers[:train_n]
train_numbers.sort()
val_numbers = audio_numbers[train_n:]
val_numbers.sort()
partition = {'train' : train_numbers,
'val' : val_numbers}
dataframe_sorted = dataframe.sort_values('tr', ascending=False)
# Generators
train_gen=DataGenerator(dataframe_sorted,partition['train'], batch_size=batch)
val_gen=DataGenerator(dataframe_sorted,partition['val'], batch_size=batch)
return train_gen, val_gen
EPS = np.finfo(float).eps
def normalise(array):
array_min = -75
array_max = 65
norm_array = (array - array_min) / (array_max - array_min + EPS)
return norm_array
def gen_stft(dataframe, ID):
clean_path = dataframe.iat[ID, 0]
reverb_path = dataframe.iat[ID, 1]
clean = np.load(clean_path)
reverb = np.load(reverb_path)
    # Compute the STFTs
    stft_clean = librosa.stft(clean, n_fft=512, hop_length=128)[:-1, :]  # discard the top frequency bin
stft_clean = np.abs(stft_clean)
stft_reverb = librosa.stft(reverb, n_fft=512, hop_length=128)[:-1,:]
stft_reverb = np.abs(stft_reverb)
    # Convert to log (dB) scale
log_stft_clean = librosa.amplitude_to_db(stft_clean)
log_stft_reverb = librosa.amplitude_to_db(stft_reverb)
    # Normalisation
norm_stft_reverb = normalise(log_stft_reverb)
norm_stft_clean = normalise(log_stft_clean)
return norm_stft_reverb, norm_stft_clean, dataframe.iat[ID,2]
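# Minimal usage sketch (not from the original repo). It assumes the DataFrame's
# first three columns are clean path, reverb path and reverberation time 'tr',
# each path pointing at a pre-computed .npy waveform; file names, signal length
# and sample rate below are hypothetical.
if __name__ == '__main__':
    for name in ('clean_0001.npy', 'clean_0002.npy', 'reverb_0001.npy', 'reverb_0002.npy'):
        np.save(name, np.random.randn(32640))  # 255 * 128 samples -> 256 STFT frames
    df = pd.DataFrame({'clean': ['clean_0001.npy', 'clean_0002.npy'],
                       'reverb': ['reverb_0001.npy', 'reverb_0002.npy'],
                       'tr': [0.6, 0.4]})
    train_gen, val_gen = build_generators(df, batch=1)
    x_reverb, x_clean = train_gen[0]
    print(x_reverb.shape, x_clean.shape)  # (1, 256, 256) each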
| ["[email protected]"] | |
92a7c5238a237e2aa1f03f860a29e40dea618a06 | 8466c271575f3432981afd8f76afeaf9366570c9 | /player.py | a9d28014b32af43370a336ef2135b6fc38604630 | [] | no_license | BethGranados/Snake | 5c2686909552939ba429674300f8eef8718182b5 | 81c94b1c952cc8c721154de8a396e2a55552753a | refs/heads/master | 2020-12-25T14:58:18.691737 | 2016-08-25T04:08:08 | 2016-08-25T04:08:08 | 66,459,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py |
import actor
class player(actor.actor):
    movement = (0, 10)  # initial direction: one 10-unit step along +y per move
def move(self):
self.cord = (self.cord[0] + self.movement[0], self.cord[1] + self.movement[1])
def changeDirX(self, x):
self.movement = (x, self.movement[1])
def changeDirY(self, y):
self.movement = (self.movement[0], y)
def getDirX(self):
return self.movement[0]
def getDirY(self):
return self.movement[1]
def display(self):
return self.image
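# Usage sketch (not in the original file); it assumes the actor.actor base class,
# which is not shown here, provides the cord and image attributes.
if __name__ == '__main__':
    p = player()
    p.cord = (0, 0)            # normally initialised by the actor base class
    p.changeDirX(10)
    p.changeDirY(0)            # now moving along +x only
    p.move()
    print(p.getDirX(), p.getDirY(), p.cord)   # 10 0 (10, 0)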
| ["[email protected]"] | |
20b361ed82e3c4f5ca631042f72ead83915be1a7 | 46ae8264edb9098c9875d2a0a508bc071201ec8b | /res/scripts/clientclientarena.py | 694ad470a4f1b89adc4300277077ab1452ac612e | [] | no_license | Difrex/wotsdk | 1fc6156e07e3a5302e6f78eafdea9bec4c897cfb | 510a34c67b8f4c02168a9830d23f5b00068d155b | refs/heads/master | 2021-01-01T19:12:03.592888 | 2016-10-08T12:06:04 | 2016-10-08T12:06:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,496 | py |
# Embedded file name: scripts/client/ClientArena.py
import Math
import BigWorld
import ResMgr
import ArenaType
from items import vehicles
import constants
import cPickle
import zlib
import Event
from constants import ARENA_PERIOD, ARENA_UPDATE, FLAG_STATE
from PlayerEvents import g_playerEvents
from debug_utils import *
from CTFManager import g_ctfManager
from helpers.EffectsList import FalloutDestroyEffect
import arena_components.client_arena_component_assembler as assembler
class ClientArena(object):
__onUpdate = {ARENA_UPDATE.VEHICLE_LIST: '_ClientArena__onVehicleListUpdate',
ARENA_UPDATE.VEHICLE_ADDED: '_ClientArena__onVehicleAddedUpdate',
ARENA_UPDATE.PERIOD: '_ClientArena__onPeriodInfoUpdate',
ARENA_UPDATE.STATISTICS: '_ClientArena__onStatisticsUpdate',
ARENA_UPDATE.VEHICLE_STATISTICS: '_ClientArena__onVehicleStatisticsUpdate',
ARENA_UPDATE.VEHICLE_KILLED: '_ClientArena__onVehicleKilled',
ARENA_UPDATE.AVATAR_READY: '_ClientArena__onAvatarReady',
ARENA_UPDATE.BASE_POINTS: '_ClientArena__onBasePointsUpdate',
ARENA_UPDATE.BASE_CAPTURED: '_ClientArena__onBaseCaptured',
ARENA_UPDATE.TEAM_KILLER: '_ClientArena__onTeamKiller',
ARENA_UPDATE.VEHICLE_UPDATED: '_ClientArena__onVehicleUpdatedUpdate',
ARENA_UPDATE.COMBAT_EQUIPMENT_USED: '_ClientArena__onCombatEquipmentUsed',
ARENA_UPDATE.RESPAWN_AVAILABLE_VEHICLES: '_ClientArena__onRespawnAvailableVehicles',
ARENA_UPDATE.RESPAWN_COOLDOWNS: '_ClientArena__onRespawnCooldowns',
ARENA_UPDATE.RESPAWN_RANDOM_VEHICLE: '_ClientArena__onRespawnRandomVehicle',
ARENA_UPDATE.RESPAWN_RESURRECTED: '_ClientArena__onRespawnResurrected',
ARENA_UPDATE.FLAG_TEAMS: '_ClientArena__onFlagTeamsReceived',
ARENA_UPDATE.FLAG_STATE_CHANGED: '_ClientArena__onFlagStateChanged',
ARENA_UPDATE.INTERACTIVE_STATS: '_ClientArena__onInteractiveStats',
ARENA_UPDATE.DISAPPEAR_BEFORE_RESPAWN: '_ClientArena__onDisappearVehicleBeforeRespawn',
ARENA_UPDATE.RESOURCE_POINT_STATE_CHANGED: '_ClientArena__onResourcePointStateChanged',
ARENA_UPDATE.OWN_VEHICLE_INSIDE_RP: '_ClientArena__onOwnVehicleInsideRP',
ARENA_UPDATE.OWN_VEHICLE_LOCKED_FOR_RP: '_ClientArena__onOwnVehicleLockedForRP'}
def __init__(self, arenaUniqueID, arenaTypeID, arenaBonusType, arenaGuiType, arenaExtraData, weatherPresetID):
self.__vehicles = {}
self.__vehicleIndexToId = {}
self.__positions = {}
self.__statistics = {}
self.__periodInfo = (ARENA_PERIOD.WAITING,
0,
0,
None)
self.__eventManager = Event.EventManager()
em = self.__eventManager
self.onNewVehicleListReceived = Event.Event(em)
self.onVehicleAdded = Event.Event(em)
self.onVehicleUpdated = Event.Event(em)
self.onPositionsUpdated = Event.Event(em)
self.onPeriodChange = Event.Event(em)
self.onNewStatisticsReceived = Event.Event(em)
self.onVehicleStatisticsUpdate = Event.Event(em)
self.onVehicleKilled = Event.Event(em)
self.onAvatarReady = Event.Event(em)
self.onTeamBasePointsUpdate = Event.Event(em)
self.onTeamBaseCaptured = Event.Event(em)
self.onTeamKiller = Event.Event(em)
self.onCombatEquipmentUsed = Event.Event(em)
self.onRespawnAvailableVehicles = Event.Event(em)
self.onRespawnCooldowns = Event.Event(em)
self.onRespawnRandomVehicle = Event.Event(em)
self.onRespawnResurrected = Event.Event(em)
self.onInteractiveStats = Event.Event(em)
self.onVehicleWillRespawn = Event.Event(em)
self.arenaUniqueID = arenaUniqueID
self.arenaType = ArenaType.g_cache.get(arenaTypeID, None)
if self.arenaType is None:
LOG_ERROR('Arena ID not found ', arenaTypeID)
self.bonusType = arenaBonusType
self.guiType = arenaGuiType
self.extraData = arenaExtraData
self.__arenaBBCollider = None
self.__spaceBBCollider = None
self.componentSystem = assembler.createComponentSystem(self.bonusType)
return
vehicles = property(lambda self: self.__vehicles)
positions = property(lambda self: self.__positions)
statistics = property(lambda self: self.__statistics)
period = property(lambda self: self.__periodInfo[0])
periodEndTime = property(lambda self: self.__periodInfo[1])
periodLength = property(lambda self: self.__periodInfo[2])
periodAdditionalInfo = property(lambda self: self.__periodInfo[3])
def destroy(self):
self.__eventManager.clear()
assembler.destroyComponentSystem(self.componentSystem)
def update(self, updateType, argStr):
delegateName = self.__onUpdate.get(updateType, None)
if delegateName is not None:
getattr(self, delegateName)(argStr)
self.componentSystem.update(updateType, argStr)
return
def updatePositions(self, indices, positions):
self.__positions.clear()
        lenInd = len(indices)
        # positions packs an (x, z) pair for every index
        assert len(positions) == 2 * lenInd
indexToId = self.__vehicleIndexToId
for i in xrange(0, lenInd):
if indices[i] in indexToId:
positionTuple = (positions[2 * i], 0, positions[2 * i + 1])
self.__positions[indexToId[indices[i]]] = positionTuple
self.onPositionsUpdated()
def collideWithArenaBB(self, start, end):
if self.__arenaBBCollider is None:
if not self.__setupBBColliders():
return
return self.__arenaBBCollider.collide(start, end)
def collideWithSpaceBB(self, start, end):
if self.__spaceBBCollider is None:
if not self.__setupBBColliders():
return
return self.__spaceBBCollider.collide(start, end)
def __setupBBColliders(self):
if BigWorld.wg_getSpaceBounds().length == 0.0:
return False
arenaBB = self.arenaType.boundingBox
spaceBB = _convertToList(BigWorld.wg_getSpaceBounds())
self.__arenaBBCollider = _BBCollider(arenaBB, (-500.0, 500.0))
self.__spaceBBCollider = _BBCollider(spaceBB, (-500.0, 500.0))
return True
def __onVehicleListUpdate(self, argStr):
        vehicleList = cPickle.loads(zlib.decompress(argStr))
        vehicles = self.__vehicles
        vehicles.clear()
        for infoAsTuple in vehicleList:
id, info = self.__vehicleInfoAsDict(infoAsTuple)
vehicles[id] = info
self.__rebuildIndexToId()
self.onNewVehicleListReceived()
def __onVehicleAddedUpdate(self, argStr):
infoAsTuple = cPickle.loads(zlib.decompress(argStr))
id, info = self.__vehicleInfoAsDict(infoAsTuple)
self.__vehicles[id] = info
self.__rebuildIndexToId()
self.onVehicleAdded(id)
def __onVehicleUpdatedUpdate(self, argStr):
infoAsTuple = cPickle.loads(zlib.decompress(argStr))
id, info = self.__vehicleInfoAsDict(infoAsTuple)
self.__vehicles[id] = info
self.onVehicleUpdated(id)
def __onPeriodInfoUpdate(self, argStr):
self.__periodInfo = cPickle.loads(zlib.decompress(argStr))
self.onPeriodChange(*self.__periodInfo)
g_playerEvents.onArenaPeriodChange(*self.__periodInfo)
def __onStatisticsUpdate(self, argStr):
self.__statistics = {}
statList = cPickle.loads(zlib.decompress(argStr))
for s in statList:
vehicleID, stats = self.__vehicleStatisticsAsDict(s)
self.__statistics[vehicleID] = stats
self.onNewStatisticsReceived()
def __onVehicleStatisticsUpdate(self, argStr):
vehicleID, stats = self.__vehicleStatisticsAsDict(cPickle.loads(zlib.decompress(argStr)))
self.__statistics[vehicleID] = stats
self.onVehicleStatisticsUpdate(vehicleID)
def __onVehicleKilled(self, argStr):
victimID, killerID, equipmentID, reason = cPickle.loads(argStr)
vehInfo = self.__vehicles.get(victimID, None)
if vehInfo is not None:
vehInfo['isAlive'] = False
self.onVehicleKilled(victimID, killerID, equipmentID, reason)
return
def __onAvatarReady(self, argStr):
vehicleID = cPickle.loads(argStr)
vehInfo = self.__vehicles.get(vehicleID, None)
if vehInfo is not None:
vehInfo['isAvatarReady'] = True
self.onAvatarReady(vehicleID)
return
def __onBasePointsUpdate(self, argStr):
team, baseID, points, timeLeft, invadersCnt, capturingStopped = cPickle.loads(argStr)
self.onTeamBasePointsUpdate(team, baseID, points, timeLeft, invadersCnt, capturingStopped)
def __onBaseCaptured(self, argStr):
team, baseID = cPickle.loads(argStr)
self.onTeamBaseCaptured(team, baseID)
def __onTeamKiller(self, argStr):
vehicleID = cPickle.loads(argStr)
vehInfo = self.__vehicles.get(vehicleID, None)
if vehInfo is not None:
vehInfo['isTeamKiller'] = True
self.onTeamKiller(vehicleID)
return
def __onCombatEquipmentUsed(self, argStr):
shooterID, equipmentID = cPickle.loads(argStr)
self.onCombatEquipmentUsed(shooterID, equipmentID)
def __onRespawnAvailableVehicles(self, argStr):
vehsList = cPickle.loads(zlib.decompress(argStr))
self.onRespawnAvailableVehicles(vehsList)
LOG_DEBUG_DEV('[RESPAWN] onRespawnAvailableVehicles', vehsList)
def __onRespawnCooldowns(self, argStr):
cooldowns = cPickle.loads(zlib.decompress(argStr))
self.onRespawnCooldowns(cooldowns)
def __onRespawnRandomVehicle(self, argStr):
respawnInfo = cPickle.loads(zlib.decompress(argStr))
self.onRespawnRandomVehicle(respawnInfo)
def __onRespawnResurrected(self, argStr):
respawnInfo = cPickle.loads(zlib.decompress(argStr))
self.onRespawnResurrected(respawnInfo)
def __onDisappearVehicleBeforeRespawn(self, argStr):
vehID = cPickle.loads(argStr)
FalloutDestroyEffect.play(vehID)
self.onVehicleWillRespawn(vehID)
def __onFlagTeamsReceived(self, argStr):
data = cPickle.loads(argStr)
LOG_DEBUG('[FLAGS] flag teams', data)
g_ctfManager.onFlagTeamsReceived(data)
def __onFlagStateChanged(self, argStr):
data = cPickle.loads(argStr)
LOG_DEBUG('[FLAGS] flag state changed', data)
g_ctfManager.onFlagStateChanged(data)
def __onResourcePointStateChanged(self, argStr):
data = cPickle.loads(argStr)
LOG_DEBUG('[RESOURCE POINTS] state changed', data)
g_ctfManager.onResourcePointStateChanged(data)
def __onOwnVehicleInsideRP(self, argStr):
pointInfo = cPickle.loads(argStr)
LOG_DEBUG('[RESOURCE POINTS] own vehicle inside point', pointInfo)
g_ctfManager.onOwnVehicleInsideRP(pointInfo)
def __onOwnVehicleLockedForRP(self, argStr):
unlockTime = cPickle.loads(argStr)
LOG_DEBUG('[RESOURCE POINTS] own vehicle is locked', unlockTime)
g_ctfManager.onOwnVehicleLockedForRP(unlockTime)
def __onInteractiveStats(self, argStr):
stats = cPickle.loads(zlib.decompress(argStr))
self.onInteractiveStats(stats)
LOG_DEBUG_DEV('[RESPAWN] onInteractiveStats', stats)
def __rebuildIndexToId(self):
vehicles = self.__vehicles
self.__vehicleIndexToId = dict(zip(range(len(vehicles)), sorted(vehicles.keys())))
def __vehicleInfoAsDict(self, info):
getVehicleType = lambda cd: (None if cd is None else vehicles.VehicleDescr(compactDescr=cd))
infoAsDict = {'vehicleType': getVehicleType(info[1]),
'name': info[2],
'team': info[3],
'isAlive': info[4],
'isAvatarReady': info[5],
'isTeamKiller': info[6],
'accountDBID': info[7],
'clanAbbrev': info[8],
'clanDBID': info[9],
'prebattleID': info[10],
'isPrebattleCreator': bool(info[11]),
'forbidInBattleInvitations': bool(info[12]),
'events': info[13],
'igrType': info[14],
'potapovQuestIDs': info[15]}
return (info[0], infoAsDict)
def __vehicleStatisticsAsDict(self, stats):
return (stats[0], {'frags': stats[1]})
def _convertToList(vec4):
return ((vec4.x, vec4.y), (vec4.z, vec4.w))
def _pointInBB(bottomLeft2D, upperRight2D, point3D, minMaxHeight):
return bottomLeft2D[0] < point3D[0] < upperRight2D[0] and bottomLeft2D[1] < point3D[2] < upperRight2D[1] and minMaxHeight[0] < point3D[1] < minMaxHeight[1]
class _BBCollider():
def __init__(self, bb, heightLimits):
self.__planes = list()
self.__bb = bb
self.__heightLimits = heightLimits
self.__planes.append(Plane(Math.Vector3(0.0, 0.0, 1.0), bb[0][1]))
self.__planes.append(Plane(Math.Vector3(0.0, 0.0, -1.0), -bb[1][1]))
self.__planes.append(Plane(Math.Vector3(1.0, 0.0, 0.0), bb[0][0]))
self.__planes.append(Plane(Math.Vector3(-1.0, 0.0, 0.0), -bb[1][0]))
self.__planes.append(Plane(Math.Vector3(0.0, 1.0, 0.0), heightLimits[0]))
self.__planes.append(Plane(Math.Vector3(0.0, -1.0, 0.0), -heightLimits[1]))
def collide(self, start, end):
if not _pointInBB(self.__bb[0], self.__bb[1], end, self.__heightLimits):
finalPoint = None
dist = 0
for plane in self.__planes:
intersecPoint = plane.intersectSegment(start, end)
if intersecPoint:
tmpDist = (intersecPoint - start).length
if tmpDist < dist or dist == 0:
dist = tmpDist
finalPoint = intersecPoint
if finalPoint is not None:
return finalPoint
else:
return start
return
class Plane():
def __init__(self, n, d):
self.n = n
self.d = d
def intersectSegment(self, a, b):
ab = b - a
normalDotDir = self.n.dot(ab)
if normalDotDir == 0:
return None
else:
t = (self.d - self.n.dot(a)) / normalDotDir
if t >= 0.0 and t <= 1.0:
return a + ab.scale(t)
return None
def testPoint(self, point):
if self.n.dot(point) - self.d >= 0.0:
return True
return False
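# Standalone sketch (not from the SDK): Plane only needs dot/scale and vector
# addition/subtraction, so a tiny stand-in vector class is enough to exercise
# intersectSegment outside the game client.
if __name__ == '__main__':
    class _Vec3(object):
        def __init__(self, x, y, z):
            self.x, self.y, self.z = x, y, z
        def __add__(self, o):
            return _Vec3(self.x + o.x, self.y + o.y, self.z + o.z)
        def __sub__(self, o):
            return _Vec3(self.x - o.x, self.y - o.y, self.z - o.z)
        def dot(self, o):
            return self.x * o.x + self.y * o.y + self.z * o.z
        def scale(self, s):
            return _Vec3(self.x * s, self.y * s, self.z * s)

    ground = Plane(_Vec3(0.0, 1.0, 0.0), 0.0)  # the plane y = 0
    hit = ground.intersectSegment(_Vec3(0.0, 5.0, 0.0), _Vec3(0.0, -5.0, 0.0))
    print hit.x, hit.y, hit.z  # 0.0 0.0 0.0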
| ["[email protected]"] | |
e22bff52efe6faa36581c0805d4918dfa3d5452f | ba9ec6cbfa9d32a5e1e876395c6958debe7475a6 | /random/ReadDict-panda.py | c05a5d39c8e4e0b3600d0902eb6bc5ffca352839 | [] | no_license | jadavsk/Python | 8481094b0d9c8a762e727f2955d36c4a7a51f748 | 8aa4e0e84d0b53868422c3a6439c466d80b86d75 | refs/heads/main | 2023-03-02T03:25:28.348063 | 2021-02-11T01:00:15 | 2021-02-11T01:00:15 | 318,917,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py |
dict = {"country": ["Brazil", "Russia", "India", "China", "South Africa"],
"capital": ["Brasilia", "Moscow", "New Dehli", "Beijing", "Pretoria"],
"area": [8.516, 17.10, 3.286, 9.597, 1.221],
"population": [200.4, 143.5, 1252, 1357, 52.98],
"Developed" : ["No","No","No","No","No"]
}
import pandas as pd
brics = pd.DataFrame(dict)
brics.index = ["BR", "RU", "IN", "CH", "SA"]
print(brics)
rcs = pd.read_csv("E:/test10rows.csv")
print(rcs)
#print(rcs[[['id','name','rank']]])
print(rcs[4:-2])
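# Label-based selection on the brics frame built above (illustrative only):
print(brics.loc[["BR", "IN"], ["country", "population"]])   # rows/columns by label
print(brics[brics["population"] > 1000])                    # boolean filtering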
| ["[email protected]"] | |
4ca86184e584fc3bb1adf52144a6639fb2e8a214 | 6988f0e240b177c6e29ed9892d27c108dc86c490 | /ACGCharacterDetector/__init__.py | 94df5631e6f69a7924cb719b6b74b18ebacd5d6d | [] | no_license | zjulzy/ACGCharacterDetector | 351f45075f06eabe975e2564e22908eadd83614c | dc259529a397b26e928f5d1849b1188d7c2c11b3 | refs/heads/main | 2023-07-11T02:12:19.092055 | 2021-08-11T04:48:38 | 2021-08-11T04:48:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,543 | py |
import io
from logging import log
from PIL import Image
from flask_cors import CORS
from flask import Flask, jsonify, request, render_template
app = Flask(__name__)
CORS(app)
# Read the configuration file
import configparser
config = configparser.ConfigParser()
config.read("config.ini")
app.config['YOLO_PATH'] = config['YOLO']['yolo_path']
app.config['YOLO_MODEL'] = config['YOLO']['yolo_model']
app.config['RESNET18_MODEL'] = config['RESNET18']['resnet18_model']
app.config['RESNET18_LABELS'] = config['RESNET18']['resnet18_labels']
app.config['RESNET18_TRANS'] = config['RESNET18']['resnet18_translate_dict']
# Do not move this import earlier: initialising the models needs the config set above
from .models import yolov5, resnet18
@app.route('/')
def hello():
return render_template('index.html')
@app.route('/detect', methods=['POST'])
def detect():
"""识别动漫人物头像位置并识别人物
Returns:
json: {'result':[
[x,y,x,y,probability,name],
...
]}
"""
if request.method == 'POST':
        # Read the uploaded image
file = request.files['image']
imagebytes = file.read()
image = Image.open(io.BytesIO(imagebytes))
boxes = __detect(image)
result = __recognize(image, boxes)
return jsonify(result)
return jsonify([
# [x,y,x,y,probability]
{
"box":[1,1,3,3,0.9],
"name": "name",
"trans": "trans"
}
])
def __detect(image: Image):
"""识别图中头像的位置
Args:
image (Image): PIL.Image对象,被识别的图片
Returns:
list: 识别到的头像框位置[(x,y,x,y,probability,class),...]
"""
boxes = yolov5(image).xyxyn[0]
# app.logger.debug(type(box))
# app.logger.debug(box)
return boxes.tolist()
def __recognize(image: Image, boxes: list):
"""识别头像人物
Args:
image (Image): 原始图片
boxes (list): 头像位置
Returns:
list: 识别到的头像框位置及人物名字[(x,y,x,y,probability,name),...]
"""
result = []
for box in boxes:
width, height = image.size
head_box = (
width * box[0], height * box[1],
width * box[2], height * box[3]
)
head_image = image.crop(head_box)
name,trans = resnet18(head_image)
result.append({
"box":box[:5],
"name":name,
"trans":trans
})
app.logger.debug(result)
return result
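# Hypothetical client-side sketch for the /detect endpoint above; the host, port
# and file name are assumptions, not part of this module:
# import requests
# with open('sample.jpg', 'rb') as f:
#     r = requests.post('http://localhost:5000/detect', files={'image': f})
# print(r.json())   # [{'box': [...], 'name': ..., 'trans': ...}, ...]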
| ["[email protected]"] | |
808b66ff6e5f2259bf63491d70c20bb57616c92b | 20e7b2997f4dd90ec4cea01124d70ee5bcf9b58f | /server/api/zone.py | 34ead4e61adb572ed1ed95c9da1cb40f7fc10376 | [] | no_license | baden/gps-maps27 | 77f87738984d3c711697bdbb380eeb039a1096eb | defe2925e6b35da5df6b546fd15e645d10d1a5b4 | refs/heads/master | 2021-03-12T23:52:22.735079 | 2014-01-29T12:31:20 | 2014-01-29T12:31:20 | 32,263,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,837 | py |
# -*- coding: utf-8 -*-
from core import BaseApi
import json
import logging
from google.appengine.ext import db
class Add(BaseApi):
#requred = ('akey')
def parcer(self):
from datamodel.zone import DBZone
#points = self.request.get("points", None)
ztype = self.request.get('type', 'polygon')
points = json.loads(self.request.get('points', '[]'))
zkey = self.request.get('zkey', None)
        bounds = json.loads(self.request.get('bounds', '[[0.0,0.0],[0.0,0.0]]'))
#zkey = DBZone.addZone(ztype, [db.GeoPt(lat=p[0], lon=p[1]) for p in points])
zkey = DBZone.addZone(ztype, points, zkey=zkey, bounds=bounds)
return {
"answer": "ok",
"points": points,
"zkey": str(zkey)
}
class Get(BaseApi):
#requred = ('akey')
def parcer(self):
from datamodel.zone import DBZone
#points = self.request.get("points", None)
#points = json.loads(self.request.get('points', '[]'))
skey = self.request.get("skey", None)
zones = DBZone.getZones().fetch(100) #DBZone.all().fetch(1000)
zlist = {}
for zone in zones:
zlist[str(zone.key())] ={
'zkey': str(zone.key()),
'type': zone.ztype_name,
'points': [(p.lat, p.lon) for p in zone.points],
'radius': zone.radius,
'owner': zone.owner.nickname(),
'private': zone.private,
'options': zone.options,
'name': zone.name,
'address': zone.address
}
if zones:
return {
"answer": "ok",
"zones": zlist
}
else:
return {
"answer": "no"
}
class Del(BaseApi):
#requred = ('akey')
def parcer(self):
from datamodel.zone import DBZone
zkey = self.request.get("zkey", None)
try:
db.delete(db.Key(zkey))
except db.datastore_errors.BadKeyError, e:
return {'answer': 'no', 'reason': 'account key error', 'comments': '%s' % e}
return {'answer': 'ok'}
class Info(BaseApi):
#requred = ('account')
def parcer(self, **argw):
from datamodel.zone import DBZone
zkey = db.Key(self.request.get("zkey", None))
from datamodel.channel import inform
from datamodel.namespace import private
import pickle
if self.request.get('cmd', '') == 'get':
q = DBZone.get(zkey)
if q is not None:
info = {
'id': q.key().namespace() + ':' + str(q.key().id_or_name()),
'name': q.name,
'address': q.address,
'active': q.active and 'checked' or '',
'desc': q.desc,
'comments': q.comments,
}
else:
info = {
}
elif self.request.get('cmd', '') == 'set':
info = {'set': 'set', 'params': self.request.POST.items()}
z = DBZone.get(zkey)
items = dict(self.request.POST.items())
logging.info("set zone datas: %s" % repr(items))
if z is not None:
logging.info("z: %s" % repr(z))
if 'name' in items:
z.name = items["name"]
if 'address' in items:
z.address = items["address"]
if 'desc' in items:
z.desc = items["desc"]
if 'comments' in items:
z.comments = items["comments"]
z.save()
#for (k, v) in items.iteritems():
# pass
"""
DBZone.set( self.skey,
number = self.request.POST['number'],
model = self.request.POST['model'],
year = self.request.POST['year'],
drive = self.request.POST['drive'],
vin = self.request.POST['vin'],
teh = self.request.POST['teh'],
casco = self.request.POST['casco'],
comments = self.request.POST['comments']
)
"""
else:
return {'result': 'error', 'reason': 'unknown operation'}
return {'result': 'ok', 'zkey': str(zkey), 'info': info}
class Rule_Create(BaseApi):
def parcer(self):
return {'answer': 'ok'}
class Rule_Get(BaseApi):
def parcer(self):
return {'answer': 'ok'}
class Rule_Del(BaseApi):
def parcer(self):
return {'answer': 'ok'}
| ["[email protected]@cd201f0b-5521-6f96-0748-8efd02dae0ad"] | [email protected]@cd201f0b-5521-6f96-0748-8efd02dae0ad |
a1e1777cc3e85074a7ca11f4d2a017bf604cf336 | dcb125099565f438a7a6f9c064abf4041fed14dc | /ex31.py | a6b438f6cb56c19ca7a7ea77c94a12aa3c31f458 | [] | no_license | SecZhujun/Py104 | 72794c57f5506e031fb11e786f50949b24bea8b5 | 966220b69588492d8d371935fb344d2e85544825 | refs/heads/master | 2020-12-30T10:11:23.345304 | 2017-08-08T13:59:37 | 2017-08-08T13:59:37 | 99,239,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,064 | py |
print("You enter a dark room with two doors. Do you go through door #1 or door #2?")
door = input("> ")
if door == "1":
print("There's a giant bear here eating a cheese cake. What do you do?")
print("1. Take the cake.")
print("2. Scream at the bear.")
bear = input("> ")
if bear == "1":
print("The bear eats your face off. Good job!")
elif bear == "2":
print("The bear eats your legs off. Good job!")
else:
print("Well, doing %s is probably better. Bear runs away." % bear)
elif door == "2":
print("You stare into the endless abyss at Cthulhu's retina.")
print("1. Blueberries.")
print("2. Yellow jacket clothespins.")
print("3. Understanding revolvers yelling melodies.")
insanity = input("> ")
if insanity == "1" or insanity == "2":
print("Your body survives powered by a mind of jello. Good job!")
else:
print("The insanity rots your eyes into a pool of muck. Good job!")
else:
print("You stumble around and fall on a knife and die. Good job!")
| ["[email protected]"] | |
98c846f9ae6a523c8f2dd83d8d296e2c0eb068ae | 6e1334b12fa0549264dc8536a8383810668be598 | /train_model/__init__.py | 4bea6f1fe82ed6f351f1a5155576e1c48d012d49 | [] | no_license | FireJohnny/Attention_domain_sentiment_analysis | 460bec4c064d0933eb81cbcad431e4db42156bb7 | 9ef4621e7add164ff5c1c4f8803c495c1eeb7492 | refs/heads/master | 2020-03-23T01:47:30.371808 | 2018-08-02T06:56:07 | 2018-08-02T06:56:07 | 140,937,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py |
#!/usr/bin/env python
# encoding: utf-8
"""
__author__ = 'FireJohnny'
@license: Apache Licence
@file: __init__.py.py
@time: 2018/6/10 12:21
"""
def func():
pass
class Main():
def __init__(self):
pass
if __name__ == '__main__':
pass
| ["[email protected]"] | |
0075b6d56de2872e38a7499b06627a80063c537b | 26654298576be6897f2ba37af7006644c71aca89 | /products/admin.py | 652c33f845da77f37b5b5691ff759d15bb420a67 | [] | no_license | EhsanOrandi/Online-Market | dd826f200a844d3a8da56306d67e6065a41b1abc | e65ef957b246b7346dbc98d192212abfd94abcd9 | refs/heads/main | 2023-03-21T15:53:42.736458 | 2021-03-03T17:05:39 | 2021-03-03T17:05:39 | 323,933,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,256 | py |
from django.contrib import admin
from .models import Category, Brand, Product, ShopProduct, Comment, Image, ProductMeta
# Register your models here.
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
list_display = ('name', 'slug', 'details', 'parent')
search_fields = ('name', 'slug')
list_filter = ('parent',)
@admin.register(Brand)
class BrandAdmin(admin.ModelAdmin):
list_display = ('name', 'slug')
search_fields = ('name', 'slug')
class ImageItemInline(admin.TabularInline):
model = Image
class ProductMetaItemInline(admin.TabularInline):
model = ProductMeta
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
list_display = ('name', 'slug', 'brand', 'category')
search_fields = ('name', 'slug')
list_filter = ('brand', 'category')
inlines = [ImageItemInline, ProductMetaItemInline]
@admin.register(ShopProduct)
class ShopProductAdmin(admin.ModelAdmin):
list_display = ('shop', 'product', 'price', 'quantity')
search_fields = ('product',)
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
list_display = ('user', 'product', 'text', 'rate')
search_fields = ('user', 'product')
list_filter = ('product',)
date_hierarchy = 'created_at'
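# A minimal models.py sketch consistent with the registrations above; the field
# names come from the admin options, but the field types are assumptions:
#
# class Category(models.Model):
#     name = models.CharField(max_length=100)
#     slug = models.SlugField()
#     details = models.TextField(blank=True)
#     parent = models.ForeignKey('self', null=True, blank=True, on_delete=models.CASCADE)
#
# class Comment(models.Model):
#     user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
#     product = models.ForeignKey(Product, on_delete=models.CASCADE)
#     text = models.TextField()
#     rate = models.PositiveSmallIntegerField()
#     created_at = models.DateTimeField(auto_now_add=True)   # needed by date_hierarchy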
| ["[email protected]"] | |
2353f630e1b4c55d06c5cd4999817f083f38db7b | 5463ff580e270799ba2a2c33bbcec6fc03c680d1 | /assignment1/cs231n/classifiers/softmax.py | dc69bb4085ac6a386c33e8d762b5a5b63953de82 | [] | no_license | vedantthapa/cs231n-solutions | d2bdfca72b9de7c6806aa71c4cf37227b92b5568 | faa78c245b12b24760189f7a5685c97ef39c21e7 | refs/heads/master | 2023-06-01T19:03:08.642427 | 2021-05-28T15:43:42 | 2021-05-28T15:43:42 | 369,722,183 | 0 | 0 | null | 2021-05-22T17:57:25 | 2021-05-22T05:19:53 | Jupyter Notebook | UTF-8 | Python | false | false | 3,373 | py |
from builtins import range
import numpy as np
from random import shuffle
from past.builtins import xrange
def softmax_loss_naive(W, X, y, reg):
"""
Softmax loss function, naive implementation (with loops)
Inputs have dimension D, there are C classes, and we operate on minibatches
of N examples.
Inputs:
- W: A numpy array of shape (D, C) containing weights.
- X: A numpy array of shape (N, D) containing a minibatch of data.
- y: A numpy array of shape (N,) containing training labels; y[i] = c means
that X[i] has label c, where 0 <= c < C.
- reg: (float) regularization strength
Returns a tuple of:
- loss as single float
- gradient with respect to weights W; an array of same shape as W
"""
# Initialize the loss and gradient to zero.
loss = 0.0
dW = np.zeros_like(W)
#############################################################################
# TODO: Compute the softmax loss and its gradient using explicit loops. #
# Store the loss in loss and the gradient in dW. If you are not careful #
# here, it is easy to run into numeric instability. Don't forget the #
# regularization! #
#############################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
num_train = X.shape[0]
num_classes = W.shape[1]
for i in range(num_train):
scores = X[i].dot(W)
f = scores - scores.max()
softmax = np.exp(f) / np.sum(np.exp(f))
correct_class_score = softmax[y[i]]
loss += -np.log(correct_class_score)
for j in range(num_classes):
dW[:, j] += X[i] * softmax[j]
dW[:, y[i]] -= X[i]
loss /= num_train
dW /= num_train
loss += reg * np.sum(W * W)
dW += 2 * reg * W
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return loss, dW
def softmax_loss_vectorized(W, X, y, reg):
"""
Softmax loss function, vectorized version.
Inputs and outputs are the same as softmax_loss_naive.
"""
# Initialize the loss and gradient to zero.
loss = 0.0
dW = np.zeros_like(W)
#############################################################################
# TODO: Compute the softmax loss and its gradient using no explicit loops. #
# Store the loss in loss and the gradient in dW. If you are not careful #
# here, it is easy to run into numeric instability. Don't forget the #
# regularization! #
#############################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
num_train = X.shape[0]
num_classes = W.shape[1]
scores = X.dot(W)
scores -= np.max(scores, axis=1, keepdims=True)
sum_exps = np.sum(np.exp(scores), axis=1, keepdims=True)
softmax_matrix = np.exp(scores) / sum_exps
loss = np.sum(-np.log(softmax_matrix[np.arange(num_train), y]))
softmax_matrix[np.arange(num_train), y] -= 1
dW = X.T.dot(softmax_matrix)
loss /= num_train
dW /= num_train
loss += reg * np.sum(W * W)
dW += 2 * reg * W
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return loss, dW
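# Quick self-check (not part of the assignment file): on random data the naive and
# vectorized implementations should agree to numerical precision.
if __name__ == '__main__':
    np.random.seed(0)
    W = np.random.randn(10, 3) * 0.01   # D = 10 features, C = 3 classes
    X = np.random.randn(5, 10)          # N = 5 examples
    y = np.random.randint(3, size=5)
    l1, g1 = softmax_loss_naive(W, X, y, reg=0.1)
    l2, g2 = softmax_loss_vectorized(W, X, y, reg=0.1)
    print(l1 - l2, np.abs(g1 - g2).max())   # both differences should be ~0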
| ["[email protected]"] | |
9db73616056bed06a9c8484c5aea2920e6c7b81e | 421b0ae45f495110daec64ed98c31af525585c2c | /PythonProgramsTraining/graphics/frame1.py | c0c8e6a93f60c197702ad936f518643ad8a67d1b | [] | no_license | Pradeepsuthar/pythonCode | a2c87fb64c79edd11be54c2015f9413ddce246c4 | 14e2b397f69b3fbebde5b3af98898c4ff750c28c | refs/heads/master | 2021-02-18T05:07:40.402466 | 2020-03-05T13:14:15 | 2020-03-05T13:14:15 | 245,163,673 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py |
import tkinter as tk
from tkinter import messagebox
def area():
    """Calculate the rectangle area from the length and width fields."""
    length = float(tfLen.get())   # 'length' avoids shadowing the built-in len()
    wid = float(tfWidth.get())
    result = length * wid
    tfArea.insert(0, result)
    # Show a message box with the result
    messagebox.showinfo("Info", "Area is: " + str(result) + " sq. cm")
# creating a frame
frame = tk.Tk()
frame.geometry("200x200")
#Creating controls
tfLen = tk.Entry(frame)
tfWidth = tk.Entry(frame)
tfArea = tk.Entry(frame)
btn = tk.Button(frame, text="Calculate Area", command=area)
# Adding components on frame
tfLen.pack()
tfWidth.pack()
tfArea.pack()
btn.pack()
# Showing frame
frame.mainloop()
| ["[email protected]"] | |
cad334329fd8492c437cdc851735130c1e0f1e6a | 22dd98f5e63ec77f279b067fc8baa51bc53e01b4 | /stocksite/stocksite/wsgi.py | afade811f89688c76918677f21ce8f050f1385ef | [] | no_license | crystalyang/StockPortfolioSuggestionEngine | 05ea0cde89762340fb8570fce3db8c6888664c9f | 41fb9fce0637ecaaf8d2fe42c5d24a302959089a | refs/heads/master | 2020-07-13T07:18:53.971647 | 2016-12-10T01:58:47 | 2016-12-10T01:58:47 | 73,889,287 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py |
"""
WSGI config for stocksite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "stocksite.settings")
application = get_wsgi_application()
| ["[email protected]"] | |
2f03ef0c87909a7ddf885f12cf909aa7a187d78f | 2835f367eb7c521987bdd1cbb309be9d55d77364 | /HDM/urls.py | 516f3f0f960b48689ab9b3780abf9c3952db47ce | [] | no_license | guneet-batra17/HDM | d1534fcfd8357368cfa7ad6c250ec5ec21cfa4d2 | c594053cc8cabeaedcbe74be5567b06b2c1cda86 | refs/heads/master | 2020-04-28T15:20:55.084015 | 2019-03-13T07:41:08 | 2019-03-13T07:41:08 | 175,369,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,070 | py |
"""HDM URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from manager import views
from django.conf.urls.static import static
from django.conf import settings
from django.conf.urls import url,include
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^',include('manager.urls')),
url(r'^',include('departments.urls')),
url(r'^',include('doctor.urls'))
]+ static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
| ["[email protected]"] | |
c7fad03eaae8d4aad5a35d208cb71239fb81131f | 86468928bc86d6551d85ea1b4fb643d4f99f5952 | /Day04_20190905/use_attention_model_for_IC.py | d22af1c305533e567ed256ab177efd749e1b79ef | ["MIT"] | permissive | Magicboomliu/liuzihua_PKU_intern | 4788b0e83f101d39dce61017ecce260b11ce7138 | 6d7c8cee49ad0c9471b184432a64f2cebea0d6c2 | refs/heads/master | 2020-07-17T14:41:26.551920 | 2019-09-14T08:31:51 | 2019-09-14T08:31:51 | 206,037,769 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,764 | py |
__author__ = "Luke Liu"
#encoding="utf-8"
# First import the following modules
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from imageio import imread
import scipy.io
import cv2
import os
import json
# tqdm is a Python progress-bar library, used to display loop progress
from tqdm import tqdm
import pickle
# Set the main parameters.
# maxlen caps each image caption at 20 tokens, which keeps the LSTM input manageable
batch_size = 128
maxlen = 20
image_size = 224
# VGG19 per-channel means
MEAN_VALUES = np.array([123.68, 116.779, 103.939]).reshape((1, 1, 3))
'''
Step 1: load the images and their corresponding captions
'''
# Read the data given 1. the image folder and 2. the annotation file.
# Returns a list of ids, a list of captions, and a dict mapping id -> image array
def load_data(image_dir, annotation_path):
    # Read the annotation information
    with open(annotation_path, 'r') as fr:
        # Parse the annotation JSON
        annotation = json.load(fr)
    # Pair image ids with their captions; the images themselves go into a dict keyed by id
ids = []
captions = []
image_dict = {}
    # Iterate over the annotations with a progress bar
    for i in tqdm(range(len(annotation['annotations']))):
        # Get one annotation record
        item = annotation['annotations'][i]
        # Get the caption text, lower-cased and stripped of whitespace
        caption = item['caption'].strip().lower()
        # Replace punctuation and special symbols, then split into words
        caption = caption.replace('.', '').replace(',', '').replace("'", '').replace('"', '')
        caption = caption.replace('&', 'and').replace('(', '').replace(')', '').replace('-', ' ').split()
        # Keep only the non-empty tokens
caption = [w for w in caption if len(w) > 0]
        # Keep the image/caption pair only if the caption is at most maxlen words
        if len(caption) <= maxlen:
            # and load the image only if this image_id has not been seen before
            if not item['image_id'] in image_dict:
                # read the image into an array
                img = imread(image_dir + '%012d.jpg' % item['image_id'])
                # Get the image size
                h = img.shape[0]
                w = img.shape[1]
                # Centre-crop to a square, keeping the central region (no interpolation)
                if h > w:
                    img = img[h // 2 - w // 2: h // 2 + w // 2, :]
                else:
                    img = img[:, w // 2 - h // 2: w // 2 + h // 2]
                # Resize the square crop to the target size
                img = cv2.resize(img, (image_size, image_size))
                # Some images are greyscale; give them a channel dimension
                if len(img.shape) < 3:
                    img = np.expand_dims(img, -1)
                    # and tile it to 3 channels
                    img = np.concatenate([img, img, img], axis=-1)
                # Store the processed image: one image_id -> one image array
                image_dict[item['image_id']] = img
            # Record the id
            ids.append(item['image_id'])
            # and its caption
            captions.append(caption)
return ids, captions, image_dict
# Training annotations JSON file (contains the caption annotations)
train_json = 'data/train/captions_train2014.json'
train_ids, train_captions, train_dict = load_data('data/train/images/COCO_train2014_', train_json)
# Number of images whose captions qualify (caption length <= 20)
print(len(train_ids))
# This block visualises a few ids with their images and captions (optional)
# data_index = np.arange(len(train_ids))
# np.random.shuffle(data_index)
# N = 4
# data_index = data_index[:N]
# plt.figure(figsize=(12, 20))
# for i in range(N):
# caption = train_captions[data_index[i]]
# img = train_dict[train_ids[data_index[i]]]
# plt.subplot(4, 1, i + 1)
# plt.imshow(img)
# plt.title(' '.join(caption))
# plt.axis('off')
'''
Step 2: build the vocabulary maps, word -> id and id -> word
'''
# Build a word-frequency dictionary
vocabulary = {}
# Accumulate each word's occurrence count over all captions
for caption in train_captions:
for word in caption:
vocabulary[word] = vocabulary.get(word, 0) + 1
# Sort the vocabulary by frequency, most frequent first
vocabulary = sorted(vocabulary.items(), key=lambda x: -x[1])
# Keep just the words, in descending frequency order
vocabulary = [w[0] for w in vocabulary]
# Define the special tokens
word2id = {'<pad>': 0, '<start>': 1, '<end>': 2}
# Add the vocabulary to word2id starting at index 3, so ids 0-2 are
# the special tokens and the rest is the word list
for i, w in enumerate(vocabulary):
    word2id[w] = i + 3
# Invert the mapping: id -> word (lower ids mean more frequent words)
id2word = {i: w for w, i in word2id.items()}
# Print the vocabulary size and the 20 most frequent words (this is for test!)
print(len(vocabulary), vocabulary[:20])
# Persist the vocabulary, word2id and id2word as a pickle file
with open('dictionary.pkl', 'wb') as fw:
pickle.dump([vocabulary, word2id, id2word], fw)
# Convert a list of ids back into words
def translate(ids):
    words = [id2word[i] for i in ids if i >= 3]
    return ' '.join(words) + '.'
# Convert captions into id sequences; returns a [num_captions, maxlen + 2] matrix
def convert_captions(data):
    result = []
    # Wrap each caption in the special tokens <start> and <end>
for caption in data:
# vector is list
vector = [word2id['<start>']]
for word in caption:
if word in word2id:
vector.append(word2id[word])
vector.append(word2id['<end>'])
result.append(vector)
    # result now holds one id sequence per caption.
    # The conversion takes a while, so show a progress bar while padding;
    # sequences shorter than maxlen + 2 are padded with 0, i.e. <pad>
array = np.zeros((len(data), maxlen + 2), np.int32)
for i in tqdm(range(len(result))):
array[i, :len(result[i])] = result[i]
    # Return everything as one [num_captions, maxlen + 2] matrix
return array
# Run the conversion, turning the captions into id sequences
train_captions = convert_captions(train_captions)
# show some
print("the shape of training captions is:", train_captions.shape)
print("show the first coded caption:", train_captions[0])
print("if you do not know what these codes are, don't "
      "worry, here is the translation"
      ": ", translate(train_captions[0]))
'''
Load the model; VGG19 is used first for feature extraction
'''
# Load the pretrained parameter matrices
VGG_MODEL = "D:/BaiduYunDownload/python_exe/models/convs/imagenet-vgg-verydeep-19.mat"
vgg = scipy.io.loadmat(VGG_MODEL)
vgg_layers = vgg['layers']
def vgg_endpoints(inputs, reuse=None):
with tf.variable_scope('endpoints', reuse=reuse):
        # Load the weights and bias for a given layer
def _weights(layer, expected_layer_name):
W = vgg_layers[0][layer][0][0][0][0][0]
b = vgg_layers[0][layer][0][0][0][0][1]
layer_name = vgg_layers[0][layer][0][0][3][0]
assert layer_name == expected_layer_name
return W, b
        # Convolution layer followed by ReLU
def _conv2d_relu(prev_layer, layer, layer_name):
W, b = _weights(layer, layer_name)
W = tf.constant(W)
b = tf.constant(np.reshape(b, (b.size)))
return tf.nn.relu(tf.nn.conv2d(prev_layer, filter=W, strides=[1, 1, 1, 1], padding='SAME') + b)
        # Average-pooling layer
def _avgpool(prev_layer):
return tf.nn.avg_pool(prev_layer, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
graph = {}
graph['conv1_1'] = _conv2d_relu(inputs, 0, 'conv1_1')
graph['conv1_2'] = _conv2d_relu(graph['conv1_1'], 2, 'conv1_2')
graph['avgpool1'] = _avgpool(graph['conv1_2'])
graph['conv2_1'] = _conv2d_relu(graph['avgpool1'], 5, 'conv2_1')
graph['conv2_2'] = _conv2d_relu(graph['conv2_1'], 7, 'conv2_2')
graph['avgpool2'] = _avgpool(graph['conv2_2'])
graph['conv3_1'] = _conv2d_relu(graph['avgpool2'], 10, 'conv3_1')
graph['conv3_2'] = _conv2d_relu(graph['conv3_1'], 12, 'conv3_2')
graph['conv3_3'] = _conv2d_relu(graph['conv3_2'], 14, 'conv3_3')
graph['conv3_4'] = _conv2d_relu(graph['conv3_3'], 16, 'conv3_4')
graph['avgpool3'] = _avgpool(graph['conv3_4'])
graph['conv4_1'] = _conv2d_relu(graph['avgpool3'], 19, 'conv4_1')
graph['conv4_2'] = _conv2d_relu(graph['conv4_1'], 21, 'conv4_2')
graph['conv4_3'] = _conv2d_relu(graph['conv4_2'], 23, 'conv4_3')
graph['conv4_4'] = _conv2d_relu(graph['conv4_3'], 25, 'conv4_4')
graph['avgpool4'] = _avgpool(graph['conv4_4'])
graph['conv5_1'] = _conv2d_relu(graph['avgpool4'], 28, 'conv5_1')
graph['conv5_2'] = _conv2d_relu(graph['conv5_1'], 30, 'conv5_2')
graph['conv5_3'] = _conv2d_relu(graph['conv5_2'], 32, 'conv5_3')
graph['conv5_4'] = _conv2d_relu(graph['conv5_3'], 34, 'conv5_4')
graph['avgpool5'] = _avgpool(graph['conv5_4'])
return graph
# Input placeholder for a batch of images
X = tf.placeholder(tf.float32, [None, image_size, image_size, 3])
# Take conv5_3, the third conv layer of the fifth block, as the encoder output
encoded = vgg_endpoints(X - MEAN_VALUES)['conv5_3']
# Inspect the encoder output tensor
print(encoded)
'''
The LSTM part is defined below
'''
k_initializer = tf.contrib.layers.xavier_initializer()
b_initializer = tf.constant_initializer(0.0)
e_initializer = tf.random_uniform_initializer(-1.0, 1.0)
# Dense (fully connected) layer helper
def dense(inputs, units, activation=tf.nn.tanh, use_bias=True, name=None):
return tf.layers.dense(inputs, units, activation, use_bias,
kernel_initializer=k_initializer, bias_initializer=b_initializer, name=name)
# Batch-normalisation helper
def batch_norm(inputs, name):
return tf.contrib.layers.batch_norm(inputs, decay=0.95, center=True, scale=True, is_training=True,
updates_collections=None, scope=name)
# Dropout helper
def dropout(inputs):
return tf.layers.dropout(inputs, rate=0.5, training=True)
num_block = 14 * 14
num_filter = 512
hidden_size = 1024
embedding_size = 512
# The encoder output is (batch, 14, 14, 512);
# reshape it to (batch, 14 * 14, 512) so each spatial block is one feature vector
encoded = tf.reshape(encoded, [-1, num_block, num_filter])  # batch_size, num_block, num_filter
# Batch-normalise the contexts
contexts = batch_norm(encoded, 'contexts')
# contexts keeps the shape (batch_size, num_block, num_filter).
# Target sequences are at most 22 ids (maxlen plus <start> and <end>)
Y = tf.placeholder(tf.int32, [None, maxlen + 2])
Y_in = Y[:, :-1]   # the first 21 ids (decoder input)
Y_out = Y[:, 1:]   # ids 2 through 22 (prediction targets)
# A boolean tensor cast to float, same shape as Y_out; word2id['<pad>'] is 0,
# so the mask zeroes out padding positions in the loss
mask = tf.to_float(tf.not_equal(Y_out, word2id['<pad>']))
with tf.variable_scope('initialize'):
    # Compute the mean context: the num_block dimension is averaged away,
    # leaving the per-filter mean over all blocks
    context_mean = tf.reduce_mean(contexts, 1)
    # The initial state is 1024-dimensional
    state = dense(context_mean, hidden_size, name='initial_state')
    # The initial memory is 1024-dimensional as well
    memory = dense(context_mean, hidden_size, name='initial_memory')
# Word embedding: map every vocabulary word to a 512-dimensional vector
with tf.variable_scope('embedding'):
    embeddings = tf.get_variable('weights', [len(word2id), embedding_size], initializer=e_initializer)
    # tf.nn.embedding_lookup reads out the word vectors
    embedded = tf.nn.embedding_lookup(embeddings, Y_in)
with tf.variable_scope('projected'):
projected_contexts = tf.reshape(contexts, [-1, num_filter]) # batch_size * num_block, num_filter
    # Feature projection: the split is along num_filter, so each filter column
    # is one feature vector; a bias-free dense layer projects them
    projected_contexts = dense(projected_contexts, num_filter, activation=None, use_bias=False,
                               name='projected_contexts')
    # Reshape back to (batch_size, num_block, num_filter)
projected_contexts = tf.reshape(projected_contexts,
[-1, num_block, num_filter]) # batch_size, num_block, num_filter
# First create an LSTM cell
lstm = tf.nn.rnn_cell.BasicLSTMCell(hidden_size)
loss = 0
alphas = []
'''
Generate the caption word by word
'''
for t in range(maxlen + 1):
with tf.variable_scope('attend'):
        # Attention module
h0 = dense(state, num_filter, activation=None, name='fc_state') # batch_size, num_filter
h0 = tf.nn.relu(projected_contexts + tf.expand_dims(h0, 1)) # batch_size, num_block, num_filter
h0 = tf.reshape(h0, [-1, num_filter]) # batch_size * num_block, num_filter
h0 = dense(h0, 1, activation=None, use_bias=False, name='fc_attention') # batch_size * num_block, 1
h0 = tf.reshape(h0, [-1, num_block]) # batch_size, num_block
alpha = tf.nn.softmax(h0) # batch_size, num_block
# contexts: batch_size, num_block, num_filter
# tf.expand_dims(alpha, 2): batch_size, num_block, 1
context = tf.reduce_sum(contexts * tf.expand_dims(alpha, 2), 1, name='context') # batch_size, num_filter
alphas.append(alpha)
    # Selector: a gating scalar for the context
with tf.variable_scope('selector'):
beta = dense(state, 1, activation=tf.nn.sigmoid, name='fc_beta') # batch_size, 1
context = tf.multiply(beta, context, name='selected_context') # batch_size, num_filter
with tf.variable_scope('lstm'):
h0 = tf.concat([embedded[:, t, :], context], 1) # batch_size, embedding_size + num_filter
_, (memory, state) = lstm(inputs=h0, state=[memory, state])
    # Decode the LSTM output
with tf.variable_scope('decode'):
h0 = dropout(state)
h0 = dense(h0, embedding_size, activation=None, name='fc_logits_state')
h0 += dense(context, embedding_size, activation=None, use_bias=False, name='fc_logits_context')
h0 += embedded[:, t, :]
h0 = tf.nn.tanh(h0)
h0 = dropout(h0)
        # Logits over the vocabulary
logits = dense(h0, len(word2id), activation=None, name='fc_logits')
loss += tf.reduce_sum(
tf.nn.sparse_softmax_cross_entropy_with_logits(labels=Y_out[:, t], logits=logits) * mask[:, t])
tf.get_variable_scope().reuse_variables()
# Build the optimizer; an attention regulariser is added to the loss
alphas = tf.transpose(tf.stack(alphas), (1, 0, 2)) # batch_size, maxlen + 1, num_block
alphas = tf.reduce_sum(alphas, 1) # batch_size, num_block
attention_loss = tf.reduce_sum(((maxlen + 1) / num_block - alphas) ** 2)
total_loss = (loss + attention_loss) / batch_size
with tf.variable_scope('optimizer', reuse=tf.AUTO_REUSE):
global_step = tf.Variable(0, trainable=False)
vars_t = [var for var in tf.trainable_variables() if not var.name.startswith('endpoints')]
train_op = tf.contrib.layers.optimize_loss(total_loss, global_step, 0.001, 'Adam', clip_gradients=5.0, variables=vars_t)
'''
train the model
'''
sess = tf.Session()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
OUTPUT_DIR = 'model'
if not os.path.exists(OUTPUT_DIR):
os.mkdir(OUTPUT_DIR)
# TensorBoard summaries
tf.summary.scalar('losses/loss', loss)
tf.summary.scalar('losses/attention_loss', attention_loss)
tf.summary.scalar('losses/total_loss', total_loss)
summary = tf.summary.merge_all()
writer = tf.summary.FileWriter(OUTPUT_DIR)
epochs = 20
# Train for 20 epochs in total
for e in range(epochs):
train_ids, train_captions = shuffle(train_ids, train_captions)
for i in tqdm(range(len(train_ids) // batch_size)):
        # Assemble one batch
X_batch = np.array([train_dict[x] for x in train_ids[i * batch_size: i * batch_size + batch_size]])
Y_batch = train_captions[i * batch_size: i * batch_size + batch_size]
_ = sess.run(train_op, feed_dict={X: X_batch, Y: Y_batch})
if i > 0 and i % 100 == 0:
            # Log a summary every 100 steps
writer.add_summary(sess.run(summary,
feed_dict={X: X_batch, Y: Y_batch}),
e * len(train_ids) // batch_size + i)
writer.flush()
# Finally, save the model
saver.save(sess, os.path.join(OUTPUT_DIR, 'image_caption'))
# TODO: how to show the generated captions
| ["[email protected]"] | |
4971cef2eaad9bf4b633f3ef22dd270b3856a41b | ffdd2e74d8f4a1a21e88ddb2e6a232b1dbcb7aea | /cliente.py | 58204c37115fd88a7234a0b4a351b97880cc7563 | [] | no_license | AlanMSP/Redes-y-Protocolos-de-Servicio---Chat-Cliente-Servidor | e6bbf631a74d7d5bfb4deb09f93d63779b9691bb | 065ea6966ccafa72c94b92046dff201b52f362aa | refs/heads/main | 2023-01-24T01:04:37.180261 | 2020-12-06T03:28:01 | 2020-12-06T03:28:01 | 318,942,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,090 | py |
import tkinter as tk
from tkinter import simpledialog
import socket as sk
import threading as th
from time import sleep
import sys
def Enter_pressed(event):
input_get = input_field.get()
input_user.set("")
s.send(input_get.encode('utf-8'))
messages.insert(tk.END, 'Tu: %s' % input_get)
messages.itemconfigure(tk.END, background='lightgreen')
if input_get == "{Q}":
s.close()
window.quit()
def recv():
while True:
# conn, addr = s.accept()
try:
message = s.recv(2048)
if len(message)!=0:
message = message.decode('utf-8')
print(message)
messages.insert(tk.END, message)
if message[:8] == "Server: ":
messages.itemconfigure(tk.END, foreground='red')
        except OSError:  # socket closed; leave the receive loop
break
def on_closing():
s.send("{Q}".encode('utf-8'))
s.close()
exit()
window = tk.Tk()
window.lower()
while True:
name = simpledialog.askstring("Asignar nombre", "Por favor, introduce tu nombre:", parent=window)
if name != 'Server':
break
else:
print("¡Tu nombre no puede ser 'Servidor'!")
window.title("Client(%s)" % name)
frame = tk.Frame(window) # , width=300, height=300)
scrollbar = tk.Scrollbar(frame)
messages = tk.Listbox(frame, width=50, height=15, yscrollcommand=scrollbar.set)
scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
messages.pack(side=tk.LEFT, fill=tk.BOTH)
frame.pack()
input_user = tk.StringVar()
input_field = tk.Entry(window, text=input_user)
input_field.pack(side=tk.BOTTOM, fill=tk.X)
input_field.bind("<Return>", Enter_pressed)
s = sk.socket(sk.AF_INET, sk.SOCK_STREAM)
if len(sys.argv) != 3:
print("Uso correcto: python 'archivo' 'dirección IP' 'puerto'")
exit()
IP_address = str(sys.argv[1])
Port = int(sys.argv[2])
s.connect((IP_address, Port))
s.send((name+'\n').encode('utf-8'))
print("Servidor(%s, %s) conectado" % (IP_address, Port))
thread = th.Thread(target=recv)
thread.start()
window.protocol("WM_DELETE_WINDOW", on_closing)
tk.mainloop()
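# Example invocation (host and port are hypothetical):
#   python cliente.py 127.0.0.1 5000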
|
[
"[email protected]"
] | |
e23e5efb00fe42ebfa8b57218117e27262831dba
|
7f7aec3330b83ed9fa36cb01152b7bda367cb949
|
/source_code/digit_recog_act_sig_w_RMS.py
|
a934f261c390d13f87b9de984aa2c8acc225918c
|
[
"MIT"
] |
permissive
|
Hanumanth004/Digit_recognizer
|
a935f483e029e6382015df3ed94c8ab37ac18c1f
|
3d5a49e5fc7bf953fe76e1cbc53866144011cbbc
|
refs/heads/master
| 2021-01-19T13:03:59.334134 | 2017-08-23T11:30:13 | 2017-08-23T11:30:13 | 100,818,300 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,021 |
py
|
import numpy as np
from scipy.special import expit
import mnist_loader
from random import randint
X_tr, X_val, X_te = mnist_loader.load_data_wrapper()
hidden_size=30
input_size=784
classes=10
K=0
BATCH_SIZE=100
learning_rate=0.1
decay_rate=0.9
Wh=np.random.randn(hidden_size,input_size)*0.01
bh=np.random.randn(hidden_size,1)*0.01
Wo=np.random.randn(classes,hidden_size)*0.01
bo=np.random.randn(classes,1)*0.01
def lossFunc(X_tmp):
global Wh
global bh
global Wo
global bo
global K
loss=0
dbo=np.zeros_like(bo)
dWo=np.zeros_like(Wo)
hg12=np.zeros_like(bh)
dbh=np.zeros_like(bh)
dh12tmp=np.zeros_like(bh)
dh12=np.zeros_like(bh)
dWh=np.zeros_like(Wh)
K=0
cache1=0.0
cache2=0.0
cache3=0.0
cache4=0.0
for i in xrange(len(X_tmp)):
X,T = X_tmp[i]
        #forward propagation
h12 = np.dot(Wh, X) + bh
hg=expit(h12)
h3=np.dot(Wo,hg) + bo
hg3=expit(h3)
y=hg3
#loss+=0.5*(T-y)*(T-y)
loss_tmp=np.sum(0.5*(T-y)*(T-y))
loss_tmp=loss_tmp/10.0
loss+=loss_tmp
        #backward propagation
de=-(T-y)
dhg3=hg3*(1-hg3)
dy=dhg3*de
dbo+=dy
dWo+=np.dot(dy,hg.T)
dh12=np.dot(Wo.T,dy)
dh12tmp=hg*(1-hg)*dh12
dbh+=dh12tmp
dWh+=np.dot(dh12tmp, X.T)
if(i%BATCH_SIZE==0):
mem=dWh*dWh
cache1=decay_rate*cache1+(1-decay_rate)*mem
Wh+=-learning_rate*dWh / np.sqrt(cache1 + 1e-8)
mem=dWo*dWo
cache2=decay_rate*cache2+(1-decay_rate)*mem
Wo+=-learning_rate*dWo / np.sqrt(cache2 + 1e-8)
mem=dbh*dbh
cache3=decay_rate*cache3+(1-decay_rate)*mem
bh+=-learning_rate*dbh / np.sqrt(cache3 + 1e-8)
mem=dbo*dbo
cache4=decay_rate*cache4+(1-decay_rate)*mem
bo+=-learning_rate*dbo / np.sqrt(cache4 + 1e-8)
dbo=np.zeros_like(bo)
dWo=np.zeros_like(Wo)
dbh=np.zeros_like(bh)
dWh=np.zeros_like(Wh)
K+=BATCH_SIZE
#np.clip(dWh,-2,2,dWh)
#np.clip(dWo,-2,2,dWo)
#np.clip(dbh,-2,2,dbh)
#np.clip(dbo,-2,2,dbo)
#return loss,dWh,dWo,dbh,dbo
return loss
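# A minimal standalone sketch of the RMSprop step applied above (defaults
# mirror the globals; eps guards against division by zero):
def rmsprop_step(param, grad, cache, lr=0.1, decay=0.9, eps=1e-8):
    cache = decay * cache + (1 - decay) * (grad * grad)  # running average of squared gradients
    param = param - lr * grad / np.sqrt(cache + eps)     # gradient step scaled per parameter
    return param, cache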
X_tmp=X_tr
for ep in xrange(10):
correctly_classified=0
loss=lossFunc(X_tmp)
if (ep%5==0):
print "loss:%f" % (loss)
print "iteration number:%d" % (ep)
for i in xrange(len(X_tr)):
X,T = X_tr[i]
    #forward propagation
h12 = np.dot(Wh, X) + bh
hg=expit(h12)
h3=np.dot(Wo,hg) + bo
hg3=expit(h3)
y=hg3
pos_e=np.argmax(y)
pos_g=np.argmax(T)
if (pos_e==pos_g):
correctly_classified+=1
accuracy=float(correctly_classified)/len(X_tr)
print 'training accuracy:%f' %(accuracy)
correctly_classified=0
for i in xrange(len(X_val)):
X,T = X_val[i]
    #forward propagation
h12 = np.dot(Wh, X) + bh
hg=expit(h12)
h3=np.dot(Wo,hg) + bo
hg3=expit(h3)
y=hg3
pos_e=np.argmax(y)
if (pos_e==T):
correctly_classified+=1
accuracy=float(correctly_classified)/len(X_val)
print 'validation accuracy:%f' %(accuracy)
correctly_classified=0
for i in xrange(len(X_te)):
X,T = X_te[i]
    #forward propagation
h12 = np.dot(Wh, X) + bh
hg=expit(h12)
h3=np.dot(Wo,hg) + bo
hg3=expit(h3)
y=hg3
pos_e=np.argmax(y)
if (pos_e==T):
correctly_classified+=1
accuracy=float(correctly_classified)/len(X_te)
print 'test set accuracy:%f' %(accuracy)
"""
X_tmp=X_tr
f2=open("./log_data/sig_RMSprop.dat",'w+')
for ep in xrange(50):
loss=lossFunc(X_tmp)
np.random.shuffle(X_tmp)
print "\nepoch number:%d" % (ep)
correctly_classified=0
if(ep%1==0):
f2.write(str(ep) + ' ' + str(loss))
f2.write("\n")
print loss
f2.close()
"""
|
[
"[email protected]"
] | |
6a03e512fd4317dd2c671be45ac153ffe1ea4646
|
08c775eefc9b1d9ce6660aee5eacc4d072d79c3c
|
/mmo/model/minivggnet.py
|
b11bbc9d2eacbbb71cb61becef42ad1a1c43a91c
|
[] |
no_license
|
minmyatoo/merguinet
|
c952154718131c4963c473a06c0f7f218cb90722
|
ffb59ceda6946b82684081e2c6c48e5412b9e798
|
refs/heads/master
| 2023-01-20T03:34:06.845361 | 2020-11-20T10:47:52 | 2020-11-20T10:47:52 | 313,382,572 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,218 |
py
|
# import the necessary packages
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Dense
from tensorflow.keras import backend as K
class MerguiNet:
@staticmethod
def build(width, height, depth, classes):
# initialize the model along with the input shape to be
# "channels last" and the channels dimension itself
model = Sequential()
inputShape = (height, width, depth)
chanDim = -1
# if we are using "channels first", update the input shape
# and channels dimension
if K.image_data_format() == "channels_first":
inputShape = (depth, height, width)
chanDim = 1
# first CONV => RELU => CONV => RELU => POOL layer set
model.add(Conv2D(32, (3, 3), padding="same",
input_shape=inputShape))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(Conv2D(32, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# second CONV => RELU => CONV => RELU => POOL layer set
model.add(Conv2D(64, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(Conv2D(64, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Third CONV => RELU => CONV => RELU => POOL layer set
model.add(Conv2D(128, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(Conv2D(64, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Fourth CONV => RELU => CONV => RELU => POOL layer set
model.add(Conv2D(128, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(Conv2D(128, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# first (and only) set of FC => RELU layers
model.add(Flatten())
model.add(Dense(512))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.5))
# classifier softmax
model.add(Dense(classes))
model.add(Activation("softmax"))
# see the summary
model.summary()
# return the constructed network architecture
return model
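# Hedged usage sketch (input size and class count are assumptions):
# model = MerguiNet.build(width=64, height=64, depth=3, classes=10)
# model.compile(optimizer="adam", loss="categorical_crossentropy",
#               metrics=["accuracy"])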
|
[
"[email protected]"
] | |
837b5aa2779afab60eab22a966ca39223d8265d5
|
768f7a8aebf2969ce66404068df975aa98737f07
|
/pyteal/ast/nonce_test.py
|
917e78042463843815d22d6c2e91133cb49f1032
|
[
"MIT"
] |
permissive
|
Lumene98/pyteal
|
b2e1201aae006e7ea9492f0c42af8cee99caf4cd
|
9191e3c6074eaa7aaefac7dab0ab024d1110f8a6
|
refs/heads/master
| 2023-03-24T23:32:25.569906 | 2021-03-19T20:30:52 | 2021-03-19T20:30:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,252 |
py
|
import pytest
from .. import *
def test_nonce_base32():
expr = Nonce("base32", "7Z5PWO2C6LFNQFGHWKSK5H47IQP5OJW2M3HA2QPXTY3WTNP5NU2MHBW27M", Int(1))
assert expr.type_of() == TealType.uint64
expected = TealSimpleBlock([
TealOp(Op.byte, "base32(7Z5PWO2C6LFNQFGHWKSK5H47IQP5OJW2M3HA2QPXTY3WTNP5NU2MHBW27M)"),
TealOp(Op.pop),
TealOp(Op.int, 1)
])
actual, _ = expr.__teal__()
actual.addIncoming()
actual = TealBlock.NormalizeBlocks(actual)
assert actual == expected
def test_nonce_base32_empty():
expr = Nonce("base32", "", Int(1))
assert expr.type_of() == TealType.uint64
expected = TealSimpleBlock([
TealOp(Op.byte, "base32()"),
TealOp(Op.pop),
TealOp(Op.int, 1)
])
actual, _ = expr.__teal__()
actual.addIncoming()
actual = TealBlock.NormalizeBlocks(actual)
assert actual == expected
def test_nonce_base64():
expr = Nonce("base64", "Zm9vYmE=", Txn.sender())
assert expr.type_of() == TealType.bytes
expected = TealSimpleBlock([
TealOp(Op.byte, "base64(Zm9vYmE=)"),
TealOp(Op.pop),
TealOp(Op.txn, "Sender")
])
actual, _ = expr.__teal__()
actual.addIncoming()
actual = TealBlock.NormalizeBlocks(actual)
assert actual == expected
def test_nonce_base64_empty():
expr = Nonce("base64", "", Int(1))
assert expr.type_of() == TealType.uint64
expected = TealSimpleBlock([
TealOp(Op.byte, "base64()"),
TealOp(Op.pop),
TealOp(Op.int, 1)
])
actual, _ = expr.__teal__()
actual.addIncoming()
actual = TealBlock.NormalizeBlocks(actual)
assert actual == expected
def test_nonce_base16():
expr = Nonce("base16", "A21212EF", Int(1))
assert expr.type_of() == TealType.uint64
expected = TealSimpleBlock([
TealOp(Op.byte, "0xA21212EF"),
TealOp(Op.pop),
TealOp(Op.int, 1)
])
actual, _ = expr.__teal__()
actual.addIncoming()
actual = TealBlock.NormalizeBlocks(actual)
assert actual == expected
def test_nonce_base16_prefix():
expr = Nonce("base16", "0xA21212EF", Int(1))
assert expr.type_of() == TealType.uint64
expected = TealSimpleBlock([
TealOp(Op.byte, "0xA21212EF"),
TealOp(Op.pop),
TealOp(Op.int, 1)
])
actual, _ = expr.__teal__()
actual.addIncoming()
actual = TealBlock.NormalizeBlocks(actual)
assert actual == expected
def test_nonce_base16_empty():
expr = Nonce("base16", "", Int(6))
assert expr.type_of() == TealType.uint64
expected = TealSimpleBlock([
TealOp(Op.byte, "0x"),
TealOp(Op.pop),
TealOp(Op.int, 6)
])
actual, _ = expr.__teal__()
actual.addIncoming()
actual = TealBlock.NormalizeBlocks(actual)
assert actual == expected
def test_nonce_invalid():
with pytest.raises(TealInputError):
Nonce("base23", "", Int(1))
with pytest.raises(TealInputError):
Nonce("base32", "Zm9vYmE=", Int(1))
with pytest.raises(TealInputError):
Nonce("base64", "?????", Int(1))
with pytest.raises(TealInputError):
Nonce("base16", "7Z5PWO2C6LFNQFGHWKSK5H47IQP5OJW2M3HA2QPXTY3WTNP5NU2MHBW27M", Int(1))
|
[
"[email protected]"
] | |
ffad9388f1dcca576425078110769343b97f4166
|
416239a621e5ad30d96a0680caa970592b15dad5
|
/models/Top1Net_Basic.py
|
40e5785cc1220b5c86c938387669f15d17ff40f7
|
[] |
no_license
|
jonozw/ECG_Nets
|
31596f8dec0c9bc3231aac679add8f5b52df5121
|
786cb4b12dc5859ffdd6690782017fdb5dd1f0e0
|
refs/heads/master
| 2022-12-31T21:28:38.142575 | 2020-10-08T02:41:07 | 2020-10-08T02:41:07 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,424 |
py
|
import torch
import torch.nn as nn
from torchsummary import summary
from models.BasicModule import BasicModule
import torch.nn.functional as F
#block1 = bn--relu--conv + SE, with a 1x1-conv shortcut
class Block1(BasicModule):
def __init__(self, in_planes, planes, kernel_size=[1, 15], stride=[1, 2], padding=[0, 7]):
super(Block1, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=kernel_size, padding=padding, stride=stride, bias=False)
self.shortcut = nn.Sequential()
if stride != [1, 1] or in_planes != planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False),
)
# SE layers
self.fc1 = nn.Conv2d(planes, planes//4, kernel_size=1) # Use nn.Conv2d instead of nn.Linear
self.fc2 = nn.Conv2d(planes//4, planes, kernel_size=1)
def forward(self, x):
out = F.relu(self.bn1(x))
out = self.conv1(out)
# Squeeze
w = F.avg_pool2d(out, kernel_size=[out.size(2), out.size(3)])
w = F.relu(self.fc1(w))
w = F.sigmoid(self.fc2(w))
# Excitation
out = out * w # New broadcasting feature from v0.2!
out += self.shortcut(x)
return out
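# Note: the Squeeze step pools each channel map to a scalar, the two 1x1 convs
# form the excitation bottleneck (reduction ratio 4), and the sigmoid gate
# rescales the channels -- the standard Squeeze-and-Excitation pattern.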
#block2 = (bn--relu--conv) x3 + SE, with a 1x1-conv shortcut
class Block2(BasicModule):
def __init__(self, in_planes, planes, kernel_size=[1, 3], stride=[1, 1], padding=[0, 1]):
super(Block2, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=kernel_size, padding=padding, stride=stride, bias=False)
self.bn2 = nn.BatchNorm2d(in_planes)
self.conv2 = nn.Conv2d(in_planes, planes, kernel_size=kernel_size, padding=padding, stride=stride, bias=False)
self.bn3 = nn.BatchNorm2d(in_planes)
self.conv3 = nn.Conv2d(in_planes, planes, kernel_size=kernel_size, padding=padding, stride=stride, bias=False)
self.shortcut = nn.Sequential()
if stride != [1, 1] or in_planes != planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False),
)
# SE layers
self.fc1 = nn.Conv2d(planes, planes//4, kernel_size=1) # Use nn.Conv2d instead of nn.Linear
self.fc2 = nn.Conv2d(planes//4, planes, kernel_size=1)
def forward(self, x):
out = F.relu(self.bn1(x))
out = self.conv1(out)
out = F.relu(self.bn2(out))
out = self.conv2(out)
out = F.relu(self.bn3(out))
out = self.conv3(out)
# Squeeze
w = F.avg_pool2d(out, kernel_size=[out.size(2), out.size(3)])
w = F.relu(self.fc1(w))
w = F.sigmoid(self.fc2(w))
# Excitation
out = out * w # New broadcasting feature from v0.2!
out += self.shortcut(x)
return out
#block3 = two stacked ((bn--relu--conv) x3 + SE) units, each with its own shortcut
class Block3(BasicModule):
def __init__(self, in_planes, planes, kernel_size=3, stride=1, padding=1):
super(Block3, self).__init__()
#the first
self.bn1 = nn.BatchNorm1d(in_planes)
self.conv1 = nn.Conv1d(in_planes, planes, kernel_size=kernel_size, padding=padding, stride=stride, bias=False)
self.bn2 = nn.BatchNorm1d(planes)
self.conv2 = nn.Conv1d(planes, planes, kernel_size=kernel_size, padding=padding, stride=stride, bias=False)
self.bn3 = nn.BatchNorm1d(planes)
self.conv3 = nn.Conv1d(planes, planes, kernel_size=kernel_size, padding=padding, stride=2, bias=False)
self.shortcut1 = nn.Sequential(
nn.BatchNorm1d(in_planes),
nn.ReLU(),
nn.Conv1d(in_planes, planes, kernel_size=kernel_size, padding=padding, stride=2, bias=False)
)
# the first SE layers
        self.fc1 = nn.Conv1d(planes, planes//4, kernel_size=1)  # Use nn.Conv1d instead of nn.Linear
self.fc2 = nn.Conv1d(planes//4, planes, kernel_size=1)
        # the second stack
self.bn4 = nn.BatchNorm1d(planes)
self.conv4 = nn.Conv1d(planes, planes, kernel_size=kernel_size, padding=padding, stride=stride, bias=False)
self.bn5 = nn.BatchNorm1d(planes)
self.conv5 = nn.Conv1d(planes, planes, kernel_size=kernel_size, padding=padding, stride=stride, bias=False)
self.bn6 = nn.BatchNorm1d(planes)
self.conv6 = nn.Conv1d(planes, planes, kernel_size=kernel_size, padding=padding, stride=stride, bias=False)
self.shortcut2 = nn.Sequential(
nn.BatchNorm1d(in_planes),
nn.ReLU(),
nn.Conv1d(in_planes, planes, kernel_size=kernel_size, padding=padding, stride=stride, bias=False)
)
# the second SE
        self.fc3 = nn.Conv1d(planes, planes // 4, kernel_size=1)  # Use nn.Conv1d instead of nn.Linear
self.fc4 = nn.Conv1d(planes // 4, planes, kernel_size=1)
def forward(self, x):
out = F.relu(self.bn1(x))
out = self.conv1(out)
out = F.relu(self.bn2(out))
out = self.conv2(out)
out = F.relu(self.bn3(out))
out = self.conv3(out)
# Squeeze
w = F.avg_pool1d(out, kernel_size=out.size(2))
w = F.relu(self.fc1(w))
w = F.sigmoid(self.fc2(w))
# Excitation
out = out * w # New broadcasting feature from v0.2!
out += self.shortcut1(x)
origin = out
# the second
out = F.relu(self.bn4(out))
out = self.conv4(out)
out = F.relu(self.bn5(out))
out = self.conv5(out)
out = F.relu(self.bn6(out))
out = self.conv6(out)
# Squeeze
w = F.avg_pool1d(out, kernel_size=out.size(2))
        w = F.relu(self.fc3(w))  # second SE pair (fc1/fc2 here was a copy-paste bug)
        w = F.sigmoid(self.fc4(w))
# Excitation
out = out * w # New broadcasting feature from v0.2!
out += self.shortcut2(origin)
return out
class Top1Net(BasicModule):
def __init__(self, num_classes=55):
super(Top1Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=[1, 50], stride=[1, 2], padding=[0, 0], bias=False) # output:2476
self.bn1 = nn.BatchNorm2d(32)
self.inplanes = 32
self.layers_block1s = self._make_layer(Block1, self.inplanes, 32, 3, kernel_size=[1, 15], stride=[1, 2], padding=[0, 7])
#kernel=3,5,7 configurations
self.sizes = [3,5,7]
self.strides = [1,1,1]
self.pads = [1,2,3]
# self.sizes = [3, 5, 7, 9]
# self.strides = [1, 1, 1, 1]
# self.pads = [1, 2, 3, 4]
self.layer_block2s_list = []
self.layer2_block3s_list = []
for i in range(len(self.sizes)):
layers_block2s = self._make_layer(Block2, self.inplanes, self.inplanes, 4, kernel_size=[1, self.sizes[i]], stride=[1, self.strides[i]],
padding=[0, self.pads[i]])
self.layer_block2s_list.append(layers_block2s)
layers_block3s = self._make_layer(Block3, self.inplanes * 8, self.inplanes*8, 4, kernel_size=self.sizes[i], stride=self.strides[i],
padding=self.pads[i])
self.layer2_block3s_list.append(layers_block3s)
self.avgpool = nn.AdaptiveAvgPool1d(1)
self.fc = nn.Linear(256 * len(self.sizes), num_classes) #fully connected
self.sigmoid = nn.Sigmoid() # multi-task
def forward(self, x0):
x0 = x0.unsqueeze(1)
x0 = F.relu(self.bn1(self.conv1(x0)))
x0 = self.layers_block1s(x0)
xs = []
for i in range(len(self.sizes)):
x = self.layer_block2s_list[i](x0)
x = torch.flatten(x, start_dim=1, end_dim=2)
x = self.layer2_block3s_list[i](x)
x = self.avgpool(x)
xs.append(x)
out = torch.cat(xs,dim=2)
out = out.view(out.size(0), -1)
out = self.fc(out)
out = self.sigmoid(out)
return out
def _make_layer(self, block, inplanes, planes, blocks, kernel_size, stride, padding):
layers = []
for i in range(blocks):
layers.append(block(inplanes, planes, kernel_size=kernel_size, stride=stride, padding=padding))
return nn.Sequential(*layers)
def Top1Net_b_25( num_classes=55):
"""
SE top1net
"""
model = Top1Net(num_classes=num_classes)
return model
def test_se_resNet():
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# net = Block1(1,32,kernel_size=[1, 15], stride=[1, 2], padding=[0, 7])
# model = net.to(device)
# # print(summary(net, input_size=( 1, 8, 1238)))
# # y =net(torch.randn(32, 1, 8, 5000))
# # print(y.size())
# sizes = [3,4]
# print([1,sizes[1]])
# for i in range(10):
# print(i)
net = Top1Net_b_25(num_classes=55)
model = net.to(device)
y =net(torch.randn(32, 8, 5000))
print(y.size())
print(summary(net, input_size=(8, 5000)))
# net = Block2(32,32,kernel_size=[1, 3], padding=[0,1],stride=[1,1])
# model = net.to(device)
# print(summary(net,input_size=(32, 8, 310)))
# net = Block3(256,256,kernel_size=3, padding=1,stride=1)
# model = net.to(device)
# print(summary(net,input_size=(256, 20)))
if __name__ == '__main__':
test_se_resNet()
|
[
"[email protected]"
] | |
979890fa8a9aeb80307745607028a058432661c8
|
2d6ad0873cfc52d83cbdb0ecd9844ea83686bb38
|
/csvfileproject.py
|
0c7657a1b4f2ce1564a7a355f4de5051f710d5a3
|
[] |
no_license
|
amit928/practiced_python_codes
|
6cec1837b5ec91f3a22963e9b21b656fe80a4497
|
b8f9db2bdc5520729770e4077a9a8461a8c64bd3
|
refs/heads/master
| 2023-06-03T10:36:46.227166 | 2021-06-12T10:06:35 | 2021-06-12T10:06:35 | 376,256,700 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 168 |
py
|
import csv
file=open('amitzqxcsv.csv','r')
read=csv.reader(file)
x=input("Enter your marks : ")
for row in read:
if(row[3]==x):
print(row)
file.close()
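# Assumption: the CSV has at least 4 columns and column index 3 holds the
# marks; input() returns a string, so the comparison above is string-based.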
|
[
"[email protected]"
] | |
a02d45d50426a72b18991c0c25da0082ba9e835f
|
1886065d10342822b10063cd908a690fccf03d8b
|
/appengine/findit/crash/loglinear/changelist_classifier.py
|
96277a04aefab650a935aa33a7cf08c3b48f7e7a
|
[
"BSD-3-Clause"
] |
permissive
|
TrellixVulnTeam/chromium-infra_A6Y5
|
26af0dee12f89595ebc6a040210c9f62d8ded763
|
d27ac0b230bedae4bc968515b02927cf9e17c2b7
|
refs/heads/master
| 2023-03-16T15:33:31.015840 | 2017-01-31T19:55:59 | 2017-01-31T20:06:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,932 |
py
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import defaultdict
import logging
from common.chrome_dependency_fetcher import ChromeDependencyFetcher
from crash import changelist_classifier
from crash.changelist_classifier import StackInfo
from crash.crash_report_with_dependencies import CrashReportWithDependencies
from crash.loglinear.model import UnnormalizedLogLinearModel
class LogLinearChangelistClassifier(object):
"""A ``LogLinearModel``-based implementation of CL classification."""
def __init__(self, get_repository, meta_feature, meta_weight,
top_n_frames=7, top_n_suspects=3):
"""
Args:
get_repository (callable): a function from DEP urls to ``Repository``
objects, so we can get changelogs and blame for each dep. Notably,
to keep the code here generic, we make no assumptions about
which subclass of ``Repository`` this function returns. Thus,
it is up to the caller to decide what class to return and handle
any other arguments that class may require (e.g., an http client
for ``GitilesRepository``).
meta_feature (MetaFeature): All features.
      meta_weight (MetaWeight): the weights for the features. The keys of
        the dictionary are the names of the features each weight applies to.
        We take this argument as a dict rather than as a list so that
        callers needn't worry about what order to provide the weights in.
top_n_frames (int): how many frames of each callstack to look at.
top_n_suspects (int): maximum number of suspects to return.
"""
self._dependency_fetcher = ChromeDependencyFetcher(get_repository)
self._get_repository = get_repository
self._top_n_frames = top_n_frames
self._top_n_suspects = top_n_suspects
self._model = UnnormalizedLogLinearModel(meta_feature, meta_weight)
def __call__(self, report):
"""Finds changelists suspected of being responsible for the crash report.
Args:
report (CrashReport): the report to be analyzed.
Returns:
List of ``Suspect``s, sorted by probability from highest to lowest.
"""
annotated_report = CrashReportWithDependencies(
report, self._dependency_fetcher)
if annotated_report is None:
logging.warning('%s.__call__: '
'Could not obtain dependencies for report: %s',
self.__class__.__name__, str(report))
return []
suspects = self.GenerateSuspects(annotated_report)
if not suspects:
logging.warning('%s.__call__: Found no suspects for report: %s',
self.__class__.__name__, str(annotated_report))
return []
return self.RankSuspects(annotated_report, suspects)
def GenerateSuspects(self, report):
"""Generate all possible suspects for the reported crash.
Args:
report (CrashReportWithDependencies): the crash we seek to explain.
Returns:
A list of ``Suspect``s who may be to blame for the
``report``. Notably these ``Suspect`` instances do not have
all their fields filled in. They will be filled in later by
``RankSuspects``.
"""
# Look at all the frames from any stack in the crash report, and
# organize the ones that come from dependencies we care about.
dep_to_file_to_stack_infos = defaultdict(lambda: defaultdict(list))
for stack in report.stacktrace:
for frame in stack:
if frame.dep_path in report.dependencies:
dep_to_file_to_stack_infos[frame.dep_path][frame.file_path].append(
StackInfo(frame, stack.priority))
dep_to_file_to_changelogs, ignore_cls = (
changelist_classifier.GetChangeLogsForFilesGroupedByDeps(
report.dependency_rolls, report.dependencies,
self._get_repository))
# Get the possible suspects.
return changelist_classifier.FindSuspects(
dep_to_file_to_changelogs,
dep_to_file_to_stack_infos,
report.dependencies,
self._get_repository,
ignore_cls)
def RankSuspects(self, report, suspects):
"""Returns a lineup of the suspects in order of likelihood.
    Suspects with a near-zero (discardable) score, or ranked below the
    top_n_suspects cutoff, are filtered out.
Args:
report (CrashReportWithDependencies): the crash we seek to explain.
suspects (iterable of Suspect): the CLs to consider blaming for the crash.
Returns:
A list of suspects in order according to their likelihood. This
list contains elements of the ``suspects`` list, where we mutate
some of the fields to store information about why that suspect
is being blamed (e.g., the ``confidence``, ``reasons``, and
``changed_files`` fields are updated). In addition to sorting the
suspects, we also filter out those which are exceedingly unlikely
or don't make the ``top_n_suspects`` cut.
"""
# Score the suspects and organize them for outputting/returning.
features_given_report = self._model.Features(report)
score_given_report = self._model.Score(report)
scored_suspects = []
for suspect in suspects:
score = score_given_report(suspect)
if self._model.LogZeroish(score):
logging.debug('Discarding suspect because it has zero probability: %s'
% str(suspect.ToDict()))
continue
suspect.confidence = score
# features is ``MetaFeatureValue`` object containing all feature values.
features = features_given_report(suspect)
suspect.reasons = features.reason
suspect.changed_files = [changed_file.ToDict()
for changed_file in features.changed_files]
scored_suspects.append(suspect)
    scored_suspects.sort(key=lambda suspect: suspect.confidence, reverse=True)  # highest confidence first
return scored_suspects[:self._top_n_suspects]
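# Note (hedged): UnnormalizedLogLinearModel presumably scores a suspect as the
# weighted sum of its feature values; since only the relative order matters
# for ranking, the normalizing constant can be omitted.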
|
[
"[email protected]"
] | |
5f1c9876b397f2a6214c19777cfdbc3a879dac4e
|
d924d8b865c878771b5752776fd46d9a8e92fe57
|
/venv/lib/python3.6/operator.py
|
2b20f2aede65cf193fcfbf2fbb805968f2a82b47
|
[] |
no_license
|
dts1346/cicd-buzz
|
46be577b03c5cb931e3516fed5deafaefc963c75
|
f00e1378a20a858d1339e2f1fc8607b478427ae4
|
refs/heads/master
| 2023-05-12T02:29:46.810082 | 2019-08-06T16:31:54 | 2019-08-06T16:31:54 | 200,687,520 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 53 |
py
|
/Users/tejasekhar/anaconda3/lib/python3.6/operator.py
|
[
"[email protected]"
] | |
2cfd8daa1660c41448828589412aabcb495d3bd4
|
54688f6bfa35f8943edcc46a551b4e282f61e007
|
/src/server/contest/migrations/0001_initial.py
|
6487927ad0d54686d73bb27cc3c0fa115c4e236d
|
[
"MIT"
] |
permissive
|
Agrawal-31/crux-judge
|
52ce8c4128a973fd02e3e65c2111b67b4bba45a0
|
a194f05d50a799bb3cddf4c23ea8d14fdadb20a2
|
refs/heads/master
| 2021-08-17T00:49:53.127134 | 2017-11-20T10:35:10 | 2017-11-20T15:44:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 901 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-29 20:58
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('trial', '0003_auto_20170728_2227'),
]
operations = [
migrations.CreateModel(
name='Problem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('max_score', models.FloatField(default=0)),
('problem', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='trial.Problem', verbose_name='problem')),
],
options={
'ordering': ['problem_id'],
'verbose_name': 'Contest Problem',
},
),
]
|
[
"[email protected]"
] | |
f67845c001a82eb0dcfa9f1f7952d10120248821
|
256a034b044fd1826a57ed6a079f908264243796
|
/data_Matplotlib.py
|
ba4e2170c86fc4833b2368475b2682d07f1e8a04
|
[] |
no_license
|
DAMS96/Course_TensorFlow_DeepLearning_Python
|
3c0580e2a58e1d022cfaa3bf9cc4b369818987a8
|
b5e873e39f7b7cd610fca225c0a9c70c39db81b8
|
refs/heads/master
| 2022-04-23T05:20:08.798539 | 2020-04-25T05:04:45 | 2020-04-25T05:04:45 | 258,642,175 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 362 |
py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# x = (0,1,2,3,4,5,6,7,8)
# y = (0,1,2,3,4,5,6,7,8)
# plt.plot(x,y,'g*')
# plt.title('Chart title')
# plt.xlabel('x-axis values')
# plt.ylabel('y-axis values')
# plt.show()
array = np.arange(0,50).reshape(10,5)
print(array)
plt.imshow(array)
plt.colorbar()
plt.show()
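# imshow renders the 10x5 array as an image: each cell's value (0..49) is
# mapped through the default colormap, and colorbar() adds the value legend.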
|
[
"[email protected]"
] | |
e026a7597819cb1ea30bf7eeff93ee312490cb3b
|
d3c761185113a52d98da695df96fa46cc2e38667
|
/test/test_parser.py
|
fea5f5725e941a3b82391d2ab47b5c38a0d88daf
|
[] |
no_license
|
AndresBena19/rolly_interpreter
|
b43b66fc570900f5209788b5c7d21d093093d19c
|
aba8a96b1944f2197590ce2e225729be6520e1a8
|
refs/heads/master
| 2020-04-15T07:00:36.142454 | 2020-02-26T05:18:33 | 2020-02-26T05:18:33 | 164,483,716 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 675 |
py
|
import unittest
import sys
from XETLlexer.lexer import lexer
from XETLparser.operations import parser_tree
class TestParser(unittest.TestCase):
@classmethod
def setUpClass(cls):
sys.setrecursionlimit(5000)
@classmethod
def tearDownClass(cls):
sys.setrecursionlimit(1000)
def setUp(self):
pass
def tearDown(self):
pass
def test_deep_nested_operation(self):
a = """IF(1.2 > 1; {}; "NOT")"""
b = """IF(1.2 > 1; {}; "NOT")"""
for _ in range(0, 200):
b = a.format(b)
text_nested = b.replace('{}', "OK")
tokens = lexer(text_nested)
parser_tree(tokens)
|
[
"[email protected]"
] | |
72165727392d92a8ea91efc6e3297f575469e6e3
|
c0fb9a80e558848bb8ab327cf96c8afb443b9536
|
/simple_django/manage.py
|
d946c12585ad04e1dab97425c8fd5f98403409db
|
[] |
no_license
|
aledotdev/django-simple-test
|
4ec20eb97b9fc4e6d39b6e278cccbf9091bc8612
|
575f3866a9311fdd54c061d7ff4535180a004b2e
|
refs/heads/master
| 2020-03-09T13:40:38.944284 | 2018-04-09T19:01:05 | 2018-04-09T19:07:47 | 128,817,097 | 0 | 0 | null | 2018-04-09T18:54:13 | 2018-04-09T18:33:33 |
Python
|
UTF-8
|
Python
| false | false | 545 |
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "simple_django.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[
"[email protected]"
] | |
f2c5da5c1d2cf18d5ad626b121ead0bff447b272
|
c80bf7d01ae92f95edefa74f78a7eed634e2bb0c
|
/compose-validator/parser.py
|
2050cb2f2c7a98d253ae031ba586228574fcac2f
|
[] |
no_license
|
knigawkl/compiler
|
01a0e0c5a40f4793a26651c5823826cbf99f9037
|
a870733ca435438f0097cf47dfc4dc613b97ba03
|
refs/heads/main
| 2023-06-02T22:03:49.465178 | 2021-05-17T12:04:47 | 2021-05-17T12:04:47 | 343,549,857 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,455 |
py
|
from prettytable import PrettyTable
from scanner import Scanner
from exceptions import UnexpectedStartToken, UnexpectedToken, UnknownStatement
from tokens import TokenType
from logger import logger
from utils import represents_int
START_TOKENS = (TokenType.SERVICES, TokenType.VERSION, TokenType.NETWORKS, TokenType.VOLUMES, TokenType.PORTS)
VALUE_TOKENS = (TokenType.NUMBER, TokenType.ID, TokenType.STRING)
class Parser:
""" Performs syntax analysis
Backus-Naur Form (BNF grammar):
<services> ::= 'services'
<version> ::= 'version'
<networks> ::= 'networks'
<volumes> ::= 'volumes'
<build> ::= 'build'
<ports> ::= 'ports'
<image> ::= 'image'
<environment> ::= 'environment'
<deploy> ::= 'deploy'
# <number> ::= r'\d+(\.\d*)?'
# <id> ::= r'[A-Za-z_./-]+'
# <string> ::= r'\"(.*?)\"'
<assign> ::= ':'
<li> ::= '-'
<quote> ::= '\"'
<dot> ::= '.'
<eof> ::= end of file
<eps> ::= blank
<ports_string> ::= <quote> <number> <assign> <number> <quote>
<value> ::= <number> | <id> | <string> | <port_string> | <version_string> | <eps>
<version_string> ::= <quote> <number> <dot> <number> <quote> | <quote> <number> <quote>
<string_array> ::= <li> <string> <string_array> | <eps>
<ports_string_array> ::= <li> <ports_string> <ports_string_array> | <eps>
<number_array> ::= <li> <number> <number_array> | <eps>
<id_array> ::= <li> <id> <id_array> | <eps>
<volume_array> ::= <li> <id> <assign> <id> <volume_array> | <eps>
<dictionary> ::= <id> <assign> <value> <dictionary> | <eps>
<start> ::= <program> <eof>
<program> ::= <statement> <program> | <eps>
<statement> ::= <version_stmt> | <services_stmt> | <networks_stmt> | <volumes_stmt>
<services_stmt> ::= <ports_stmt> | <build_stmt> | <image_stmt> | <environment_stmt> | <deploy_stmt> |
<service_networks_stmt> | <service_volumes_stmt>
<version_stmt> ::= <version> <assign> <version_string>
<networks_stmt> ::= <networks> <dictionary>
<volumes_stmt> ::= <volumes> <dictionary>
<ports_stmt> ::= <ports> <assign> <ports_string_array>
<build_stmt> ::= <build> <assign> <id>
<image_stmt> ::= <image> <assign> <id>
<environment_stmt> ::= <environment> <dictionary>
<deploy_stmt> ::= <deploy> <dictionary>
<service_networks_stmt> ::= <networks> <assign> <id_array>
<service_volumes_stmt> ::= <volumes> <assign> <volume_array>
"""
def __init__(self, scanner: Scanner):
logger.info("\nPerforming syntax analysis")
self.next_token = scanner.next_token
self.token = self.next_token()
self.table = PrettyTable()
self.table.field_names = ['Start line', 'End line', 'Statement type']
self.indent_level = 0
def __del__(self):
logger.info(self.table.get_string(sortby='Start line', sort_key=lambda row: int(row[0])))
def __take_token(self, token_type: str):
if self.token.type != token_type:
raise UnexpectedToken(f'Expected type: {token_type}, but got {self.token}')
if token_type != TokenType.EOF:
self.token = self.next_token()
def parse(self):
if self.token.type in START_TOKENS or self.token.type == TokenType.EOF:
self.__program()
self.__take_token(TokenType.EOF)
else:
raise UnexpectedStartToken(self.token)
def __program(self):
if self.token.type in START_TOKENS:
self.__statement()
self.__program()
else:
pass
def __statement(self):
stmts = {TokenType.VERSION: self.__version_stmt,
TokenType.NETWORKS: self.__networks_stmt,
TokenType.VOLUMES: self.__volumes_stmt,
TokenType.SERVICES: self.__services_stmt}
if self.token.type in stmts:
stmts[self.token.type]()
else:
raise UnknownStatement(self.token)
def __service_statement(self):
"""
Statements inside service are read here.
List of possible statements inside service is strictly constrained and listed below inside service_stmts.
"""
start_line, service_name, indent_level = self.token.line, self.token.value, self.token.column
service_stmts = {TokenType.PORTS: self.__ports_stmt,
TokenType.BUILD: self.__build_stmt,
TokenType.IMAGE: self.__image_stmt,
TokenType.ENVIRONMENT: self.__environment_stmt,
TokenType.DEPLOY: self.__deploy_stmt,
TokenType.NETWORKS: self.__service_networks_stmt,
TokenType.VOLUMES: self.__service_volumes_stmt, }
if self.token.type in service_stmts:
service_stmts[self.token.type]()
elif self.token.type == TokenType.EOF:
pass
else:
raise UnknownStatement(self.token)
if self.token.column == indent_level:
self.__service_statement()
def __array(self, item_type: str = TokenType.STRING, is_ports: bool = False):
if self.token.type == TokenType.LI:
self.__take_token(TokenType.LI)
if is_ports:
self.__validate_ports_string()
self.__value([item_type])
self.__array(item_type)
else:
pass
def __volume_array(self):
if self.token.type == TokenType.LI:
self.__take_token(TokenType.LI)
self.__value([TokenType.ID])
self.__take_token(TokenType.ASSIGN)
self.__value([TokenType.ID])
self.__volume_array()
else:
pass
def __dictionary(self):
start_line, service_name, indent_level = self.token.line, self.token.value, self.token.column
if self.token.type == TokenType.ID:
self.__take_token(TokenType.ID)
self.__take_token(TokenType.ASSIGN)
if self.token.line == start_line:
self.__value()
if self.token.column == indent_level:
self.__dictionary()
else:
pass
def __take_dict(self):
self.__take_token(TokenType.ASSIGN)
self.__dictionary()
def __services_dict(self):
"""
IDs here are names of the services. Phrases like "wordpress:" are read
and then self.service_dict() is called in order to read content of each service
"""
start_line, service_name, indent_level = self.token.line, self.token.value, self.token.column
if self.token.type == TokenType.ID:
self.__take_token(TokenType.ID)
self.__take_token(TokenType.ASSIGN)
self.__service_statement()
self.table.add_row([start_line, self.token.line - 1, f"{TokenType.SERVICES}:{service_name}"])
if self.token.column == indent_level:
self.__services_dict()
else:
pass
def __services_stmt(self):
"""
self.statement() has found services keyword and routes us here
phrase "services:" is read and then we start reading the services one by one
"""
start_line = self.token.line
self.__take_token(TokenType.SERVICES)
self.__take_token(TokenType.ASSIGN)
self.__services_dict()
self.table.add_row([start_line, self.token.line - 1 if (self.token.line - 1) > 0 else 1, TokenType.SERVICES])
def __version_stmt(self):
start_line = self.token.line
self.__take_token(TokenType.VERSION)
self.__take_token(TokenType.ASSIGN)
self.__validate_version_string(separator=".")
self.__value([TokenType.STRING])
self.table.add_row([start_line, self.token.line - 1 if (self.token.line - 1) > 0 else 1, TokenType.VERSION])
def __ports_stmt(self):
start_line = self.token.line
self.__take_token(TokenType.PORTS)
self.__take_token(TokenType.ASSIGN)
self.__array(item_type=TokenType.STRING, is_ports=True)
self.table.add_row([start_line, self.token.line - 1, TokenType.PORTS])
def __service_networks_stmt(self):
start_line = self.token.line
self.__take_token(TokenType.NETWORKS)
self.__take_token(TokenType.ASSIGN)
self.__array(item_type=TokenType.ID)
self.table.add_row([start_line, self.token.line - 1, TokenType.NETWORKS])
def __service_volumes_stmt(self):
start_line = self.token.line
self.__take_token(TokenType.VOLUMES)
self.__take_token(TokenType.ASSIGN)
self.__volume_array()
self.table.add_row([start_line, self.token.line - 1, TokenType.VOLUMES])
def __volumes_stmt(self):
start_line = self.token.line
self.__take_token(TokenType.VOLUMES)
self.__take_dict()
self.table.add_row([start_line, self.token.line - 1 if (self.token.line - 1) > 0 else 1, TokenType.VOLUMES])
def __build_stmt(self):
start_line = self.token.line
self.__take_token(TokenType.BUILD)
self.__take_token(TokenType.ASSIGN)
self.__value([TokenType.ID])
self.table.add_row([start_line, self.token.line - 1, TokenType.BUILD])
def __image_stmt(self):
start_line = self.token.line
self.__take_token(TokenType.IMAGE)
self.__take_token(TokenType.ASSIGN)
self.__value([TokenType.ID])
self.table.add_row([start_line, self.token.line - 1, TokenType.IMAGE])
def __networks_stmt(self):
start_line = self.token.line
self.__take_token(TokenType.NETWORKS)
self.__take_dict()
self.table.add_row([start_line, self.token.line - 1 if (self.token.line - 1) > 0 else 1, TokenType.NETWORKS])
def __environment_stmt(self):
start_line = self.token.line
self.__take_token(TokenType.ENVIRONMENT)
self.__take_dict()
self.table.add_row([start_line, self.token.line - 1, TokenType.ENVIRONMENT])
def __deploy_stmt(self):
start_line = self.token.line
self.__take_token(TokenType.DEPLOY)
self.__take_dict()
self.table.add_row([start_line, self.token.line - 1, TokenType.DEPLOY])
def __validate_version_string(self, separator):
""" Validate version string """
version_str = self.token.value
version_content = version_str.strip("\"")
version_parts = version_content.split(separator)
for part in version_parts:
if not represents_int(part):
raise UnexpectedToken("Improper version string content", self.token._replace(type = "version"))
def __validate_ports_string(self):
""" Validate ports string """
ports_str = self.token.value
ports_content = ports_str.strip("\"")
ports_parts = ports_content.split(":")
if len(ports_parts) != 2:
raise UnexpectedToken("Improper ports string content", self.token._replace(type="ports"))
for part in ports_parts:
if not represents_int(part):
raise UnexpectedToken("Improper ports string content", self.token._replace(type = "ports"))
def __value(self, allowed_types: [str] = VALUE_TOKENS):
if self.token.type in allowed_types:
self.__take_token(self.token.type)
else:
raise UnexpectedToken(f'Expected types: {allowed_types}, but got {self.token}')
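# Hedged usage sketch (Scanner's constructor signature is an assumption; see
# scanner.py for the real one):
# scanner = Scanner(open("docker-compose.yml").read())
# Parser(scanner).parse()  # raises UnexpectedToken/UnknownStatement on bad input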
|
[
"[email protected]"
] | |
16fa0a4b39d17c4ece50384f657fc65fb6ee0fef
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02271/s666225963.py
|
a1e305d0cdb1bc4f6641e39bb56d1f7301cd5a82
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 450 |
py
|
# ALDS_5_A - Exhaustive Search
import sys
n = int(input())
A = list(map(int, sys.stdin.readline().strip().split()))
q = int(input())
m = list(map(int, sys.stdin.readline().strip().split()))
sum_set = set()
for i in range(2 ** n):
bit = [(i >> j) & 1 for j in range(n)]
combined = [x * y for (x, y) in zip(A, bit)]
sum_set.add(sum(combined))
for target in m:
if target in sum_set:
print('yes')
else:
print('no')
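# Exhaustive search: all 2**n subsets of A are enumerated via bitmasks, so the
# precomputation costs O(2**n * n); each of the q queries is then an O(1) set
# lookup.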
|
[
"[email protected]"
] | |
90f5f255e73ca497a4b8e8aed744da5ac49e4dda
|
742fdd98cdb87119e67af1b9c2f18f475c549f9b
|
/todo/models.py
|
df2f2639465de6a56bc1105c88132380cef7b174
|
[] |
no_license
|
Devashishsingh98/Todo-List
|
bec2a27ef45f53feb615baed6cb74e441b1a23c3
|
8e10c52c1239bc099b8332de3d9e5f6b90ebe9f9
|
refs/heads/master
| 2021-03-26T07:35:06.792569 | 2020-04-05T15:23:58 | 2020-04-05T15:23:58 | 247,684,639 | 0 | 0 | null | 2020-03-16T11:26:36 | 2020-03-16T11:26:35 | null |
UTF-8
|
Python
| false | false | 203 |
py
|
from django.db import models
class Todo(models.Model):
text = models.CharField(max_length=100)
complete = models.BooleanField(default=False)
def __str__(self):
return self.text
|
[
"[email protected]"
] | |
5e02976a619cb1e6ada32cf79cbd4ed879067ae8
|
4b69b5dd4b1b3cf81b996065831226a243abb332
|
/articles/admin.py
|
45fafe2207a9eb4a089c73b9557ee149401c8418
|
[] |
no_license
|
cui0519/myBlog
|
d8ebd601ac5bf5a3fe0dc16e2c703cdbaa055ab9
|
c0852b6e42bfa93820d330e8f9e547be229344e8
|
refs/heads/master
| 2023-02-09T06:33:13.641351 | 2021-01-05T00:18:21 | 2021-01-05T00:18:21 | 326,308,408 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 441 |
py
|
from django.contrib import admin
from .models import Articles
# Register your models here.
class ArticlesAdmin(admin.ModelAdmin):
list_display = ('title','author','img','abstract','visited','created_at')
<<<<<<< HEAD
search_fields = ('title','author','abstract','content')
=======
search_fields = ('title',)
>>>>>>> f4d958d ('模板复用')
list_filter = list_display
admin.site.register(Articles,ArticlesAdmin)
|
[
"[email protected]"
] | |
2ce60a0dc8f0d186cf58e7ed69897d3ba80923a1
|
895611e8fe516fdef39fc14b7dac954614ebbec2
|
/ranking/migrations/0001_initial.py
|
a60686bbbe8576ee28e3e397bd76e111692a891e
|
[] |
no_license
|
puilp0502/paperrank
|
7e278de27b2f86ced40e1489a59894f396c98f79
|
2bc63214f4cf31fa67caa3334a3fa475e2c1a5e5
|
refs/heads/master
| 2021-07-12T11:40:59.968221 | 2019-11-01T16:36:02 | 2019-11-21T02:45:11 | 134,579,843 | 0 | 0 | null | 2020-06-05T18:23:13 | 2018-05-23T14:13:46 |
Python
|
UTF-8
|
Python
| false | false | 1,358 |
py
|
# Generated by Django 2.0.3 on 2018-05-27 13:49
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Paper',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=140)),
('slug', models.SlugField(blank=True)),
('author', models.CharField(max_length=255)),
('year', models.IntegerField()),
('abstract', models.TextField(blank=True)),
('score', models.FloatField()),
('registered', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Publisher',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=80)),
],
),
migrations.AddField(
model_name='paper',
name='publisher',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='ranking.Publisher'),
),
]
|
[
"[email protected]"
] | |
2925380a3da3d5570b8f7b3da8d4507d2a652c58
|
2d66758d59bede6354ebb8884ef499e537b697dc
|
/pariksha/challenge/migrations/0020_featureflag.py
|
6568e2cdfaab86daf6739e97a20db5b343aadb23
|
[] |
no_license
|
yogendramaarisetty/pariksha
|
40658e40eb9c770db111a69e18f416e17b6ea072
|
9c97df6eb175ea509f88589058c8da9eb67e2ebb
|
refs/heads/master
| 2023-05-01T12:12:08.471298 | 2023-04-22T12:34:04 | 2023-04-22T12:34:04 | 241,330,792 | 1 | 0 | null | 2023-04-22T12:32:44 | 2020-02-18T10:16:47 |
PLpgSQL
|
UTF-8
|
Python
| false | false | 602 |
py
|
# Generated by Django 3.0.4 on 2022-10-12 06:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('challenge', '0019_submission'),
]
operations = [
migrations.CreateModel(
name='FeatureFlag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.SlugField()),
('value', models.BooleanField()),
('description', models.TextField()),
],
),
]
|
[
"[email protected]"
] | |
c5b193fb983b5e4d663f93a6485499e152a180c1
|
e5cf5fd657b28d1c01d8fd954a911d72526e3112
|
/tide_teach/tide_time_windows.py
|
b54f5fcebaccedcc95ffb40b903d76d6c69a1cd4
|
[] |
no_license
|
parkermac/ptools
|
6b100f13a44ff595de03705a6ebf14a2fdf80291
|
a039261cd215fe13557baee322a5cae3e976c9fd
|
refs/heads/master
| 2023-01-09T11:04:16.998228 | 2023-01-02T19:09:18 | 2023-01-02T19:09:18 | 48,205,248 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,730 |
py
|
"""
Code to plot observed tide time series.
"""
import os
import sys
import pytz
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
import numpy as np
from importlib import reload
import ephem_functions as efun
reload(efun)
import tractive_functions as tfun
reload(tfun)
alp = os.path.abspath('../../LiveOcean/alpha')
if alp not in sys.path:
sys.path.append(alp)
import zfun
indir = os.environ.get('HOME') + '/Documents/ptools_data/tide/'
zone='US/Pacific'
tz_local = pytz.timezone(zone)
def read_tide(in_fn):
df = pd.read_csv(in_fn, index_col='Date Time', parse_dates = True)
for k in df.keys():
df = df.rename(columns={k: k.strip()})
df = df.drop(['Sigma', 'I', 'L'], axis=1)
df = df.rename(columns={'Water Level': 'Tide Obs'})
# find the mean water level
eta0 = df['Tide Obs'].mean()
# Assumes time is UTC
df.index.name = 'Date UTC'
df = df.tz_localize('UTC')
return df, eta0
# READ IN OBSERVED TIDE DATA
fn = 'CO-OPS__9447130__hr.csv' # Seattle 2016 observed data
city = 'Seattle'
obs_fn = indir + fn
obs_df, eta0 = read_tide(obs_fn)
obs_df = obs_df.tz_convert(tz_local)
obs_df.index.name = 'Date (local time)'
obs_df['Tide Obs'] = obs_df['Tide Obs'] * 3.28084
# and set related time limits
year = 2016
#tzinfo = pytz.timezone('UTC')
tzinfo = tz_local
dt0_day = datetime(year,6,10,tzinfo=tzinfo)
dt1_day = datetime(year,6,11,tzinfo=tzinfo)
dt0_month = datetime(year,6,1,tzinfo=tzinfo)
dt1_month = datetime(year,7,1,tzinfo=tzinfo)
dt0_year = datetime(year,1,1,tzinfo=tzinfo)
dt1_year = datetime(year+1,1,1,tzinfo=tzinfo)
# PLOTTING
plt.close('all')
lw0 = 0.5
lw1 = 1
lw2 = 3
fsz=18
ylim=(-5, 15)
fig = plt.figure(figsize=(14,8))
ax = fig.add_subplot(221)
obs_df.plot(y='Tide Obs',
legend=False, style='-b', ax=ax, ylim=ylim,
lw=lw2, grid=True, xlim=(dt0_day,dt1_day))
ax.text(.05,.05,'One Day', transform=ax.transAxes, fontweight='bold', fontsize=fsz)
ax.text(.05,.9,'Observed Tide Height (ft) ' + city,
transform=ax.transAxes, fontsize=fsz)
ax.set_xticklabels('')
ax.set_xlabel('')
ax = fig.add_subplot(222)
obs_df.plot(y='Tide Obs',
legend=False, style='-b', ax=ax, ylim=ylim,
lw=lw1, grid=True, xlim=(dt0_month,dt1_month))
ax.text(.05,.05,'One Month', transform=ax.transAxes, fontweight='bold', fontsize=fsz)
ax.set_xticklabels('')
ax.set_xlabel('')
ax = fig.add_subplot(212)
obs_df.plot(y='Tide Obs',
legend=False, style='-b', ax=ax, ylim=ylim,
lw=lw0, grid=True, xlim=(dt0_year,dt1_year))
ax.text(.05,.05,'One Year', transform=ax.transAxes, fontweight='bold', fontsize=fsz)
ax.set_xticklabels('')
ax.set_xlabel('')
fig.set_tight_layout(True)
plt.show()
|
[
"[email protected]"
] | |
9cd66536cdc51a43bf901eccb7e2154f2e6368ec
|
768058e7f347231e06a28879922690c0b6870ed4
|
/venv/lib/python3.7/site-packages/numba/cuda/simulator/compiler.py
|
5a88a649e47d11efe9887678a7397e77376673b8
|
[] |
no_license
|
jciech/HeisenbergSpinChains
|
58b4238281d8c158b11c6c22dd0da82025fd7284
|
e43942bbd09f6675e7e2ff277f8930dc0518d08e
|
refs/heads/master
| 2022-12-18T08:04:08.052966 | 2020-09-29T12:55:00 | 2020-09-29T12:55:00 | 258,476,448 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 148 |
py
|
"""
The compiler is not implemented in the simulator. This module provides a stub
to allow tests to import successfully.
"""
compile_kernel = None
|
[
"[email protected]"
] | |
34ea95617be832a8ef0f9d01010a60845f4ce08a
|
06313b5aff0c376c5c146c82eeeb3d82399d93aa
|
/cfratinghandler.py
|
df72e93e6359625feea3b688b20753455387dac1
|
[
"MIT"
] |
permissive
|
PIE0/icpc-amarchi
|
34b4ba8abdc968730eb4bf00c0f53503a1bd069e
|
9272002a2d0d9e07d7fe20349ced830129fb68fc
|
refs/heads/master
| 2020-05-24T18:02:13.662810 | 2019-05-18T20:17:50 | 2019-05-18T20:17:50 | 187,400,638 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,029 |
py
|
import json
import os
import requests
import threading
handle_link = 'https://codeforces.com/api/user.info?handles='
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
local_path = ROOT_DIR + '/users/'
def get_user_rating(handle, local=False):
if not local:
res = requests.get(handle_link + handle)
        res = json.loads(res.text)  # the API returns JSON; true/false/null would break ast.literal_eval
else:
path = local_path + handle + '.json'
try:
file = open(path)
        except FileNotFoundError:
print("Downloading {}'s rating...".format(handle))
url = handle_link + handle
res = requests.get(url, allow_redirects=True)
open(path, 'wb').write(res.content)
file = open(path)
res = json.loads(file.readline())
if res['status'] != 'OK' or 'rating' not in res['result'][0]:
return {handle: {'rate': -1, 'max rate': -1}}
res = res['result'][0]
return {handle: {'rate': res['rating'], 'max rate': res['maxRating']}}
threadLock = threading.Lock()
users = {}
class RateThread(threading.Thread):
def __init__(self, thread_id, team, local=False):
threading.Thread.__init__(self)
self.threadID = thread_id
# print("getting {} team's ratings".format(self.threadID))
self.team = team
self.local = local
def run(self):
for player in self.team:
ratings = get_user_rating(player, self.local)
threadLock.acquire()
users.update(ratings)
threadLock.release()
def get_ratings(teams, local=False):
bucket = 2
while len(teams):
threads = []
cnt = 0
names = []
for name in teams:
names.append(name)
thread = RateThread(name, teams[name], local)
thread.start()
threads.append(thread)
cnt += 1
if cnt >= bucket:
break
for name in names:
teams.pop(name)
for t in threads:
t.join()
return users
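# Hedged usage sketch (handles are hypothetical):
# teams = {"team_a": ["tourist", "Petr"], "team_b": ["Benq"]}
# ratings = get_ratings(teams, local=True)
# print(ratings["tourist"]["rate"])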
|
[
"[email protected]"
] | |
d1022f584747e762e37caeea29babb6ece88fe51
|
1633df15fc909ecc875224466d2e54d97c84e11b
|
/techmngt/migrations/0005_auto_20190114_1655.py
|
1a51b80681f541bf8f505f82fc76f7ec921fe9f6
|
[] |
no_license
|
fmatray/groovy
|
c35da073e6b472e1d1dd2510d13919d131ccbfc4
|
7dcbf36b933beb47afd74ef12aecda1f62cf3998
|
refs/heads/master
| 2020-04-12T12:47:13.826015 | 2019-01-21T17:06:41 | 2019-01-21T17:06:41 | 162,502,052 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,806 |
py
|
# Generated by Django 2.1.4 on 2019-01-14 15:55
from django.db import migrations, models
import django_cryptography.fields
import markdownx.models
class Migration(migrations.Migration):
dependencies = [
('techmngt', '0004_auto_20190114_1212'),
]
operations = [
migrations.AlterField(
model_name='asynchronousflow',
name='comment',
field=django_cryptography.fields.encrypt(markdownx.models.MarkdownxField(blank=True, help_text="<a href='https://en.wikipedia.org/wiki/Markdown'>You can use Markdown</a>", verbose_name='Comment')),
),
migrations.AlterField(
model_name='asynchronousflow',
name='description',
field=django_cryptography.fields.encrypt(markdownx.models.MarkdownxField(help_text="<a href='https://en.wikipedia.org/wiki/Markdown'>You can use Markdown</a>", null=True, verbose_name='Description')),
),
migrations.AlterField(
model_name='asynchronousflow',
name='documentation',
field=django_cryptography.fields.encrypt(models.URLField(blank=True, null=True, verbose_name='Documentation')),
),
migrations.AlterField(
model_name='historicalasynchronousflow',
name='comment',
field=django_cryptography.fields.encrypt(markdownx.models.MarkdownxField(blank=True, help_text="<a href='https://en.wikipedia.org/wiki/Markdown'>You can use Markdown</a>", verbose_name='Comment')),
),
migrations.AlterField(
model_name='historicalasynchronousflow',
name='description',
field=django_cryptography.fields.encrypt(markdownx.models.MarkdownxField(help_text="<a href='https://en.wikipedia.org/wiki/Markdown'>You can use Markdown</a>", null=True, verbose_name='Description')),
),
migrations.AlterField(
model_name='historicalasynchronousflow',
name='documentation',
field=django_cryptography.fields.encrypt(models.URLField(blank=True, null=True, verbose_name='Documentation')),
),
migrations.AlterField(
model_name='historicalbatchflow',
name='comment',
field=django_cryptography.fields.encrypt(markdownx.models.MarkdownxField(blank=True, help_text="<a href='https://en.wikipedia.org/wiki/Markdown'>You can use Markdown</a>", verbose_name='Comment')),
),
migrations.AlterField(
model_name='historicalbatchflow',
name='description',
field=django_cryptography.fields.encrypt(markdownx.models.MarkdownxField(help_text="<a href='https://en.wikipedia.org/wiki/Markdown'>You can use Markdown</a>", null=True, verbose_name='Description')),
),
migrations.AlterField(
model_name='historicalbatchflow',
name='documentation',
field=django_cryptography.fields.encrypt(models.URLField(blank=True, null=True, verbose_name='Documentation')),
),
migrations.AlterField(
model_name='historicalnetworkflow',
name='comment',
field=django_cryptography.fields.encrypt(markdownx.models.MarkdownxField(blank=True, help_text="<a href='https://en.wikipedia.org/wiki/Markdown'>You can use Markdown</a>", verbose_name='Comment')),
),
migrations.AlterField(
model_name='historicalnetworkflow',
name='description',
field=django_cryptography.fields.encrypt(markdownx.models.MarkdownxField(help_text="<a href='https://en.wikipedia.org/wiki/Markdown'>You can use Markdown</a>", null=True, verbose_name='Description')),
),
migrations.AlterField(
model_name='historicalnetworkflow',
name='documentation',
field=django_cryptography.fields.encrypt(models.URLField(blank=True, null=True, verbose_name='Documentation')),
),
migrations.AlterField(
model_name='historicalprotocol',
name='comment',
field=django_cryptography.fields.encrypt(markdownx.models.MarkdownxField(blank=True, help_text="<a href='https://en.wikipedia.org/wiki/Markdown'>You can use Markdown</a>", verbose_name='Comment')),
),
migrations.AlterField(
model_name='historicalprotocol',
name='description',
field=django_cryptography.fields.encrypt(markdownx.models.MarkdownxField(help_text="<a href='https://en.wikipedia.org/wiki/Markdown'>You can use Markdown</a>", null=True, verbose_name='Description')),
),
migrations.AlterField(
model_name='historicalprotocol',
name='documentation',
field=django_cryptography.fields.encrypt(models.URLField(blank=True, null=True, verbose_name='Documentation')),
),
migrations.AlterField(
model_name='historicalserver',
name='comment',
field=django_cryptography.fields.encrypt(markdownx.models.MarkdownxField(blank=True, help_text="<a href='https://en.wikipedia.org/wiki/Markdown'>You can use Markdown</a>", verbose_name='Comment')),
),
migrations.AlterField(
model_name='historicalserver',
name='description',
field=django_cryptography.fields.encrypt(markdownx.models.MarkdownxField(help_text="<a href='https://en.wikipedia.org/wiki/Markdown'>You can use Markdown</a>", null=True, verbose_name='Description')),
),
migrations.AlterField(
model_name='historicalserver',
name='documentation',
field=django_cryptography.fields.encrypt(models.URLField(blank=True, null=True, verbose_name='Documentation')),
),
migrations.AlterField(
model_name='historicalservertype',
name='comment',
field=django_cryptography.fields.encrypt(markdownx.models.MarkdownxField(blank=True, help_text="<a href='https://en.wikipedia.org/wiki/Markdown'>You can use Markdown</a>", verbose_name='Comment')),
),
migrations.AlterField(
model_name='historicalservertype',
name='description',
field=django_cryptography.fields.encrypt(markdownx.models.MarkdownxField(help_text="<a href='https://en.wikipedia.org/wiki/Markdown'>You can use Markdown</a>", null=True, verbose_name='Description')),
),
migrations.AlterField(
model_name='historicalservertype',
name='documentation',
field=django_cryptography.fields.encrypt(models.URLField(blank=True, null=True, verbose_name='Documentation')),
),
migrations.AlterField(
model_name='historicaltechflow',
name='comment',
field=django_cryptography.fields.encrypt(markdownx.models.MarkdownxField(blank=True, help_text="<a href='https://en.wikipedia.org/wiki/Markdown'>You can use Markdown</a>", verbose_name='Comment')),
),
migrations.AlterField(
model_name='historicaltechflow',
name='description',
field=django_cryptography.fields.encrypt(markdownx.models.MarkdownxField(help_text="<a href='https://en.wikipedia.org/wiki/Markdown'>You can use Markdown</a>", null=True, verbose_name='Description')),
),
migrations.AlterField(
model_name='historicaltechflow',
name='documentation',
field=django_cryptography.fields.encrypt(models.URLField(blank=True, null=True, verbose_name='Documentation')),
),
migrations.AlterField(
model_name='historicaluriflow',
name='comment',
field=django_cryptography.fields.encrypt(markdownx.models.MarkdownxField(blank=True, help_text="<a href='https://en.wikipedia.org/wiki/Markdown'>You can use Markdown</a>", verbose_name='Comment')),
),
migrations.AlterField(
model_name='historicaluriflow',
name='description',
field=django_cryptography.fields.encrypt(markdownx.models.MarkdownxField(help_text="<a href='https://en.wikipedia.org/wiki/Markdown'>You can use Markdown</a>", null=True, verbose_name='Description')),
),
migrations.AlterField(
model_name='historicaluriflow',
name='documentation',
field=django_cryptography.fields.encrypt(models.URLField(blank=True, null=True, verbose_name='Documentation')),
),
migrations.AlterField(
model_name='networkflow',
name='comment',
field=django_cryptography.fields.encrypt(markdownx.models.MarkdownxField(blank=True, help_text="<a href='https://en.wikipedia.org/wiki/Markdown'>You can use Markdown</a>", verbose_name='Comment')),
),
migrations.AlterField(
model_name='networkflow',
name='description',
field=django_cryptography.fields.encrypt(markdownx.models.MarkdownxField(help_text="<a href='https://en.wikipedia.org/wiki/Markdown'>You can use Markdown</a>", null=True, verbose_name='Description')),
),
migrations.AlterField(
model_name='networkflow',
name='documentation',
field=django_cryptography.fields.encrypt(models.URLField(blank=True, null=True, verbose_name='Documentation')),
),
migrations.AlterField(
model_name='protocol',
name='comment',
field=django_cryptography.fields.encrypt(markdownx.models.MarkdownxField(blank=True, help_text="<a href='https://en.wikipedia.org/wiki/Markdown'>You can use Markdown</a>", verbose_name='Comment')),
),
migrations.AlterField(
model_name='protocol',
name='description',
field=django_cryptography.fields.encrypt(markdownx.models.MarkdownxField(help_text="<a href='https://en.wikipedia.org/wiki/Markdown'>You can use Markdown</a>", null=True, verbose_name='Description')),
),
migrations.AlterField(
model_name='protocol',
name='documentation',
field=django_cryptography.fields.encrypt(models.URLField(blank=True, null=True, verbose_name='Documentation')),
),
migrations.AlterField(
model_name='server',
name='comment',
field=django_cryptography.fields.encrypt(markdownx.models.MarkdownxField(blank=True, help_text="<a href='https://en.wikipedia.org/wiki/Markdown'>You can use Markdown</a>", verbose_name='Comment')),
),
migrations.AlterField(
model_name='server',
name='description',
field=django_cryptography.fields.encrypt(markdownx.models.MarkdownxField(help_text="<a href='https://en.wikipedia.org/wiki/Markdown'>You can use Markdown</a>", null=True, verbose_name='Description')),
),
migrations.AlterField(
model_name='server',
name='documentation',
field=django_cryptography.fields.encrypt(models.URLField(blank=True, null=True, verbose_name='Documentation')),
),
migrations.AlterField(
model_name='servertype',
name='comment',
field=django_cryptography.fields.encrypt(markdownx.models.MarkdownxField(blank=True, help_text="<a href='https://en.wikipedia.org/wiki/Markdown'>You can use Markdown</a>", verbose_name='Comment')),
),
migrations.AlterField(
model_name='servertype',
name='description',
field=django_cryptography.fields.encrypt(markdownx.models.MarkdownxField(help_text="<a href='https://en.wikipedia.org/wiki/Markdown'>You can use Markdown</a>", null=True, verbose_name='Description')),
),
migrations.AlterField(
model_name='servertype',
name='documentation',
field=django_cryptography.fields.encrypt(models.URLField(blank=True, null=True, verbose_name='Documentation')),
),
migrations.AlterField(
model_name='techflow',
name='comment',
field=django_cryptography.fields.encrypt(markdownx.models.MarkdownxField(blank=True, help_text="<a href='https://en.wikipedia.org/wiki/Markdown'>You can use Markdown</a>", verbose_name='Comment')),
),
migrations.AlterField(
model_name='techflow',
name='description',
field=django_cryptography.fields.encrypt(markdownx.models.MarkdownxField(help_text="<a href='https://en.wikipedia.org/wiki/Markdown'>You can use Markdown</a>", null=True, verbose_name='Description')),
),
migrations.AlterField(
model_name='techflow',
name='documentation',
field=django_cryptography.fields.encrypt(models.URLField(blank=True, null=True, verbose_name='Documentation')),
),
]
|
[
"D@isuke10"
] |
D@isuke10
|
95135f1e23b15e25a1b4ba5d78ecef4dd3de1c36
|
694c3d6ef7b75f640b0fe21b1637093f020b1cd3
|
/palindromeCheck.py
|
540c41630f16dec12ac2a0c7b0aa577651912fb8
|
[] |
no_license
|
MinhNguyen153216/palindromeCheck
|
216fa6f18dc23b978517bb2433cd682ab1ea541f
|
ddbcf7ca4cee2578e34caded49eca45871386783
|
refs/heads/master
| 2023-03-29T04:53:16.527905 | 2021-03-26T13:57:21 | 2021-03-26T13:57:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 220 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 26 17:27:54 2021
@author: ASUS
"""
def find_palindrome(s):
    # A string is a palindrome when it equals its own reverse.
    return s == s[::-1]
print(find_palindrome('momom'))
# prints: True ('momom' is a palindrome)
|
[
"[email protected]"
] | |
0c46e45ada2e54e6238b1dc83d60a0db7b447b88
|
bbfae437b046fb0e19190f45a59e92d3a65a1f52
|
/day02/3-爬取出版社.py
|
b3459ac64cc0ca6b1aa63fa79428965d956c6f24
|
[] |
no_license
|
KEVINWANGXIANG/Scrapy_and_Data_analyse
|
87ce4f0d48637e0fd40a7a40054e8cc395885deb
|
66eb3cdb100837d90169e2368964af0a11167928
|
refs/heads/master
| 2020-05-19T19:21:57.122613 | 2019-05-06T10:50:31 | 2019-05-06T10:50:31 | 185,177,711 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 794 |
py
|
import urllib.request
import os,re
url = r"https://read.douban.com/provider/all"
path=r"F:\Python\爬虫与数据分析\day02\file.html"
headers={
"User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) "
"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"}
req=urllib.request.Request(url,headers=headers)
response=urllib.request.urlopen(req)
HtmlStr=response.read().decode("utf-8")
# with open(path,"w",encoding="utf-8") as f:
# f.write(HtmlStr)
# <div class="name">北京邮电大学出版社</div>
pat=r'<div class="name">(.*?)</div>'
re_publish=re.compile(pat)
data=re_publish.findall(HtmlStr)
# print(data)
toPath=r"F:\Python\爬虫与数据分析\day02\file.txt"
# Open the output file once (append mode, as before) instead of re-opening it per line.
with open(toPath, "a", encoding="utf-8") as f:
    for pub in data:
        f.write(pub + "\n")
|
[
"[email protected]"
] | |
ff81e1f3950f62d4dfbb90dc28261e8c8886eacc
|
88c2b8b94fa184786ae57d013c0d2e9d85daf2eb
|
/final_submission_scripts/py_scripts/FS_rf_full.py
|
60726f331e1ad8f19ce2157937e0b04118ce5df2
|
[] |
no_license
|
gianlucamancini7/ml_project2
|
adffb9916fb3e91ce2851e296aed8f2b8c91583a
|
25d21919af84c99ba90daa1e19983a963cc98a84
|
refs/heads/master
| 2020-04-04T20:46:30.355669 | 2018-12-23T09:12:59 | 2018-12-23T09:12:59 | 156,260,582 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,464 |
py
|
# coding: utf-8
# In[3]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import KBinsDiscretizer
pd.set_option('display.max_columns', 100)
# In[ ]:
INPUT_PATH = '~/scripts/'
OUTPUT_PATH = '/raid/motus/results/randomforest/'
def season_splitter(df):
df.index = pd.to_datetime(df.index)
df_spring = df[(df.index > '2018-03-20') & (df.index <= '2018-06-20')]
df_summer = df[(df.index > '2018-06-21') & (df.index <= '2018-09-21')]
df_autumn = df[(df.index > '2018-09-21') & (df.index <= '2018-12-21')]
    df_winter = df[(df.index > '2018-12-21') | (df.index <= '2018-03-20')]  # winter wraps the year boundary, so the two conditions are OR-ed ('+' on boolean Series was a bug)
return df_spring, df_summer, df_autumn, df_winter
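# Note: season_splitter() is defined but never called below; a hypothetical
# usage (assuming a DataFrame indexed by date strings, as above) would be:
#
#   df_spring, df_summer, df_autumn, df_winter = season_splitter(tot_df)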
# # Load and preprocess the data
# In[66]:
tot_df = pd.read_csv(INPUT_PATH + 'regression_mat_year.csv', index_col=0)
# create columns with coordinate velocities output
tot_df['u_x']=tot_df['u']*np.cos(np.radians(tot_df['direction']))
tot_df['u_y']=tot_df['u']*np.sin(np.radians(tot_df['direction']))
# create columns with coordinate velocities input top mast anemometer
tot_df['u_top_x']=tot_df['u_top']*np.cos(np.radians(tot_df['direction_top']))
tot_df['u_top_y']=tot_df['u_top']*np.sin(np.radians(tot_df['direction_top']))
# drop the columns which are not used anymore
tot_df=tot_df.drop(columns=['u', 'u_top', 'direction', 'direction_top'])
tot_df = tot_df.iloc[0:, :]  # no-op slice kept from the notebook; selects all rows and columns
# # Random forest feature selection
# Pipeline: Discretize the output -> Random forest
# <br>Output: u_x, u_y, z
# ## Prepare the input and output
# In[12]:
x = np.array(tot_df.drop(columns=['u_x', 'u_y','u_z']))
y_continue = np.array(tot_df[['u_x', 'u_y']])
# ## Discretize the output
# In[13]:
discretizer = KBinsDiscretizer(n_bins=20, encode='ordinal', strategy='uniform')
discretizer.fit(y_continue)
y_disc = discretizer.transform(y_continue)
# ## Split train and test
# In[14]:
x_tr, x_te, y_tr, y_te = train_test_split(x, y_disc, test_size=0.3, random_state=42)
# ## Random forest
# In[15]:
#y_tr_cont = discretizer.inverse_transform(y_tr)
# In[16]:
rf = RandomForestClassifier(n_estimators=1000, max_depth=None, criterion='gini', random_state=0)
rf.fit(x_tr, y_tr)
# ## Print the result(feature importance)
# In[18]:
feat_labels = tot_df.drop(columns=['u_x', 'u_y','u_z']).columns
# In[74]:
importances = rf.feature_importances_
indices = np.argsort(importances)[::-1]
important_features = []
importance_accum = 0
#open("feature_importance.txt", 'w').close
filetxt = open(OUTPUT_PATH + "FS_RF_full.txt", "w")
filetxt.write("\n For the full year: \n")
for f in range(x_tr.shape[1]):
print("%2d) %-*s %f" % (f + 1, 50, feat_labels[indices[f]], importances[indices[f]]))
filetxt.write("%2d) %-*s %f \n" % (f + 1, 50, feat_labels[indices[f]], importances[indices[f]]))
if importance_accum < 0.80:
importance_accum = importance_accum + importances[indices[f]]
important_features.append(feat_labels[indices[f]])
filetxt.write("\n The top 80% important features are: \n")
for i in range(len(important_features)):
filetxt.write("%s \n" % important_features[i])
filetxt.write("%i features on %i" % (len(important_features), x_tr.shape[1]))
filetxt.close()
|
[
"[email protected]"
] | |
e04e36754867e4cce7f582ff4d53b49ee1c01c21
|
12b2f39a37cd022ced27fc266a5d1b47aba76b9d
|
/vanilla/demo.py
|
d38a082cbab51d9c0fb0472a89eadb0100bc9b68
|
[] |
no_license
|
ZhangYet/vanguard
|
7a3e89035775dcf0b8c60a8a05165bc6e6af14e8
|
660cfdbcfb747c372e24466ef14680ffcbd2c68c
|
refs/heads/master
| 2021-07-01T05:47:52.398437 | 2021-01-04T08:43:34 | 2021-01-04T08:43:34 | 207,758,249 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,235 |
py
|
from gvanim import Animation, render, gif
class Node:
def __init__(self, value: int, name: str):
self.value = value
self.left, self.right = None, None
self.name = name
def preorder(root: Node, ga: Animation):
if not root:
return
ga.highlight_node(root.name)
if root.left:
ga.next_step()
ga.highlight_edge(root.name, root.left.name)
ga.next_step()
preorder(root.left, ga)
if root.right:
ga.next_step()
ga.highlight_edge(root.name, root.right.name)
ga.next_step()
preorder(root.right, ga)
def test_case():
node0 = Node(0, 'root')
node1 = Node(1, 'left')
node2 = Node(2, 'right')
node3 = Node(3, 'll-child')
node4 = Node(4, 'lr-child')
node0.left = node1
node0.right = node2
node1.left = node3
node1.right = node4
ga = Animation()
ga.add_edge(node0.name, node1.name)
ga.add_edge(node0.name, node2.name)
ga.add_edge(node1.name, node3.name)
ga.add_edge(node1.name, node4.name)
ga.next_step()
preorder(node0, ga)
graphs = ga.graphs()
for g in graphs:
print(g)
output = render(graphs, 'demo', 'png')
gif(output, 'demo', 50)
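# test_case() is defined above but never invoked; a guarded entry point (an
# assumed intent, not in the original) makes the demo runnable as a script:
if __name__ == '__main__':
    test_case()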
|
[
"[email protected]"
] | |
bd2642958ca70142252da805baaf1c98b45dbc0d
|
3eea8b36dfd6633ca7645b0d43d78c6eeb960502
|
/website/migrations/0012_auto_20200606_0444.py
|
a07049f90835cafb4d289531fedf3b271c3f0e81
|
[] |
no_license
|
raifran1/100DaysPB
|
9d5c367b4ad951da13c9f82e78785d51a3941bc3
|
4820aa0e09b9baeb761dc6ec6e262e4e0635bb84
|
refs/heads/master
| 2022-10-14T06:17:29.605900 | 2020-06-13T16:54:06 | 2020-06-13T16:54:06 | 272,049,540 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 719 |
py
|
# Generated by Django 3.0.7 on 2020-06-06 07:44
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('website', '0011_auto_20200606_0444'),
]
operations = [
migrations.AlterField(
model_name='aula',
name='publicado',
field=models.DateTimeField(default=datetime.datetime(2020, 6, 6, 7, 44, 37, 169769, tzinfo=utc)),
),
migrations.AlterField(
model_name='comentario',
name='publicado',
field=models.DateTimeField(default=datetime.datetime(2020, 6, 6, 7, 44, 37, 169769, tzinfo=utc)),
),
]
|
[
"[email protected]"
] | |
fbc8a886afbdfc9782f396e85bce47a631519c2f
|
c3cb06f1107043f93ee3db12bef7abb758b4d002
|
/in_progress/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_1D_tests_degen_Alfven_wave.py
|
31b036de8bdab4f0b59a02858a0717a3b8dc1744
|
[
"BSD-2-Clause"
] |
permissive
|
basuparth/nrpytutorial
|
58533f98c260649ece3f35d8777ceabd4bff702f
|
8a6b05f33c17b42034cf21fb66d9ac7946065028
|
refs/heads/master
| 2023-01-03T05:26:53.681493 | 2020-10-23T21:43:23 | 2020-10-23T21:43:23 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,464 |
py
|
#!/usr/bin/env python
# coding: utf-8
# <a id='top'></a>
#
#
# # $\texttt{GiRaFFEfood}$: Initial data for $\texttt{GiRaFFE}$
#
# ## Alfvén Wave
#
# $$\label{top}$$
#
# This module provides another initial data option for $\texttt{GiRaFFE}$, drawn from [this paper](https://arxiv.org/abs/1310.3274). This is a flat-spacetime test with initial data
# \begin{align}
# A_x &= 0 \\
# A_y &= \left \{ \begin{array}{lll}\gamma_\mu x - 0.015 & \mbox{if} & x \leq -0.1/\gamma_\mu \\
# 1.15 \gamma_\mu x - 0.03g(x) & \mbox{if} & -0.1/\gamma_\mu \leq x \leq 0.1/\gamma_\mu \\
# 1.3 \gamma_\mu x - 0.015 & \mbox{if} & x \geq 0.1/\gamma_\mu \end{array} \right. , \\
# A_z &= y - \gamma_\mu (1-\mu)x ,
# \end{align}
# which generates the magnetic field in the wave frame,
# \begin{align}
# B'^{x'}(x') = &\ 1.0,\ B'^y(x') = 1.0, \\
# B'^z(x') = &\ \left \{ \begin{array}{lll} 1.0 & \mbox{if} & x' \leq -0.1 \\
# 1.0+0.15 f(x') & \mbox{if} & -0.1 \leq x' \leq 0.1 \\
# 1.3 & \mbox{if} & x' \geq 0.1 \end{array} \right. ,
# \end{align}
# and the electric field in the wave frame,
# $$E'^{x'}(x') = -B'^z(0,x') \ \ , \ \ E'^y(x') = 0.0 \ \ , \ \ E'^z(x') = 1.0 .$$
#
# These are converted to the grid frame by
# \begin{align}
# B^x(0,x) = &\ B'^{x'}(\gamma_\mu x) , \\
# B^y(0,x) = &\ \gamma_\mu [ B'^y(\gamma_\mu x) - \mu E'^z(\gamma_\mu x) ] , \\
# B^z(0,x) = &\ \gamma_\mu [ B'^z(\gamma_\mu x) + \mu E'^y(\gamma_\mu x) ] ,
# \end{align}
# and
# \begin{align}
# E^x(0,x) = &\ E'^{x'}(\gamma_\mu x) , \\
# E^y(0,x) = &\ \gamma_\mu [ E'^y(\gamma_\mu x) + \mu B'^z(\gamma_\mu x) ] ,\\
# E^z(0,x) = &\ \gamma_\mu [ E'^z(\gamma_\mu x) - \mu B'^y(\gamma_\mu x) ],
# \end{align}
# and the velocity is given by $$\mathbf{v} = \frac{\mathbf{E} \times \mathbf{B}}{B^2}$$ in flat spacetime. Additionally, $f(x)=1+\sin (5\pi x)$, $-1<\mu<1$ is the wave speed relative to the grid frame and $\gamma_\mu = (1-\mu^2)^{-1/2}$, and $g(x) = \cos (5\pi \gamma_\mu x)/\pi$.
#
# For the eventual purpose of testing convergence, any quantity $Q$ evolves as $Q(t,x) = Q(0,x-\mu t)$
#
# See [previous NRPy+ tutorial module](Tutorial-GiRaFFEfood_NRPy.ipynb) for more general detail on how this is used.
#
# #### Table of Contents:
# 1. [Steps 0-1:](#preliminaries) Preliminaries
# 1. [Step 2:](#step2) Set the vector $A_k$
# 1. [Step 3:](#step3) Set the vectors $B^i$ and $E^i$ for the velocity
# 1. [Step 4:](#step4) Calculate $v^i$
# 1. [Step 5:](#step5) NRPy+ Module Code Validation
#
# <a id='preliminaries'></a>
#
# ### Steps 0-1: Preliminaries \[Back to [top](#top)\]
#
# Here, we will import the NRPy+ core modules and set the reference metric to Cartesian, set commonly used NRPy+ parameters, and set C parameters that will be set from outside the code eventually generated from these expressions. We will also set up a parameter to determine what initial data is set up, although it won't do much yet.
# $$\label{preliminaries}$$
# Step 0: Import the NRPy+ core modules and set the reference metric to Cartesian
import NRPy_param_funcs as par
import grid as gri # NRPy+: Functions having to do with numerical grids
import indexedexp as ixp
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
import reference_metric as rfm
par.set_parval_from_str("reference_metric::CoordSystem","Cartesian")
rfm.reference_metric()
# Step 1a: Set commonly used parameters.
thismodule = __name__
# <a id='step2'></a>
# ### Set the vector $A_k$
# The vector potential is given as
# \begin{align}
# A_x &= 0 \\
# A_y &= \left \{ \begin{array}{lll}\gamma_\mu x - 0.015 & \mbox{if} & x \leq -0.1/\gamma_\mu \\
# 1.15 \gamma_\mu x - 0.03g(x) & \mbox{if} & -0.1/\gamma_\mu \leq x \leq 0.1/\gamma_\mu \\
# 1.3 \gamma_\mu x - 0.015 & \mbox{if} & x \geq 0.1/\gamma_\mu \end{array} \right. , \\
# A_z &= y - \gamma_\mu (1-\mu)x .
# \end{align}
# First, however, we must set $$\gamma_\mu = (1-\mu^2)^{-1/2}$$ and $$g(x) = \cos (5\pi \gamma_\mu x)/\pi$$.
# $$\label{step2}$$
mu_AW = par.Cparameters("REAL",thismodule,["mu_AW"], -0.5) # The wave speed
M_PI = par.Cparameters("#define",thismodule,["M_PI"], "")
def GiRaFFEfood_NRPy_1D_tests_degen_Alfven_wave(stagger = False):
gammamu = sp.sympify(1)/sp.sqrt(sp.sympify(1)-mu_AW**2)
# We'll use reference_metric.py to define x and y
x = rfm.xxCart[0]
if stagger:
x_p_half = x + sp.Rational(1,2)*gri.dxx[0]
if stagger:
h1_AW = sp.cos(sp.Rational(5,2)*M_PI*(gammamu*x_p_half+sp.Rational(1,10)))
h2_AW = sp.sin(sp.Rational(5,2)*M_PI*(gammamu*x_p_half+sp.Rational(1,10)))
else:
h1_AW = sp.cos(sp.Rational(5,2)*M_PI*(gammamu*x+sp.Rational(1,10)))
h2_AW = sp.sin(sp.Rational(5,2)*M_PI*(gammamu*x+sp.Rational(1,10)))
# Now, we can define the vector potential. We will create three copies of this variable, because the potential is uniquely defined in three zones. Data for $x \leq -0.1/\gamma_\mu$ shall be referred to as "left", data for $-0.1/\gamma_\mu \leq x \leq 0.1/\gamma_\mu$ as "center", and data for $x \geq 0.1/\gamma_\mu$ as "right".
global AD
AD = ixp.zerorank1()
import Min_Max_and_Piecewise_Expressions as noif
bound = sp.Rational(1,10)/gammamu
if stagger:
Ayleft = -sp.Rational(4,5)/M_PI
Aycenter = -sp.Rational(4,5)/M_PI * h1_AW
Ayright = sp.sympify(2)*(gammamu*x_p_half-sp.Rational(1,10))
Azleft = -sp.sympify(2)*(gammamu*x_p_half+sp.Rational(1,10))
Azcenter = -sp.Rational(4,5)/M_PI * h2_AW
Azright = -sp.Rational(4,5)/M_PI
else:
Ayleft = -sp.Rational(4,5)/M_PI
Aycenter = -sp.Rational(4,5)/M_PI * h1_AW
Ayright = sp.sympify(2)*(gammamu*x-sp.Rational(1,10))
Azleft = -sp.sympify(2)*(gammamu*x+sp.Rational(1,10))
Azcenter = -sp.Rational(4,5)/M_PI * h2_AW
Azright = -sp.Rational(4,5)/M_PI
AD[0] = sp.sympify(0)
AD[1] = noif.coord_leq_bound(x,-bound)*Ayleft\
+noif.coord_greater_bound(x,-bound)*noif.coord_leq_bound(x,bound)*Aycenter\
+noif.coord_greater_bound(x,bound)*Ayright
AD[2] = noif.coord_leq_bound(x,-bound)*Azleft\
+noif.coord_greater_bound(x,-bound)*noif.coord_leq_bound(x,bound)*Azcenter\
+noif.coord_greater_bound(x,bound)*Azright
# <a id='step3'></a>
# ### Set the vectors $B^i$ and $E^i$ for the velocity
#
# Now, we will set the magnetic and electric fields that we will need to define the initial velocities. First, we need to define $$f(x)=1+\sin (5\pi x);$$ note that in the definition of $B^i$, we need $f(x')$ where $x'=\gamma_\mu x$.
# $$\label{step3}$$
xprime = gammamu*x
bound = sp.Rational(1,10)
phileft = sp.sympify(0)
phicenter = sp.Rational(5,2)*M_PI*(xprime+sp.Rational(1,10))
phiright = sp.Rational(1,2)*M_PI
    phi = noif.coord_leq_bound(xprime,-bound)*phileft\
         +noif.coord_greater_bound(xprime,-bound)*noif.coord_leq_bound(xprime,bound)*phicenter\
         +noif.coord_greater_bound(xprime,bound)*phiright
# We will now set the magnetic field in the wave frame:
# \begin{align}
# B'^{x'}(x') = &\ 1.0,\ B'^y(x') = 1.0, \\
# B'^z(x') = &\ \left \{ \begin{array}{lll} 1.0 & \mbox{if} & x' \leq -0.1 \\
# 1.0+0.15 f(x') & \mbox{if} & -0.1 \leq x' \leq 0.1 \\
# 1.3 & \mbox{if} & x' \geq 0.1 \end{array} \right. .
# \end{align}
#
BpU = ixp.zerorank1()
BpU[0] = sp.sympify(0)
BpU[1] = sp.sympify(2)*sp.cos(phi)
BpU[2] = sp.sympify(2)*sp.sin(phi)
# Now, we will set the electric field in the wave frame:
# \begin{align}
# E'^{x'}(x') &= -B'^z(0,x'), \\
# E'^y(x') &= 0.0, \\
# E'^z(x') &= 1.0 .
# \end{align}
EpU = ixp.zerorank1()
# Next, we must transform the fields into the grid frame. We'll do the magnetic fields first.
# \begin{align}
# B^x(0,x) = &\ B'^{x'}(\gamma_\mu x) , \\
# B^y(0,x) = &\ \gamma_\mu [ B'^y(\gamma_\mu x) - \mu E'^z(\gamma_\mu x) ] , \\
# B^z(0,x) = &\ \gamma_\mu [ B'^z(\gamma_\mu x) + \mu E'^y(\gamma_\mu x) ] ,
# \end{align}
#
global BU
BU = ixp.zerorank1()
BU[0] = BpU[0]
BU[1] = gammamu*(BpU[1]-mu_AW*EpU[2])
BU[2] = gammamu*(BpU[2]+mu_AW*EpU[1])
# And now the electric fields:
# \begin{align}
# E^x(0,x) = &\ E'^{x'}(\gamma_\mu x) , \\
# E^y(0,x) = &\ \gamma_\mu [ E'^y(\gamma_\mu x) + \mu B'^z(\gamma_\mu x) ] ,\\
# E^z(0,x) = &\ \gamma_\mu [ E'^z(\gamma_\mu x) - \mu B'^y(\gamma_\mu x) ],
# \end{align}
#
EU = ixp.zerorank1()
EU[0] = EpU[0]
EU[1] = gammamu*(EpU[1]+mu_AW*BpU[2])
EU[2] = gammamu*(EpU[2]-mu_AW*BpU[1])
# <a id='step4'></a>
# ### Calculate $v^i$
#
# Now, we calculate $$\mathbf{v} = \frac{\mathbf{E} \times \mathbf{B}}{B^2},$$ which is equivalent to $$v^i = [ijk] \frac{E^j B^k}{B^2},$$ where $[ijk]$ is the Levi-Civita symbol and $B^2 = \gamma_{ij} B^i B^j$ is a trivial dot product in flat space.
# $$\label{step4}$$
LeviCivitaSymbolDDD = ixp.LeviCivitaSymbol_dim3_rank3()
B2 = sp.sympify(0)
for i in range(3):
# In flat spacetime, gamma_{ij} is just a Kronecker delta
B2 += BU[i]**2 # This is trivial to extend to curved spacetime
global ValenciavU
ValenciavU = ixp.zerorank1()
for i in range(3):
for j in range(3):
for k in range(3):
ValenciavU[i] += LeviCivitaSymbolDDD[i][j][k] * EU[j] * BU[k] / B2
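# A minimal driver sketch (an assumption about typical NRPy+ usage; not part of
# the original module). After calling the routine, the initial data live in the
# module-level globals AD, BU, and ValenciavU as SymPy expressions:
#
#   import GiRaFFEfood_NRPy_1D_tests_degen_Alfven_wave as gf
#   gf.GiRaFFEfood_NRPy_1D_tests_degen_Alfven_wave(stagger=False)
#   print(gf.AD[1])          # A_y as a SymPy expression
#   print(gf.ValenciavU[0])  # v^x as a SymPy expression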
|
[
"[email protected]"
] | |
799ed238049370ca5758cf0a772ef317681ac106
|
2d2527a32797e1107f2c83ee455f9c249d3a2763
|
/Lottery.py
|
6266397111e4bc0333f9161fb5af21295ed4b7ed
|
[] |
no_license
|
Olelewe-PROHUB/Python-Projects
|
a1f2ff66c7a2d6d389759fa26bb6347486b05742
|
3d4a2405f37bb53860fa34ae1efc1aa4ef9b5bdc
|
refs/heads/master
| 2021-02-14T04:28:57.135887 | 2020-03-04T00:11:08 | 2020-03-04T00:11:08 | 244,768,585 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,746 |
py
|
# The Lottery Program builds a pool of 10 numbers and 5 letters
# and randomly selects four numbers or four letters from it.
# A message is printed saying that a contestant who possesses a matching
# combination wins the prize.
"""Need to import the choice method from the random module"""
from random import choice
class Lotto:
def __init__(self):
"""Initialize the Lotto Attributes"""
self.lists = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 'A', 'B', 'C', 'F', 'G']
self.combon = []
self.combol = []
"""
The method winnter, is used to determine the correct tickect combo
If the choice is a string, it is assigned to self.combon
If the choice is a number, it is assigned to self.combol
Whoever fills up to a length of 5 firsts, becomes the winning
Combination that is displayed
"""
def winner(self):
i = 0
j = 0
while i < 5 and j < 5:
k = choice(self.lists)
        if k in range(1, 11):  # numbers 1-10 inclusive; range(1, 10) silently skipped the 10 in the pool
self.combon.append(k)
i+=1
if k in ['A', 'B', 'C', 'F', 'G']:
self.combol.append(k)
j+=1
if i == 4 or j == 4:
break
if len(self.combon) == 4:
print("To win you must have following number combination: ")
for q in self.combon:
print(f"{q}")
elif len(self.combol) == 4:
print("To win you must have following letter combination: ")
for r in self.combol:
print(f"{r}")
else:
print("No winning Combination has been determined")
prize = Lotto()
prize.winner()
|
[
"[email protected]"
] | |
fe3d3c58722c2387acd324f43f7c8ae75d32030a
|
aabca07ca1c659e5d8ff9e935b46a73d365e1d45
|
/python/candies.py
|
6a17aedd7b94864fd258fc5751896bf3249d2277
|
[] |
no_license
|
colinbruner/leetcode
|
d47aa7b07a5fa7c4bc94bda085f69a0be405b6f7
|
5be75851833b57e37f7d725a6f195662b2a1a068
|
refs/heads/master
| 2022-12-11T14:18:56.200279 | 2020-09-16T22:07:58 | 2020-09-16T22:07:58 | 293,332,008 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 557 |
py
|
class Solution:
# def kidsWithCandies(self, candies: List[int], extraCandies: int) -> List[bool]:
def kidsWithCandies(self, candies, extraCandies):
ans = list()
most_candies = max(candies)
for kid in candies:
if kid + extraCandies >= most_candies:
ans.append(True)
else:
ans.append(False)
return ans
print(Solution().kidsWithCandies([2, 3, 5, 1, 3], 3))
print(Solution().kidsWithCandies([4, 2, 1, 1, 2], 1))
print(Solution().kidsWithCandies([12, 1, 12], 10))
|
[
"[email protected]"
] | |
0c0782612fe16f2c714fe147ada9085f228edca5
|
e3e1429aec04f84078d83bbd3b16646b04adc20f
|
/D03/ex04/django_venv/bin/easy_install-3.8
|
030c1b1a187ed4d81fb9b763b61069eb3a4f8aed
|
[] |
no_license
|
KKWANH/42_Django_Piscine
|
e5b1da1e2b4fcc05898004e4c23d3388ae6b9437
|
d7b2e86da7e8872b42461ee6f3b4ed4b0281b3d9
|
refs/heads/main
| 2023-07-11T12:37:01.503874 | 2021-08-11T14:39:07 | 2021-08-11T14:39:07 | 389,653,788 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 315 |
8
|
#!/Users/kimkwanho/Documents/Programming/42Seoul/Django_Piscine/git/D03/ex04/django_venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"[email protected]"
] | |
e11e29e082e3b71ce4dd66262c39a752138a0743
|
3d5164c05354f4f3db782efa504670d441963cd6
|
/semestrovka_django_2k/client/migrations/0001_initial.py
|
e876759ec72399d99f1e94cde0ea65f47c77e07f
|
[] |
no_license
|
agalimullin/codeex
|
7efd7c2e966193c8ec0aabae82ffe248d0ea1fa8
|
c4757da23dd05b0be22ffbf0defd608633562d11
|
refs/heads/master
| 2020-12-25T09:48:00.003278 | 2016-07-31T11:29:48 | 2016-07-31T11:29:48 | 64,546,933 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 878 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('email', models.EmailField(max_length=254)),
('avatar', models.ImageField(upload_to=b'avatars', blank=True)),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'profile',
'verbose_name_plural': 'profiles',
},
),
]
|
[
"[email protected]"
] | |
b4f391918f30a778d049bd168cb1ca4154c0b42a
|
3a4fbde06794da1ec4c778055dcc5586eec4b7d2
|
/@lib/12-13-2011-01/vyperlogix/decorators/addto.py
|
979a905e9a18fdcddf2620939aec919f9baa031a
|
[] |
no_license
|
raychorn/svn_python-django-projects
|
27b3f367303d6254af55c645ea003276a5807798
|
df0d90c72d482b8a1e1b87e484d7ad991248ecc8
|
refs/heads/main
| 2022-12-30T20:36:25.884400 | 2020-10-15T21:52:32 | 2020-10-15T21:52:32 | 304,455,211 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 950 |
py
|
__copyright__ = """\
(c). Copyright 2008-2014, Vyper Logix Corp., All Rights Reserved.
Published under Creative Commons License
(http://creativecommons.org/licenses/by-nc/3.0/)
restricted to non-commercial educational use only.,
http://www.VyperLogix.com for details
THE AUTHOR VYPER LOGIX CORP DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
USE AT YOUR OWN RISK.
"""
def addto(instance):
'''
alias for inject_method_into(instance)
'''
from inject import inject_method_into
return inject_method_into(instance)
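# A minimal usage sketch (illustrative only; it assumes inject_method_into
# returns a decorator that attaches the function to the given instance, as the
# alias's docstring suggests):
#
#   class Greeter(object):
#       pass
#
#   g = Greeter()
#
#   @addto(g)
#   def hello(self):
#       return 'hello'
#
#   print g.hello()   # -> 'hello'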
|
[
"[email protected]"
] | |
c305892b8de9942ba1433b2aa00240da71b7b0bc
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/AlipayCloudCloudbaseHttpaccessBindQueryResponse.py
|
ebc27653df46ebfce5e7c7e7b22f0e76998f3f54
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 |
Apache-2.0
| 2023-04-25T04:54:02 | 2018-05-14T09:40:54 |
Python
|
UTF-8
|
Python
| false | false | 1,902 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.DomainBind import DomainBind
class AlipayCloudCloudbaseHttpaccessBindQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayCloudCloudbaseHttpaccessBindQueryResponse, self).__init__()
self._domain_binds = None
self._page_index = None
self._page_size = None
self._total = None
@property
def domain_binds(self):
return self._domain_binds
@domain_binds.setter
def domain_binds(self, value):
if isinstance(value, list):
self._domain_binds = list()
for i in value:
if isinstance(i, DomainBind):
self._domain_binds.append(i)
else:
self._domain_binds.append(DomainBind.from_alipay_dict(i))
@property
def page_index(self):
return self._page_index
@page_index.setter
def page_index(self, value):
self._page_index = value
@property
def page_size(self):
return self._page_size
@page_size.setter
def page_size(self, value):
self._page_size = value
@property
def total(self):
return self._total
@total.setter
def total(self, value):
self._total = value
def parse_response_content(self, response_content):
response = super(AlipayCloudCloudbaseHttpaccessBindQueryResponse, self).parse_response_content(response_content)
if 'domain_binds' in response:
self.domain_binds = response['domain_binds']
if 'page_index' in response:
self.page_index = response['page_index']
if 'page_size' in response:
self.page_size = response['page_size']
if 'total' in response:
self.total = response['total']
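# A minimal usage sketch (the payload below is illustrative, not a real
# gateway response; it assumes parse_response_content accepts the raw JSON
# body, as the base-class name suggests):
#
#   resp = AlipayCloudCloudbaseHttpaccessBindQueryResponse()
#   resp.parse_response_content('{"page_index": 1, "page_size": 10, "total": 0, "domain_binds": []}')
#   print(resp.page_index, resp.total)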
|
[
"[email protected]"
] | |
e8992ba2dfbde226c943c5ad3695716282a166ef
|
b89e46fadd265777ffdf17ca7a7c2e656c9720e1
|
/coding/firststep/function_default.py
|
d3b04d18c0e07a2f0c967f28e8cde494bb3b0b45
|
[] |
no_license
|
pengchant/enjoy-python
|
9976dcb7e9bbd36fa76ec6c72f04a038f09ade82
|
f13a5c78c4b2986700d9cefa09d39fa662b6a37e
|
refs/heads/master
| 2022-10-21T00:28:07.174259 | 2018-01-26T05:28:01 | 2018-01-26T05:28:01 | 118,450,210 | 1 | 1 | null | 2022-10-06T02:04:53 | 2018-01-22T11:47:36 |
Python
|
UTF-8
|
Python
| false | false | 79 |
py
|
def say(message,times=1):
print(message*times)
say('Hello')
say('World',5)
|
[
"[email protected]"
] | |
27b57af6ca5f6955108cd69191c354f0a95d9c05
|
84eb0bf9a8dc1f6b10fb094c0f40728c0b45320a
|
/gemdaq-testing/setup/scripts/python/v2/kernel/ipbus/ChipsBus.py
|
0bb5a65b2ed936a797b259b55b7ea8960537d24a
|
[] |
no_license
|
jsturdy/gem-daq-code
|
9da017dc15c8b44ad9c6103b0dd78f04cffed1c6
|
008d34c9d4774e23c1c366a0e3afcdd671742427
|
refs/heads/develop
| 2020-12-24T07:05:01.055068 | 2016-06-03T12:57:30 | 2016-06-03T12:57:30 | 17,248,246 | 1 | 3 | null | 2016-06-01T11:25:47 | 2014-02-27T12:37:15 |
C++
|
UTF-8
|
Python
| false | false | 29,098 |
py
|
'''
Created on May 12, 2010
@author: Robert Frazier, Carl Jeske
'''
# System imports
import socket
# Project imports
import IPbusHeader
from TransactionElement import TransactionElement
from Transaction import Transaction
from CommonTools import uInt32HexStr, uInt32Compatible, uInt32BitFlip
from ChipsLog import chipsLog
from ChipsException import ChipsException
class ChipsBusBase(object):
"""Common Hardware Interface Protocol System Bus (CHIPS-Bus) abstract base-class
Allows you to communicate with and control devices running Jeremy Mans's, et al, IP-based
uTCA control system firmware. This base class represents the part of the ChipsBus code
that is protocol-agnostic. Protocol-specific concrete classes, using either UDP or TCP,
derive from this.
The bus assumes 32-bit word addressing, so in a 32-bit address space up to 2^34 bytes in
total can be addressed.
"""
    IPBUS_PROTOCOL_VER = 2 # I.e. IPbus Protocol v2.x
SOCKET_BUFFER_SIZE = 32768 # Max UDP/TCP socket buffer size in bytes for receiving packets.
MAX_TRANSACTION_ID = 4095 # The maximum value the transaction ID field can go up to.
# Max depth of a block read or write before bridging the read/write over multiple requests.
# Note that for UDP the max IPBus packet size cannot exceed 368 32-bit words (1472 bytes), or
# it'll fail due to reaching the max Ethernet packet payload size (without using Jumbo Frames).
# If you are Jumbo-Frames capable, then this number should not exceed 2000. Note that the
# jumbo-frames firmware uses a 8192-byte buffer, so we can't make use of the full 9000 byte
# Jumbo Frame anyway.
MAX_BLOCK_TRANSFER_DEPTH = 255 # Temporary hack to get IPbus v2.0 compatible code working
# The max size of the request queue (note: current API excludes ability to queue block transfer requests)
MAX_QUEUED_REQUESTS = 80
def __init__(self, addrTable, hostIp, hostPort, localPort = None):
"""ChipsBus abstract base-class constructor
addrTable: An instance of AddressTable for the device you wish to communicate with.
hostIP: The IP address of the device you want to control, e.g. the string '192.168.1.100'.
hostPort: The network port number of the device you want to control.
localPort: If you wish to bind the socket to a particular local port, then specify the
the local port number here. The default (None) means that the socket will not bind
to any specific local port - an available port be found when it comes to sending any
packets.
"""
object.__init__(self)
self._transactionId = 1
self.addrTable = addrTable
self._hostAddr = (hostIp, hostPort)
self._queuedRequests = [] # Request queue
self._queuedAddrTableItems = [] # The corresponding address table item for each request in the request queue
self._queuedIsARead = [] # This holds a True if the corresponding request in _queuedRequests is a read, or a False if it's a write.
def queueRead(self, name, addrOffset = 0):
"""Create a read transaction element and add it to the transaction queue.
This works in the same way as a normal read(), except that many can be queued
into a packet and dispatched all at once rather than individually. Run the
queued transactions with queueRun().
Only single-register reads/writes can be queued. Block reads/writes, etc, cannot
be queued.
"""
if len(self._queuedRequests) < ChipsBus.MAX_QUEUED_REQUESTS:
chipsLog.debug("Read queued: register '" + name + "' with addrOffset = 0x" + uInt32HexStr(addrOffset))
addrTableItem = self.addrTable.getItem(name) # Get the details of the relevant item from the addr table.
if not addrTableItem.getReadFlag():
raise ChipsException("Read transaction creation error: read is not allowed on register '" + addrTableItem.getName() + "'.")
self._queuedRequests.append(self._createReadTransactionElement(addrTableItem, 1, addrOffset))
self._queuedAddrTableItems.append(addrTableItem)
self._queuedIsARead.append(True)
else:
chipsLog.warning("Warning: transaction not added to queue as transaction queue has reached its maximum length!\n" +
"\tPlease either run or clear the transaction queue before continuing.\n")
def queueWrite(self, name, dataU32, addrOffset = 0):
"""Create a register write (RMW-bits) transaction element and add it to the transaction queue.
This works in the same way as a normal write(), except that many can be queued
into a packet and dispatched all at once rather than individually. Run the
queued transactions with queueRun().
Only single-register reads/writes can be queued. Block reads/writes, etc, cannot
be queued.
"""
if len(self._queuedRequests) < ChipsBus.MAX_QUEUED_REQUESTS:
dataU32 = dataU32 & 0xffffffff # Ignore oversize input.
chipsLog.debug("Write queued: dataU32 = 0x" + uInt32HexStr(dataU32) + " to register '"
+ name + "' with addrOffset = 0x" + uInt32HexStr(addrOffset))
addrTableItem = self.addrTable.getItem(name) # Get the details of the relevant item from the addr table.
if not addrTableItem.getWriteFlag():
raise ChipsException("Write transaction creation error: write is not allowed on register '" + addrTableItem.getName() + "'.")
# self._queuedRequests.append(self._createRMWBitsTransactionElement(addrTableItem, dataU32, addrOffset))
# self._queuedAddrTableItems.append(addrTableItem)
# self._queuedIsARead.append(False)
self._queuedRequests.append(self._createWriteTransactionElement(addrTableItem, [dataU32], addrOffset))
self._queuedAddrTableItems.append(addrTableItem)
self._queuedIsARead.append(False)
else:
chipsLog.warning("Warning: transaction not added to queue as transaction queue has reached its maximum length!\n" +
"\tPlease either run or clear the transaction queue before continuing.\n")
def queueRun(self):
"""Runs the current queue of single register read or write transactions and returns two lists. The
first contains the values read and the second contains the values written.
Note: Only single-register reads/writes can be queued. Block reads/writes, etc, cannot
be queued.
"""
chipsLog.debug("Running all queued transactions")
requestQueueLength = len(self._queuedRequests)
readResponse = []
writeResponse = []
try:
transaction = self._makeAndRunTransaction(self._queuedRequests)
except ChipsException, err:
self.queueClear()
raise ChipsException("Error while running queued transactions:\n\t" + str(err))
for i in range(requestQueueLength):
addrTableItem = self._queuedAddrTableItems[i]
if len(transaction.responses[0].getBody()) > 0:
transactionResponse = transaction.responses[i - requestQueueLength].getBody()[0] & 0xffffffff
transactionResponse = addrTableItem.shiftDataFromMask(transactionResponse)
else:
transactionResponse = 0
if self._queuedIsARead[i]:
readResponse.append(transactionResponse)
chipsLog.debug("Read success! Register '" + addrTableItem.getName() + "' returned: 0x" + uInt32HexStr(transactionResponse))
else:
writeResponse.append(transactionResponse)
chipsLog.debug("Write success! Register '" + addrTableItem.getName() + "' assigned: 0x" + uInt32HexStr(transactionResponse))
self.queueClear()
response = [readResponse, writeResponse]
return response
def queueClear(self):
"""Clears the current queue of transactions"""
chipsLog.debug("Clearing transaction queue")
self._queuedRequests = []
self._queuedAddrTableItems = []
self._queuedIsARead =[]
def read(self, name, addrOffset=0):
"""Read from a single masked/unmasked 32-bit register. The result is returned from the function.
This read transaction runs straight away - i.e it's not queued at all.
Warning: using this method clears any previously queued transactions
that have not yet been run!
name: the register name of the register you want to read from.
addrOffset: optional - provide a 32-bit word offset if you wish.
Notes: Use the addrOffset at your own risk! No checking is done to
see if offsets are remotely sensible!
"""
if len(self._queuedRequests):
chipsLog.warning("Warning: Individual read requested, clearing previously queued transactions!\n")
self.queueClear()
self.queueRead(name, addrOffset)
result = self.queueRun()
return result[0][0]
def write(self, name, dataU32, addrOffset=0):
"""Write to a single register (masked, or otherwise).
This write transaction runs straight away - i.e it's not queued at all.
Warning: using this method clears any previously queued transactions
that have not yet been run!
name: the register name of the register you want to read from.
dataU32: the 32-bit value you want writing
addrOffset: optional - provide a 32-bit word offset if you wish.
Notes:
Use the addrOffset at your own risk! No checking is done to
see if offsets are remotely sensible!
Under the hood, this is implemented as an RMW-bits transaction.
"""
if len(self._queuedRequests):
chipsLog.warning("Warning: Individual write requested, clearing previously queued transactions!\n")
self.queueClear()
dataU32 = dataU32 & 0xffffffff # Ignore oversize input.
self.queueWrite(name, dataU32, addrOffset)
self.queueRun()
def blockRead(self, name, depth=1, addrOffset=0):
"""Block read (not for masked registers!). Returns a list of the read results (32-bit numbers).
The blockRead() transaction runs straight away - it cannot be queued.
name: the register name of the register you want to read from.
depth: the number of 32-bit reads deep you want to go from the start address.
(i.e. depth=3 will return a list with three 32-bit values).
addrOffset: optional - provide a 32-bit word offset if you wish.
Notes: Use the depth and addrOffset at your own risk! No checking is done to
see if these values are remotely sensible!
"""
chipsLog.debug("Block read requested: register '" + name + "' with addrOffset = 0x"
+ uInt32HexStr(addrOffset) + " and depth = " + str(depth))
return self._blockOrFifoRead(name, depth, addrOffset, False)
def fifoRead(self, name, depth=1, addrOffset=0):
"""Non-incrementing block read (not for masked registers!). Returns list of the read results.
Reads from the same address the number of times specified by depth
The fifoRead() transaction runs straight away - it cannot be queued.
name: the register name of the register you want to read from.
depth: the number of 32-bit reads you want to perform on the FIFO
(i.e. depth=3 will return a list with three 32-bit values).
addrOffset: optional - provide a 32-bit word offset if you wish.
Notes: Use the depth and addrOffset at your own risk! No checking is done to
see if these values are remotely sensible!
"""
chipsLog.debug("FIFO read (non-incrementing block read) requested: register '" + name + "' with addrOffset = 0x"
+ uInt32HexStr(addrOffset) + " and depth = " + str(depth))
return self._blockOrFifoRead(name, depth, addrOffset, True)
def blockWrite(self, name, dataList, addrOffset=0):
"""Block write (not for masked registers!).
The blockWrite() transaction runs straight away - it cannot be queued.
name: the register name of the register you want to read from.
dataList: the list of 32-bit values you want writing. The size of the list
determines how deep the block write goes.
addrOffset: optional - provide a 32-bit word offset if you wish.
Notes: Use this at your own risk! No checking is currently done to see if
you will be stomping on any other registers if the dataList or addrOffset
is inappropriate in size!
"""
chipsLog.debug("Block write requested: register '" + name + "' with addrOffset = 0x"
+ uInt32HexStr(addrOffset) + " and depth = " + str(len(dataList)))
return self._blockOrFifoWrite(name, dataList, addrOffset, False)
def fifoWrite(self, name, dataList, addrOffset=0):
"""Non-incrementing block write (not for masked registers!).
Writes all the values held in the dataList to the same register.
The fifoWrite() transaction runs straight away - it cannot be queued.
name: the register name of the register you want to read from.
dataList: the list of 32-bit values you want writing. The size of the list
determines how many writes will be performed on the FIFO.
addrOffset: optional - provide a 32-bit word offset if you wish.
Notes: Use this at your own risk! No checking is currently done to see if
you will be stomping on any other registers if the dataList or addrOffset
is inappropriate in size!
"""
chipsLog.debug("FIFO write (non-incrementing block write) requested: register '" + name + "' with addrOffset = 0x"
+ uInt32HexStr(addrOffset) + " and depth = " + str(len(dataList)))
return self._blockOrFifoWrite(name, dataList, addrOffset, True)
def _getTransactionId(self):
"""Returns the current value of the transaction ID counter and increments.
Note: Transaction ID = 0 will be reserved for byte-order transactions, which
are common and rather uninteresting. For any other kind of transaction, this
can be used to get access to an incrementing counter, that will go from 1->2047
before looping back around to 1.
"""
currentValue = self._transactionId
if self._transactionId < ChipsBus.MAX_TRANSACTION_ID:
self._transactionId += 1
else:
self._transactionId = 1
return currentValue
def _createRMWBitsTransactionElement(self, addrTableItem, dataU32, addrOffset = 0):
"""Returns a Read/Modify/Write Bits Request transaction element (i.e. masked write)
addrTableItem: The relevant address table item you want to perform the RMWBits transaction on.
dataU32: The data (32 bits max, or equal in width to the bit-mask).
addrOffset: The offset on the address specified within the address table item, default is 0.
"""
if not uInt32Compatible(dataU32):
raise ChipsException("Read-Modify-Write Bits transaction creation error: cannot create a RMW-bits " \
"transaction with data values (" + hex(dataU32) +") that are not valid 32-bit " \
"unsigned integers!")
rmwHeader = IPbusHeader.makeHeader(ChipsBus.IPBUS_PROTOCOL_VER, self._getTransactionId(), 1, IPbusHeader.TYPE_ID_RMW_BITS, IPbusHeader.INFO_CODE_REQUEST)
rmwBody = [addrTableItem.getAddress() + addrOffset, \
uInt32BitFlip(addrTableItem.getMask()), \
addrTableItem.shiftDataToMask(dataU32)]
return TransactionElement.makeFromHeaderAndBody(rmwHeader, rmwBody)
def _createWriteTransactionElement(self, addrTableItem, dataList, addrOffset = 0, isFifo = False):
"""Returns a Write Request transaction element (i.e. unmasked/block write)
addrTableItem: The relevant address table item you want to perform the write transaction on.
dataList: The list of 32-bit numbers you want to write (the list size defines the write depth)
addrOffset: The offset on the address specified within the address table item, default is 0.
isFifo: False gives a normal write transaction; True gives a non-incrementing write transaction (i.e. same addr many times).
"""
for value in dataList:
if not uInt32Compatible(value):
raise ChipsException("Write transaction creation error: cannot create a write transaction with data " \
"values (" + hex(value) +") that are not valid 32-bit unsigned integers!")
typeId = IPbusHeader.TYPE_ID_WRITE
if isFifo: typeId = IPbusHeader.TYPE_ID_NON_INCR_WRITE
writeHeader = IPbusHeader.makeHeader(ChipsBusBase.IPBUS_PROTOCOL_VER, self._getTransactionId(), len(dataList), typeId, IPbusHeader.INFO_CODE_REQUEST)
writeBody = [addrTableItem.getAddress() + addrOffset] + dataList
return TransactionElement.makeFromHeaderAndBody(writeHeader, writeBody)
def _createReadTransactionElement(self, addrTableItem, readDepth = 1, addrOffset = 0, isFifo = False):
"""Returns a Read Request transaction element
addrTableItem: The relevant address table item you want to perform the write transaction on.
readDepth: The depth of the read; default is 1, which would be a single 32-bit register read.
addrOffset: The offset on the address specified within the address table item, default is 0.
isFifo: False gives a normal read transaction; True gives a non-incrementing read transaction (i.e. same addr many times).
"""
typeId = IPbusHeader.TYPE_ID_READ
if isFifo: typeId = IPbusHeader.TYPE_ID_NON_INCR_READ
readHeader = IPbusHeader.makeHeader(ChipsBusBase.IPBUS_PROTOCOL_VER, self._getTransactionId(), readDepth, typeId, IPbusHeader.INFO_CODE_REQUEST)
readBody = [addrTableItem.getAddress() + addrOffset]
return TransactionElement.makeFromHeaderAndBody(readHeader, readBody)
def _makeAndRunTransaction(self, requestsList):
"""Constructs, runs and then returns a completed transaction from the given requestsList
requestsList: a list of TransactionElements (i.e. requests from client to the hardware).
        Notes: byte-order transactions may be prepended automatically so that the
        request packet meets the minimum Ethernet payload size.
        """
        # Construct the transaction and serialise it (byte-order transactions may be
        # prepended so the packet meets the minimum Ethernet payload size).
transaction = Transaction.constructClientTransaction(requestsList, self._hostAddr)
transaction.serialiseRequests()
chipsLog.debug("Sending packet now.");
try:
# Send the transaction
self._socketSend(transaction)
except socket.error, socketError:
raise ChipsException("A socket error occurred whilst sending the IPbus transaction request packet:\n\t" + str(socketError))
try:
# Get response
transaction.serialResponses = self._socket.recv(ChipsBus.SOCKET_BUFFER_SIZE)
except socket.error, socketError:
raise ChipsException("A socket error occurred whilst getting the IPbus transaction response packet:\n\t" + str(socketError))
chipsLog.debug("Received response packet.");
transaction.deserialiseResponses()
transaction.doTransactionChecks() # Generic transaction checks
self._transactionId = 1 # TEMPORARY IPBUS V2.x HACK! Reset the transaction ID to 1 for each packet.
return transaction
def _initSocketCommon(self, localPort):
"""Performs common socket initialisation (i.e. common to UDP + TCP)"""
if localPort != None:
localAddr = ("", localPort)
self._socket.bind(localAddr)
self._socket.settimeout(1)
def _blockOrFifoRead(self, name, depth, addrOffset, isFifo = False):
"""Common code for either a block read or a FIFO read."""
if depth <= 0:
chipsLog.warn("Ignoring read with depth = 0 from register '" + name + "'!")
return
if depth > ChipsBus.MAX_BLOCK_TRANSFER_DEPTH:
return self._oversizeBlockOrFifoRead(name, depth, addrOffset, isFifo)
addrTableItem = self.addrTable.getItem(name) # Get the details of the relevant item from the addr table.
if addrTableItem.getMask() != 0xffffffff:
raise ChipsException("Block/FIFO read error: cannot perform block or FIFO read on a masked register address!")
try:
if not addrTableItem.getReadFlag(): raise ChipsException("Read transaction creation error: read is not allowed on register '" + addrTableItem.getName() + "'.")
# create and run the transaction and get the response
transaction = self._makeAndRunTransaction( [self._createReadTransactionElement(addrTableItem, depth, addrOffset, isFifo)] )
except ChipsException, err:
raise ChipsException("Block/FIFO read error on register '" + name + "':\n\t" + str(err))
blockReadResponse = transaction.responses[-1] # Block read response will be last in list
chipsLog.debug("Block/FIFO read success! Register '" + name + "' (addrOffset=0x"
+ uInt32HexStr(addrOffset) + ") was read successfully." )
return blockReadResponse.getBody().tolist()
def _oversizeBlockOrFifoRead(self, name, depth, addrOffset, isFifo):
"""Handles a block or FIFO read that's too big to be handled by a single UDP packet"""
chipsLog.debug("Read depth too large for single packet... will automatically split read over many packets")
remainingTransactions = depth
result =[]
offsetMultiplier = 1
if isFifo: offsetMultiplier = 0
while remainingTransactions > ChipsBus.MAX_BLOCK_TRANSFER_DEPTH:
#print "REMAINING=",remainingTransactions
result.extend(self._blockOrFifoRead(name, ChipsBus.MAX_BLOCK_TRANSFER_DEPTH, addrOffset + ((depth - remainingTransactions) * offsetMultiplier), isFifo))
remainingTransactions -= ChipsBus.MAX_BLOCK_TRANSFER_DEPTH
#print "REMAINING: rest=",remainingTransactions
result.extend(self._blockOrFifoRead(name, remainingTransactions, addrOffset + ((depth - remainingTransactions) * offsetMultiplier), isFifo))
return result
def _blockOrFifoWrite(self, name, dataList, addrOffset, isFifo = False):
"""Common code for either a block write or a FIFO write."""
depth = len(dataList)
addrTableItem = self.addrTable.getItem(name) # Get the details of the relevant item from the addr table.
if addrTableItem.getMask() != 0xffffffff:
raise ChipsException("Block/FIFO write error: cannot perform block or FIFO write on a masked register address!")
if depth == 0:
chipsLog.warn("Ignoring block/FIFO write to register '" + name + "': dataList is empty!");
return
elif depth > ChipsBus.MAX_BLOCK_TRANSFER_DEPTH:
return self._oversizeBlockOrFifoWrite(name, dataList, addrOffset, isFifo)
try:
if not addrTableItem.getWriteFlag(): raise ChipsException("Write transaction creation error: write is not allowed on register '" + addrTableItem.getName() + "'.")
# create and run the transaction and get the response
self._makeAndRunTransaction( [self._createWriteTransactionElement(addrTableItem, dataList, addrOffset, isFifo)] )
except ChipsException, err:
raise ChipsException("Block/FIFO write error on register '" + name + "':\n\t" + str(err))
chipsLog.debug("Block/FIFO write success! " + str(depth) + " 32-bit words were written to '"
+ name + "' (addrOffset=0x" + uInt32HexStr(addrOffset) + ")")
def _oversizeBlockOrFifoWrite(self, name, dataList, addrOffset, isFifo):
"""Handling for a block write which is too big for the hardware to handle in one go"""
chipsLog.debug("Write depth too large for single packet... will automatically split write over many packets")
depth = len(dataList)
remainingTransactions = depth
offsetMultiplier = 1
if isFifo: offsetMultiplier = 0
while remainingTransactions > ChipsBus.MAX_BLOCK_TRANSFER_DEPTH:
self._blockOrFifoWrite(name, dataList[(depth - remainingTransactions):(depth - remainingTransactions) + ChipsBus.MAX_BLOCK_TRANSFER_DEPTH],
addrOffset + ((depth - remainingTransactions) * offsetMultiplier), isFifo)
remainingTransactions -= ChipsBus.MAX_BLOCK_TRANSFER_DEPTH
self._blockOrFifoWrite(name, dataList[(depth - remainingTransactions):], addrOffset + ((depth - remainingTransactions) * offsetMultiplier), isFifo)
def _socketSend(self, transaction):
raise NotImplementedError("ChipsBusBase is an Abstract Base Class!\n" \
"Please use a concrete implementation such as ChipsBusUdp or ChipsBusTcp!")
class ChipsBusUdp(ChipsBusBase):
"""Common Hardware Interface Protocol System Bus (CHIPS-Bus) using UDP packets for bus data.
Allows you to communicate with and control devices running Jeremy Mans's, et al, IP-based
uTCA control system firmware. This concrete class uses UDP packets for sending and
receiving the bus data.
"""
def __init__(self, addrTable, hostIp, hostPort, localPort = None):
"""Constructor for ChipsBus over UDP
addrTable: An instance of AddressTable for the device you wish to communicate with.
hostIP: The IP address of the device you want to control, e.g. the string '192.168.1.100'.
hostPort: The network port number of the device you want to control.
localPort: If you wish to bind the socket to a particular local port, then specify the
the local port number here. The default (None) means that the socket will not bind
        to any specific local port - an available port will be found when it comes to sending any
packets.
"""
ChipsBusBase.__init__(self, addrTable, hostIp, hostPort, localPort)
self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
self._initSocketCommon(localPort)
def _socketSend(self, transaction):
"""Send a transaction (via UDP)"""
self._socket.sendto(transaction.serialRequests, transaction.addr) # UDP-specific
class ChipsBusTcp(ChipsBusBase):
"""Common Hardware Interface Protocol System Bus (CHIPS-Bus) using TCP packets for bus data.
    Allows you to communicate with and control devices running the IP-based uTCA
    control system firmware by Jeremy Mans et al. This concrete class uses TCP
    packets for sending and receiving the bus data.
"""
def __init__(self, addrTable, hostIp, hostPort, localPort = None):
"""ChipsBus over TCP
addrTable: An instance of AddressTable for the device you wish to communicate with.
hostIP: The IP address of the device you want to control, e.g. the string '192.168.1.100'.
hostPort: The network port number of the device you want to control.
        localPort: If you wish to bind the socket to a particular local port, then specify
        the local port number here. The default (None) means that the socket will not bind
        to any specific local port - an available port will be found when it comes to
        sending any packets.
"""
ChipsBusBase.__init__(self, addrTable, hostIp, hostPort, localPort)
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # TCP
self._initSocketCommon(localPort)
self._socket.connect((hostIp, hostPort)) # TCP-specific
def _socketSend(self, transaction):
"""Send a transaction (via TCP)"""
self._socket.send(transaction.serialRequests) # TCP-specific
class ChipsBus(ChipsBusUdp):
"""Deprecated! Essentially now just an alias for ChipsBusUdp. Please update
your code replacing usage of ChipsBus with ChipsBusUdp."""
def __init__(self, addrTable, hostIp, hostPort, localPort = None):
ChipsBusUdp.__init__(self, addrTable, hostIp, hostPort, localPort)
chipsLog.warning("Please note: this class has been deprecated - use ChipsBusUdp"\
" in the future if you want the same functionality.")
|
[
"[email protected]"
] | |
595f1c198ad684dade52866bb75294a037bfaa99
|
e2d817d14d1d4cad190a435a17a76168edfa87e4
|
/Encoder.py
|
ba55dd2bdd3e49f3113a22683a643edaaa2baba4
|
[] |
no_license
|
KirillIvano/Snake
|
9560005ad5e3284a01c009908ac6fe1f5fb0e3e6
|
0c2e3e86cb0e4c08242d0b9427487d5bd049026c
|
refs/heads/master
| 2020-03-22T20:20:55.158147 | 2018-08-08T15:52:33 | 2018-08-08T15:52:33 | 140,591,891 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 233 |
py
|
import math
def encrypt(num):
return (num ** 2 - 10) * 7 - 3
def decrypt(num):
x = math.sqrt(abs((num + 3) / 7 + 10))
if x == int(x):
return int(x)
else:
return 0
print(encrypt(0))
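# Sanity check (illustrative): decrypt inverts encrypt for non-negative
# integers, since ((x**2 - 10)*7 - 3 + 3)/7 + 10 == x**2. For example,
# encrypt(5) == 102 and decrypt(102) == 5; encrypt(0) == -73 as printed above.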
|
[
"[email protected]"
] | |
e379a7cc8b122699f2ec0dd385e8fd29d4fb2681
|
ccb4cbe7bf23a828ee3c2c9100258e0454ae9162
|
/String Split and Join.py
|
3f9e5f64de8e1dc48337608100a315c75cc86e6d
|
[] |
no_license
|
rohanghosh7/Hackerrank_Python_Solution
|
7132d08b5ee30b02e9e9a2df50aa67d02b39fbe6
|
9efa3856916c9c41474bbe6c8ca07ce9fa4fd881
|
refs/heads/master
| 2022-08-10T02:25:51.123439 | 2020-03-30T15:14:34 | 2020-03-30T15:14:34 | 265,371,243 | 0 | 0 | null | 2020-05-19T21:30:32 | 2020-05-19T21:28:18 | null |
UTF-8
|
Python
| false | false | 174 |
py
|
def split_and_join(line):
line = line.replace(" ","-")
return line
if __name__ == '__main__':
line = input()
result = split_and_join(line)
print(result)
|
[
"[email protected]"
] | |
4d49580aa0fafe91993759da0f7b959794e84dfc
|
1f7481a40a1d44a58524e3fc4034a16f72ee8619
|
/experiments/depth/TBLogger.py
|
644ab0a8da2d5db24e3adcb449afc8cc24a8a0c9
|
[
"BSD-3-Clause"
] |
permissive
|
princeton-vl/oasis
|
f044f00b3fd6446ffeb19ac62f94b771f5996558
|
5835d24c331d78e91becba29f7e4a53ccd3e376e
|
refs/heads/master
| 2023-06-18T18:00:13.043984 | 2021-07-22T00:34:58 | 2021-07-22T00:34:58 | 283,081,792 | 64 | 7 |
BSD-3-Clause
| 2021-06-10T17:02:59 | 2020-07-28T02:59:39 |
MATLAB
|
UTF-8
|
Python
| false | false | 1,042 |
py
|
'''
TensorBoard logger.
https://pytorch.org/docs/stable/tensorboard.html
'''
import config
from torch.utils.tensorboard import SummaryWriter
class TBLogger(object):
def __init__(self, folder, flush_secs=60):
self.writer = SummaryWriter(log_dir = folder, flush_secs=flush_secs)
def add_value(self, name, value, step):
self.writer.add_scalar(tag = name, scalar_value = value, global_step=step)
def add_image(self, name, value, step, dataformats):
self.writer.add_image(tag = name, img_tensor = value, global_step=step, dataformats=dataformats)
class TBLoggerX(object):
def __init__(self, folder, flush_secs=60):
self.writer = SummaryWriter(log_dir = folder, flush_secs=flush_secs)
def add_value(self, name, value, step):
self.writer.add_scalar(tag = name, scalar_value = value, global_step=step)
def add_image(self, name, value, step, dataformats):
self.writer.add_image(tag = name, img_tensor = value, global_step=step, dataformats=dataformats)
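# Example usage (illustrative; the log directory and tensors are placeholders):
#   logger = TBLogger("./runs/exp1", flush_secs=30)
#   logger.add_value("train/loss", 0.42, step=100)
#   logger.add_image("val/depth", depth_map, step=100, dataformats="HWC")
# Then run `tensorboard --logdir ./runs` to browse the logged scalars and images.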
|
[
"[email protected]"
] | |
7ca223afe5153d45121ca9011ccb886e87b49eb5
|
99fddc8762379bcb707ad53081cd342efa7a5d89
|
/test/pinocchio_frame_test.py
|
fa17c45921833826190201d02cca144b699b6959
|
[
"MIT"
] |
permissive
|
zhilinxiong/PyPnC
|
ef19a4bcc366666d2550466b07cd8ec8f098c0c4
|
abf9739c953d19ca57fd4bd37be43415f3d5e4a7
|
refs/heads/master
| 2023-07-04T19:09:26.115526 | 2021-08-03T04:29:10 | 2021-08-03T04:29:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,565 |
py
|
import os
import sys
cwd = os.getcwd()
sys.path.append(cwd)
import pinocchio as pin
import numpy as np
urdf_file = cwd + "/robot_model/manipulator/three_link_manipulator.urdf"
model = pin.buildModelFromUrdf(urdf_file)
data = model.createData()
print(model)
q = np.array([np.pi / 2., 0., 0.])
# q = np.zeros(3)
qdot = np.ones(3)
pin.forwardKinematics(model, data, q, qdot)
## Print Frame Names
print([frame.name for frame in model.frames])
## Calculate j2 placement
j2_frame = model.getFrameId('j1')
j2_translation = pin.updateFramePlacement(model, data, j2_frame)
print("j2 translation")
print(j2_translation)
## Calculate l2 placement
l2_frame = model.getFrameId('l2')
l2_translation = pin.updateFramePlacement(model, data, l2_frame)
print("l2 translation")
print(l2_translation)
## Calculate j2 jacobian
pin.computeJointJacobians(model, data, q)
j2_jacobian = pin.getFrameJacobian(model, data, j2_frame,
pin.ReferenceFrame.LOCAL_WORLD_ALIGNED)
print("j2 jacobian")
print(j2_jacobian)
## Calculate l2 jacobian
l2_jacobian = pin.getFrameJacobian(model, data, l2_frame,
pin.ReferenceFrame.LOCAL_WORLD_ALIGNED)
print("l2 jacobian")
print(l2_jacobian)
## Calculate j2 spatial velocity
j2_vel = pin.getFrameVelocity(model, data, j2_frame)
print("j2 vel")
print(j2_vel)
## Calculate l2 spatial velocity
l2_vel = pin.getFrameVelocity(model, data, l2_frame,
pin.ReferenceFrame.LOCAL_WORLD_ALIGNED)
print("l2 vel")
print(l2_vel)
print(np.dot(l2_jacobian, qdot))
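# Note: the product above should match the l2 spatial velocity printed earlier,
# since a frame's velocity satisfies v = J(q) . qdot when the Jacobian and the
# velocity are expressed in the same frame (LOCAL_WORLD_ALIGNED here).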
|
[
"[email protected]"
] | |
b4836447f7d919821ab1c54198aa56640ac26bdd
|
90025965043e73f4483b63557801e9463bead66c
|
/ex1/ex1_beginner.py
|
71df2f10c11b39487c787f79f510d98676e7b74d
|
[] |
no_license
|
University169/tetrika
|
1436654ccea634d5bb36198a8287d03472eadf79
|
1a96453d6a4b848434246cfa0a1395a8981accdc
|
refs/heads/master
| 2022-11-11T23:49:20.016355 | 2020-07-06T23:27:54 | 2020-07-06T23:27:54 | 274,887,099 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 925 |
py
|
# a = [12, 3, 51, 2, 17, 49, 80, 1, 4, 37]
def search_pairs(array, k):
n = len(array)
b = []
    for i in range(n):
        for j in range(i + 1, n):  # start at i+1: no self-pairs, and the last element is included
            if (array[i] + array[j]) == k:
                t_direct = tuple((array[i], array[j]))
                t_inverse = tuple((array[j], array[i]))  # reversed order, so (b, a) duplicates are caught
if (t_direct not in b) and (t_inverse not in b):
b.append(tuple((array[i], array[j])))
return b
def search_pairs_second(array, k):
n = len(array)
b = []
    for i in range(n):
        for j in range(i + 1, n):
            if (array[i] + array[j]) == k:
                b.append(tuple(sorted((array[i], array[j]))))  # sorted, so (a, b) and (b, a) collapse in the set
return list(set(b))
print(search_pairs_second([1, 2, 6, 5, 3, 4, 7, 8, 3, 2], 5))
# OUT: >> [(1, 4), (2, 3)]
"""
- Сложность алгоритма O(n**2)
- Можно оптимизировать (см реализацию ex1_optima.py)
"""
|
[
"[email protected]"
] | |
2e808d917489faf59e65fb3ab6a7e999316ec019
|
14a853584c0c1c703ffd8176889395e51c25f428
|
/sem1/fop/lab5/static/strings.py
|
2f47c15c3b3c7d3bd361c700be9a29ee4f30b077
|
[] |
no_license
|
harababurel/homework
|
d0128f76adddbb29ac3d805c235cdedc9af0de71
|
16919f3b144de2d170cd6683d54b54bb95c82df9
|
refs/heads/master
| 2020-05-21T12:25:29.248857 | 2018-06-03T12:04:45 | 2018-06-03T12:04:45 | 43,573,199 | 6 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 662 |
py
|
"""
Most long messages displayed by the UI will be found here.
"""
from util.Color import bold
STRINGS = {
'helpPrompt':
'Commands:\n' +
'\t%s - displays this prompt.\n' % bold('help') +
'\t%s - adds a new student or assignment.\n' % bold('add') +
'\t%s - displays all students or assignments.\n' % bold('list') +
'\t%s - goes to previous state.\n' % bold('undo') +
'\t%s - goes to next state.\n' % bold('redo') +
'\t%s - clears the screen.\n' % bold('clear') +
'\t%s - saves the work session and exits the application.' % bold('exit')
}
|
[
"[email protected]"
] | |
6b1d1fdaa602c7768fb7a668612821ad314b4395
|
52d797a1a9f853f691d2d6fb233434cf9cc9e12b
|
/Implementation Challenges/Append and Delete.py
|
1e2622a5816301cb9b83c0a56d915bdfe4639df0
|
[] |
no_license
|
harshildarji/Algorithms-HackerRank
|
f1c51fedf2be9e6fbac646d54abccb7e66800e22
|
96dab5a76b844e66e68a493331eade91541fd873
|
refs/heads/master
| 2022-05-21T06:57:59.362926 | 2020-04-19T14:05:19 | 2020-04-19T14:05:19 | 114,212,208 | 11 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 328 |
py
|
# Append and Delete
# https://www.hackerrank.com/challenges/append-and-delete/problem
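# Approach: spend one operation per loop pass deleting the last character of s,
# breaking once s is a prefix of t and the i remaining operations exactly cover
# the characters left to append, or once s is empty (deletes on an empty string
# are no-ops, so surplus operations can be absorbed there). The final check
# confirms the outstanding appends fit in the budget.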
s, t = input().strip(), input().strip()
k = int(input().strip())
for i in reversed(range(1, k + 1)):
if s == t[:len(s)] and len(t) - len(s) == i or len(s) == 0:
break
s = s[:-1]
print("Yes" if len(t) - len(s) <= i else "No")
|
[
"[email protected]"
] | |
d3c5885435cc1f7e48eb2fec6f3463ab5093bd4b
|
f253f06b816503837744620befad8aed554ec72f
|
/tests/test_perceptron_multi_couches.py
|
a47b5dd51f5cd1e4a96b2b78ce4b4c0197ca259a
|
[] |
no_license
|
julienbrosseau/IFT712-Projet
|
edc67ad4cd2437b57f9bdcb6b2fd1cc391e4c3d5
|
911090fc4a81e2a4f8c38d41543001cb1cc2da33
|
refs/heads/master
| 2020-09-11T04:11:27.170890 | 2019-12-11T22:13:06 | 2019-12-11T22:13:06 | 221,934,906 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,582 |
py
|
# Test of the "perceptron_multi_couches.py" file
import bin.data_opening as op
import bin.treatment as tr
import bin.perceptron_multi_couches as mlp
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
# Load the data
data_opening = op.DataOpening()
data_train = data_opening.get_training_data()
data_test = data_opening.get_testing_data()
data_ref = data_opening.get_referencing_data()
# Preprocess the data
treatment = tr.Treatment()
data_train = treatment.data_treatment(data_train)
data_test = treatment.data_treatment(data_test)
# Classification with a multi-layer perceptron
mlp = mlp.Mlp()
# Assign features and labels
t_train = data_train["Survived"]
x_train = data_train.drop(["Survived"], axis=1)
x_test = data_test
t_test = data_ref["Survived"]
# Train on the data
mlp.crossValidation()
mlp.fit(x_train, t_train)
predict_train = mlp.predict(x_train)
# Predict on the test data
predic_test = mlp.predict(x_test)
# Display the data according to their classification
# Display training and test errors
print("Training error: ", (1 - mlp.score(x_train, t_train)) * 100, "%")
print("Test error: ", (1 - mlp.score(x_test, t_test)) * 100, "%")
print("Best hyperparameter: ", mlp.get_best_param())
# Confusion matrix
sns.heatmap(confusion_matrix(t_test, predic_test), annot=True, lw=2, cbar=False)
plt.title("Confusion matrix")
plt.ylabel("Actual values")
plt.xlabel("Predicted values")
plt.show()
|
[
"[email protected]"
] | |
a3b496709b951d6d6b324d5404e6b63b78605179
|
4f1064197ea9c480eb0d2b883bdac4117b9e4ca4
|
/examples/omxcommand.py
|
8b664068d854546a5213509c2540d76a3e169e07
|
[
"MIT"
] |
permissive
|
Douglas6/omxcontrol
|
b87e8d58b23025baae9c2d5dab3f0b7ecde81e72
|
78bc887f70578d2448d3da321a16ec5ea6e51d3f
|
refs/heads/master
| 2020-05-16T15:48:13.664469 | 2015-03-09T17:49:15 | 2015-03-09T17:49:15 | 30,987,387 | 11 | 1 | null | 2015-05-07T14:08:06 | 2015-02-18T21:16:15 |
Python
|
UTF-8
|
Python
| false | false | 2,050 |
py
|
#!/usr/bin/env python
import argparse
from omxcontrol import *
parser = argparse.ArgumentParser()
parser.add_argument("cmd", help="omxplayer command")
parser.add_argument("-u", "--user", dest="user", help="omxplayer user")
parser.add_argument("-n", "--name", dest="name", help="omxplayer D-Bus name")
args = parser.parse_args()
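# Example invocation (illustrative): `python omxcommand.py p` sends the pause
# command to the default omxplayer instance; -u/-n address a specific user or
# D-Bus name.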
try:
omx = OmxControl(user=args.user, name=args.name)
if args.cmd == "1": omx.action(OmxControl.ACTION_DECREASE_SPEED)
elif args.cmd == "2": omx.action(OmxControl.ACTION_INCREASE_SPEED)
elif args.cmd == "<": omx.action(OmxControl.ACTION_REWIND)
elif args.cmd == ">": omx.action(OmxControl.ACTION_FAST_FORWARD)
elif args.cmd == "z": print(omx.properties())
elif args.cmd == "j": omx.action(OmxControl.ACTION_PREVIOUS_AUDIO)
elif args.cmd == "k": omx.action(OmxControl.ACTION_NEXT_AUDIO)
elif args.cmd == "i": omx.action(OmxControl.ACTION_PREVIOUS_CHAPTER)
elif args.cmd == "o": omx.action(OmxControl.ACTION_NEXT_CHAPTER)
elif args.cmd == "n": omx.action(OmxControl.ACTION_PREVIOUS_SUBTITLE)
elif args.cmd == "m": omx.action(OmxControl.ACTION_NEXT_SUBTITLE)
elif args.cmd == "s": omx.action(OmxControl.ACTION_TOGGLE_SUBTITLE)
elif args.cmd == "w": omx.showSubtitles()
elif args.cmd == "x": omx.hideSubtitles()
elif args.cmd == "d": omx.action(OmxControl.ACTION_DECREASE_SUBTITLE_DELAY)
elif args.cmd == "f": omx.action(OmxControl.ACTION_INCREASE_SUBTITLE_DELAY)
elif args.cmd == "q": omx.quit()
elif args.cmd == "p": omx.pause()
elif args.cmd == "-": omx.action(OmxControl.ACTION_DECREASE_VOLUME)
elif args.cmd == "+" or args.cmd == "=": omx.action(OmxControl.ACTION_INCREASE_VOLUME)
elif args.cmd == "<<": omx.action(OmxControl.ACTION_SEEK_BACK_SMALL)
elif args.cmd == ">>": omx.action(OmxControl.ACTION_SEEK_FORWARD_SMALL)
elif args.cmd == "<<<": omx.action(OmxControl.ACTION_SEEK_BACK_LARGE)
elif args.cmd == ">>>": omx.action(OmxControl.ACTION_SEEK_FORWARD_LARGE)
except OmxControlError as ex:
print(ex.message)
|
[
"[email protected]"
] | |
4433c80d2920bbc698bca015e2f1863e260c09e5
|
8e728ab05f880ffe210c58d5fbc60f3cce4b01f2
|
/server/tasks.py
|
f0fc928716f1c11e6472520fb1563a30982b1c79
|
[] |
no_license
|
SanaSystem/sananode
|
adb707ec70d7b17b581d06c7b6b59083999e6492
|
2c38910637a4957ae551327f2ceb1333449fa369
|
refs/heads/master
| 2022-12-10T09:06:13.562720 | 2020-08-16T19:43:30 | 2020-08-16T19:43:30 | 140,470,727 | 2 | 7 | null | 2022-12-08T02:16:18 | 2018-07-10T18:10:31 |
JavaScript
|
UTF-8
|
Python
| false | false | 3,873 |
py
|
from celery import shared_task, task
from celery.task.schedules import schedule
from celery.decorators import periodic_task
from .utils import decompose_medblocks, to_set, to_dict_list, reconstruct_medblocks, remove_duplicates, approved_decompose_medblocks
from .blockchain import retrieve_from_tangle, broadcast_on_tangle, server
import couchdb
from sananode.settings import COUCHDB_ADMIN_BASE_URL
from server.models import SyncParameters
import requests
import json
import time
import ipfsapi
from django.core.cache import cache
LOCK_EXPIRE = 60 * 5
@shared_task
def async_broadcast_on_tangle(list_of_elements):
result = broadcast_on_tangle(list_of_elements)
if len(result) > 0:
return True
else:
return False
@task
def check_iota_sync(email):
# list all documents associated with user
db = server['medblocks']
results, iota_new = retrieve_from_tangle(email)
simple_sync = True
if simple_sync:
docs = [db[medblock.id] for medblock in db.view('preview/patient', key=email)]
db_medfrags = to_set(approved_decompose_medblocks(docs))
iota_medfrags = to_set(results)
transmit_to_iota = db_medfrags - iota_medfrags
print("DB MEDFRAGS: {} , IOTA MEDFRAGS: {}".format(len(db_medfrags), len(iota_medfrags)))
db_update = len(iota_medfrags - db_medfrags) > 0
if len(transmit_to_iota) > 0:
print("Transmitting {} transaction to IOTA".format(len(transmit_to_iota)))
broadcast_on_tangle(to_dict_list(transmit_to_iota))
if db_update:
print("Difference {}".format(iota_medfrags - db_medfrags))
reconstruction_medfrags = iota_medfrags | db_medfrags
reconstruction_medfrags = to_dict_list(reconstruction_medfrags)
new_documents = reconstruct_medblocks(reconstruction_medfrags)
print("Updating {} documents on the database".format(len(new_documents)))
for doc in new_documents:
id = doc['_id']
doc = couchdb.Document(doc)
try:
old_document = db[id]
doc['_rev'] = old_document.rev
db.save(doc)
except couchdb.http.ResourceNotFound:
db[id] = doc
return True
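# Sync sketch: documents are decomposed into "medfrag" sets on both sides; the
# frags the DB has but the tangle lacks are broadcast, and when the tangle
# holds frags the DB lacks, documents are rebuilt from the union and upserted
# (CouchDB requires the current _rev to update, hence the ResourceNotFound path).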
def check_ipfs_sync(email):
db = server['medblocks']
results = db.view('preview/ipfshashes', key=email)
hashes = [r.value for r in results]
for hash in hashes:
check_ipfs_file.delay(hash)
@task
def check_ipfs_file(hash):
print("Syncing ipfs hash {}".format(hash))
client = ipfsapi.Client("ipfs", 5001)
client.cat(hash)
requests.get("https://ipfs.infura.io/ipfs/{}/".format(hash))
requests.get("https://ipfs.infura.io:5001/api/v0/pin/add?arg=/ipfs/{}".format(hash))
requests.get("http://ipfs.io/ipfs/{}/".format(hash))
return
@periodic_task(run_every=5, name="Sync IOTA", ignore_result=True)
def check_all_users():
lock_id = "checkiotasync"
acquire_lock = lambda: cache.add(lock_id, "true", LOCK_EXPIRE)
release_lock = lambda: cache.delete(lock_id)
if acquire_lock():
try:
db = couchdb.Server(COUCHDB_ADMIN_BASE_URL)['_users']
emails = [i.key for i in db.view('preview/list')]
emails = remove_duplicates(emails)
for email in emails:
print("Checking for :{}".format(email))
check_iota_sync(email)
# check_ipfs_sync(email)
finally:
release_lock()
else:
print("Task already running. Will wait for completion")
|
[
"[email protected]"
] | |
98efe801dbeb74dbc82ab21f122b91d51a31aadb
|
a6b09f253a12a0723a1116c67a2c35048e7a5788
|
/unesco/migrations/0002_auto_20201024_0539.py
|
6e41989d57a118a64d3c3be2d4fec628c7d747c7
|
[] |
no_license
|
ravik9158/django_projects
|
bc61b5a35018065873e4f5be6e1c17598e4c0cbb
|
723cb1719a4d386cf3c25a31441f564250f750e0
|
refs/heads/main
| 2023-01-03T21:56:15.867273 | 2020-11-02T16:36:10 | 2020-11-02T16:36:10 | 309,423,604 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 353 |
py
|
# Generated by Django 3.1.1 on 2020-10-24 05:39
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('unesco', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='category',
old_name='category',
new_name='name',
),
]
|
[
"[email protected]"
] | |
d1b8918a50eb13a7832d3eeb130092b3a684ac79
|
4fbafbec0766fe5235d6733aea3459e6dd264689
|
/src/lib/models/utils.py
|
94fecb70e54efdd7f55059a66957671106c74b99
|
[
"MIT"
] |
permissive
|
yangchengjun/HPRNet
|
7a4f51fb1fa2930a705f0de6bb67f23a554b1bce
|
a23e691102ed50bd24391e6295c74f452592cdae
|
refs/heads/master
| 2023-07-31T03:00:50.882141 | 2021-09-27T11:25:38 | 2021-09-27T11:25:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,586 |
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
def _sigmoid(x):
y = torch.clamp(x.sigmoid_(), min=1e-4, max=1-1e-4)
return y
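# Clamping keeps the sigmoid output away from exact 0 and 1 so that downstream
# log() calls (e.g. in a focal-style loss) remain finite.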
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _tranpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3))
feat = _gather_feat(feat, ind)
return feat
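# Shape sketch (illustrative): feat enters as (B, C, H, W); after the permute
# and view it is (B, H*W, C), and ind holds flattened spatial indices of shape
# (B, K), so the gather returns the K selected feature vectors per image,
# shape (B, K, C).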
def flip_tensor(x):
return torch.flip(x, [3])
# tmp = x.detach().cpu().numpy()[..., ::-1].copy()
# return torch.from_numpy(tmp).to(x.device)
def flip_lr(x, flip_idx):
tmp = x.detach().cpu().numpy()[..., ::-1].copy()
shape = tmp.shape
for e in flip_idx:
tmp[:, e[0], ...], tmp[:, e[1], ...] = \
tmp[:, e[1], ...].copy(), tmp[:, e[0], ...].copy()
return torch.from_numpy(tmp.reshape(shape)).to(x.device)
def flip_lr_off(x, flip_idx, num_kp = 17):
tmp = x.detach().cpu().numpy()[..., ::-1].copy()
shape = tmp.shape
tmp = tmp.reshape(tmp.shape[0], num_kp, 2,
tmp.shape[2], tmp.shape[3])
tmp[:, :, 0, :, :] *= -1
for e in flip_idx:
tmp[:, e[0], ...], tmp[:, e[1], ...] = \
tmp[:, e[1], ...].copy(), tmp[:, e[0], ...].copy()
return torch.from_numpy(tmp.reshape(shape)).to(x.device)
|
[
"[email protected]"
] | |
444497d7d6946f0783350675c023b36c83f68076
|
406921ee42acbbc091c60a381eb9289b1897c90e
|
/Examples/Modules/laser_injection/analysis_2d.py
|
fffa7d3a19934eb397da780ec223f442265bd4a8
|
[
"BSD-3-Clause-LBNL",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
lge0303/WarpX
|
328354aaff9b688db693cefc1f76d52a7f39b19f
|
5805f90c6937d135b1fe2f6bd0891d99e7e219e0
|
refs/heads/master
| 2021-09-16T03:40:42.954718 | 2021-08-29T01:50:27 | 2021-08-29T01:50:27 | 160,225,442 | 0 | 0 |
NOASSERTION
| 2021-08-29T02:43:10 | 2018-12-03T17:03:15 |
C++
|
UTF-8
|
Python
| false | false | 6,807 |
py
|
#! /usr/bin/env python
# Copyright 2019 Andrew Myers, Jean-Luc Vay, Maxence Thevenet
# Remi Lehe, Weiqun Zhang, Luca Fedeli
#
# This file is part of WarpX.
#
# License: BSD-3-Clause-LBNL
# This file is part of the WarpX automated test suite. Its purpose is to test the
# injection of a Gaussian laser pulse from an antenna in a 2D simulation.
# In order to avoid privileged directions, the laser is injected at
# approximately 27 degrees with respect to the x axis. Moreover the polarization axis is neither
# parallel nor perpendicular to the xz plane. Finally moving window along the
# x axis is enabled.
# The test calculates the envelope of each component of the laser pulse at the end of
# the simulation and it compares it with theory. It also checks that the
# central frequency of the Fourier transform is the expected one.
import yt
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import hilbert
from mpl_toolkits.axes_grid1 import make_axes_locatable
sys.path.insert(1, '../../../../warpx/Regression/Checksum/')
import checksumAPI
# Maximum acceptable error for this test
relative_error_threshold = 0.05
# A small number
small_num = 1.0e-8
# Physical parameters
um = 1.e-6
fs = 1.e-15
c = 299792458
# Parameters of the gaussian beam
wavelength = 1.*um
w0 = 5.*um
tt = 10.*fs
x_c = 10.*um
t_c = 24.*fs
# foc_dist = 13.109*um (not actually used)
E_max = 4e12
# laser direction
dir_vector = np.array([2.,0,1.0])
dir_vector /= np.linalg.norm(dir_vector)
rot_angle = np.arctan(dir_vector[2]/dir_vector[0])
# polarization vector
pol_vector = np.array([1.0,1.0,-2.0])
pol_vector /= np.linalg.norm(pol_vector)
# Calculates the envelope of a Gaussian beam
def gauss_env(T,XX,ZZ):
'''Function to compute the theory for the envelope
'''
Z = np.cos(rot_angle)*(XX-x_c) + np.sin(rot_angle)*ZZ
X = -np.sin(rot_angle)*(XX-x_c) + np.cos(rot_angle)*ZZ
inv_tau2 = 1./tt/tt
inv_w_2 = 1.0/(w0*w0)
exp_arg = - (X*X)*inv_w_2 - inv_tau2 / c/c * (Z-T*c)*(Z-T*c)
return E_max * np.real(np.exp(exp_arg))
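# The rotation above maps lab coordinates into the beam frame: Z runs along the
# propagation direction (rot_angle = arctan(1/2) ~ 26.6 degrees from x), X is
# transverse, and the envelope is exp(-X^2/w0^2 - (Z - c*T)^2 / (c*tt)^2).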
# Checks envelope and central frequency for a given laser component
def check_component(data, component, t_env_theory, coeff, X,Z,dx,dz):
print("*** Checking " + component + " ***")
field = data['boxlib', component].v.squeeze()
env = abs(hilbert(field))
env_theory = t_env_theory*np.abs(coeff)
# Plot results
fig = plt.figure(figsize=(12,6))
ax1 = fig.add_subplot(221, aspect='equal')
ax1.set_title('PIC field')
p1 = ax1.pcolormesh(X,Z,field)
cax1 = make_axes_locatable(ax1).append_axes('right', size='5%', pad=0.05)
fig.colorbar(p1, cax=cax1, orientation='vertical')
ax2 = fig.add_subplot(222, aspect='equal')
ax2.set_title('PIC envelope')
p2 = ax2.pcolormesh(X,Z,env)
cax2 = make_axes_locatable(ax2).append_axes('right', size='5%', pad=0.05)
fig.colorbar(p2, cax=cax2, orientation='vertical')
ax3 = fig.add_subplot(223, aspect='equal')
ax3.set_title('Theory envelope')
p3 = ax3.pcolormesh(X,Z,env_theory)
cax3 = make_axes_locatable(ax3).append_axes('right', size='5%', pad=0.05)
fig.colorbar(p3, cax=cax3, orientation='vertical')
ax4 = fig.add_subplot(224, aspect='equal')
ax4.set_title('Difference')
p4 = ax4.pcolormesh(X,Z,env-env_theory)
cax4 = make_axes_locatable(ax4).append_axes('right', size='5%', pad=0.05)
fig.colorbar(p4, cax=cax4, orientation='vertical')
plt.tight_layout()
plt.savefig("plt_" + component + ".png", bbox_inches='tight')
if(np.abs(coeff) < small_num):
is_field_zero = np.sum(np.abs(env)) < small_num
if is_field_zero :
print("[OK] Field component expected to be 0 is ~ 0")
else :
print("[FAIL] Field component expected to be 0 is NOT ~ 0")
assert(is_field_zero)
print("******\n")
return
relative_error_env = np.sum(np.abs(env-env_theory)) / np.sum(np.abs(env_theory))
is_env_ok = relative_error_env < relative_error_threshold
if is_env_ok :
print("[OK] Relative error envelope: {:6.3f} %".format(relative_error_env*100))
else :
print("[FAIL] Relative error envelope: {:6.3f} %".format(relative_error_env*100))
assert(is_env_ok)
fft_field = np.fft.fft2(field)
freq_rows = np.fft.fftfreq(fft_field.shape[0],dx/c)
freq_cols = np.fft.fftfreq(fft_field.shape[1],dz/c)
pos_max = np.unravel_index(np.abs(fft_field).argmax(), fft_field.shape)
freq = np.sqrt((freq_rows[pos_max[0]])**2 + (freq_cols[pos_max[1]]**2))
exp_freq = c/wavelength
relative_error_freq = np.abs(freq-exp_freq)/exp_freq
is_freq_ok = relative_error_freq < relative_error_threshold
if is_freq_ok :
print("[OK] Relative error frequency: {:6.3f} %".format(relative_error_freq*100))
else :
print("[FAIL] Relative error frequency: {:6.3f} %".format(relative_error_freq*100))
assert(is_freq_ok)
print("******\n")
def check_laser(filename):
ds = yt.load(filename)
# yt 4.0+ has rounding issues with our domain data:
# RuntimeError: yt attempted to read outside the boundaries
# of a non-periodic domain along dimension 0.
if 'force_periodicity' in dir(ds): ds.force_periodicity()
x = np.linspace(
ds.domain_left_edge[0].v,
ds.domain_right_edge[0].v,
ds.domain_dimensions[0])
dx = (ds.domain_right_edge[0].v-ds.domain_left_edge[0].v)/(ds.domain_dimensions[0]-1)
z = np.linspace(
ds.domain_left_edge[1].v,
ds.domain_right_edge[1].v,
ds.domain_dimensions[1])
dz = (ds.domain_right_edge[1].v-ds.domain_left_edge[1].v)/(ds.domain_dimensions[1]-1)
X, Z = np.meshgrid(x, z, indexing='ij')
# Compute the theory for envelope
env_theory = gauss_env(+t_c-ds.current_time.to_value(),X,Z)+gauss_env(-t_c+ds.current_time.to_value(),X,Z)
# Read laser field in PIC simulation, and compute envelope
all_data_level_0 = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions)
b_vector = np.cross(dir_vector, pol_vector)
components = ["Ex", "Ey", "Ez", "Bx", "By", "Bz"]
coeffs = [
pol_vector[0],
pol_vector[1],
pol_vector[2],
b_vector[0],
b_vector[1],
b_vector[2]]
field_facts = [1, 1, 1, 1/c, 1/c, 1/c]
for comp, coeff, field_fact in zip(components, coeffs, field_facts):
check_component(all_data_level_0, comp, field_fact*env_theory, coeff, X, Z, dx, dz)
def main():
filename_end = sys.argv[1]
check_laser(filename_end)
test_name = filename_end[:-9] # Could also be os.path.split(os.getcwd())[1]
checksumAPI.evaluate_checksum(test_name, filename_end)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
2118f12a1e03b7b2a2d66488ad573442c9f8c2ca
|
5a242ab085c26ff1c9893341714267a7a2628c9b
|
/analysis/codelists.py
|
5886e84b0c4497add341e767b3c99652d92b7961
|
[
"MIT"
] |
permissive
|
opensafely/ADTinj
|
4859ad060061e6e21ccb00bfb6af437c905c0f5e
|
00cdba64b00a4295c1ac4ac6006c2a632764d5c9
|
refs/heads/main
| 2023-08-23T10:41:33.839726 | 2023-06-28T21:13:34 | 2023-06-28T21:13:34 | 488,564,228 | 0 | 0 |
MIT
| 2023-06-28T21:13:35 | 2022-05-04T11:39:08 |
Python
|
UTF-8
|
Python
| false | false | 1,789 |
py
|
from cohortextractor import (
codelist_from_csv,
codelist,
)
ethnicity_codes = codelist_from_csv(
"codelists/opensafely-ethnicity-snomed-0removed.csv",
system="snomed",
column="snomedcode",
category_column="Grouping_6",
)
ADTinj = codelist_from_csv(
"codelists/user-agleman-adt-injectable-dmd.csv",
system="snomed",
column="dmd_id",
)
ADTinj1 = codelist_from_csv(
"codelists/user-agleman-adt-inj-1monthly-dmd.csv",
system="snomed",
column="dmd_id",
)
ADTinj3 = codelist_from_csv(
"codelists/user-agleman-adt-inj-3monthly-dmd.csv",
system="snomed",
column="dmd_id",
)
ADTinj6 = codelist_from_csv(
"codelists/user-agleman-adt-inj-6monthly-dmd.csv",
system="snomed",
column="dmd_id",
)
ADToral = codelist_from_csv(
"codelists/user-agleman-oral-adt-prostate-ca-dmd.csv",
system="snomed",
column="dmd_id",
)
prostate_cancer_codes = codelist_from_csv(
"codelists/user-agleman-prostate_cancer_snomed.csv",
system="snomed",
column="code",
)
ADTsecond_gener = codelist_from_csv(
"codelists/user-agleman-second-generation-antiandrogens3-dmd.csv",
system="snomed",
column="dmd_id",
)
# high cost drugs from the hospital - this is not available past 3/2020 - not usable
# Abiraterone
# abiraterone = codelist(
# ["abiraterone", "abiraterone acetate", "abiraterone acetate 500mg", "abiraterone acetate 500mg tablets", "Zytiga 500mg tablets", "Zytiga 500mg tablets (Janssen-Cilag Ltd)"],
# system="ctv3"
# )
#hcd = codelist(enzalutamide,abiraterone,darolutamide,apalutamide
#
# ["abiraterone", "abiraterone acetate", "abiraterone acetate 500mg", "abiraterone acetate 500mg tablets", "Zytiga 500mg tablets", "Zytiga 500mg tablets (Janssen-Cilag Ltd)"],
# system="ctv3"
# )
|
[
"[email protected]"
] | |
b55e183fd9206b56ad92286fde3c44ea50764e3c
|
d6a20ddd9db870d66916c77247f8746195926d7d
|
/Main project/skill_com/ui_skills.py
|
3927066ea5ef1c72223b8e63e8fc6e4a316f3d52
|
[] |
no_license
|
mersadcam/db_linkedin_uni
|
3e9f9f6e1431e22772b82252fe05f3ba9c49f054
|
d226fe7dc942236608ccf0307beafb298cbd07f2
|
refs/heads/main
| 2023-06-26T10:52:10.824433 | 2021-07-26T03:21:33 | 2021-07-26T03:21:33 | 384,461,636 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,737 |
py
|
# -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'skills.ui'
##
## Created by: Qt User Interface Compiler version 6.1.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide6.QtCore import * # type: ignore
from PySide6.QtGui import * # type: ignore
from PySide6.QtWidgets import * # type: ignore
import resources.resources_rc
class Ui_Skills(object):
def setupUi(self, Skills):
if not Skills.objectName():
Skills.setObjectName(u"Skills")
Skills.resize(1114, 850)
Skills.setMinimumSize(QSize(1111, 850))
Skills.setMaximumSize(QSize(1602, 850))
Skills.setStyleSheet(u"background:rgba(248, 253, 255, 206);\n"
"color:rgb(1, 31, 54);\\n")
self.centralwidget = QWidget(Skills)
self.centralwidget.setObjectName(u"centralwidget")
self.verticalLayout = QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName(u"verticalLayout")
self.horizontalLayout_4 = QHBoxLayout()
self.horizontalLayout_4.setObjectName(u"horizontalLayout_4")
self.back_pushButton = QPushButton(self.centralwidget)
self.back_pushButton.setObjectName(u"back_pushButton")
self.back_pushButton.setCursor(QCursor(Qt.PointingHandCursor))
self.back_pushButton.setStyleSheet(u"")
icon = QIcon()
icon.addFile(u":/images/back.png", QSize(), QIcon.Normal, QIcon.Off)
self.back_pushButton.setIcon(icon)
self.back_pushButton.setIconSize(QSize(28, 28))
self.back_pushButton.setFlat(True)
self.horizontalLayout_4.addWidget(self.back_pushButton)
self.label_4 = QLabel(self.centralwidget)
self.label_4.setObjectName(u"label_4")
font = QFont()
font.setFamilies([u"Nimbus Roman"])
font.setPointSize(20)
font.setBold(True)
font.setItalic(True)
self.label_4.setFont(font)
self.label_4.setTextFormat(Qt.AutoText)
self.horizontalLayout_4.addWidget(self.label_4)
self.horizontalSpacer_3 = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(self.horizontalSpacer_3)
self.verticalLayout.addLayout(self.horizontalLayout_4)
self.horizontalLayout = QHBoxLayout()
self.horizontalLayout.setObjectName(u"horizontalLayout")
self.horizontalSpacer = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout.addItem(self.horizontalSpacer)
self.edit_pushButton = QPushButton(self.centralwidget)
self.edit_pushButton.setObjectName(u"edit_pushButton")
self.edit_pushButton.setCursor(QCursor(Qt.PointingHandCursor))
self.edit_pushButton.setStyleSheet(u"QPushButton:hover\n"
"{\n"
" border:1px solid grey;\n"
" border-radius:5px;\n"
" background:rgba(214, 239, 255, 231);\n"
"}")
icon1 = QIcon()
icon1.addFile(u":/images/edit_icon2.png", QSize(), QIcon.Normal, QIcon.Off)
self.edit_pushButton.setIcon(icon1)
self.edit_pushButton.setIconSize(QSize(35, 35))
self.edit_pushButton.setFlat(True)
self.horizontalLayout.addWidget(self.edit_pushButton)
self.verticalLayout.addLayout(self.horizontalLayout)
self.scrollArea = QScrollArea(self.centralwidget)
self.scrollArea.setObjectName(u"scrollArea")
self.scrollArea.setWidgetResizable(True)
self.scrollAreaWidgetContents = QWidget()
self.scrollAreaWidgetContents.setObjectName(u"scrollAreaWidgetContents")
self.scrollAreaWidgetContents.setGeometry(QRect(0, 0, 1094, 693))
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.verticalLayout.addWidget(self.scrollArea)
Skills.setCentralWidget(self.centralwidget)
self.menubar = QMenuBar(Skills)
self.menubar.setObjectName(u"menubar")
self.menubar.setGeometry(QRect(0, 0, 1114, 22))
Skills.setMenuBar(self.menubar)
self.statusbar = QStatusBar(Skills)
self.statusbar.setObjectName(u"statusbar")
Skills.setStatusBar(self.statusbar)
self.retranslateUi(Skills)
QMetaObject.connectSlotsByName(Skills)
# setupUi
def retranslateUi(self, Skills):
Skills.setWindowTitle(QCoreApplication.translate("Skills", u"MainWindow", None))
self.back_pushButton.setText("")
self.label_4.setText(QCoreApplication.translate("Skills", u"Skills and endorsment", None))
self.edit_pushButton.setText("")
# retranslateUi
|
[
"[email protected]"
] | |
cdbfa1646185540c98eb700f25ced9365adf3ea5
|
2359121ebcebba9db2cee20b4e8f8261c5b5116b
|
/configs_pytorch/f92-all_pt.py
|
24f40fe7c10ebd9bd5510af10002262937b2188d
|
[] |
no_license
|
EliasVansteenkiste/plnt
|
79840bbc9f1518c6831705d5a363dcb3e2d2e5c2
|
e15ea384fd0f798aabef04d036103fe7af3654e0
|
refs/heads/master
| 2021-01-20T00:34:37.275041 | 2017-07-20T18:03:08 | 2017-07-20T18:03:08 | 89,153,531 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,436 |
py
|
#copy of j25
import numpy as np
from collections import namedtuple
from functools import partial
from PIL import Image
import data_transforms
import data_iterators
import pathfinder
import utils
import app
import torch
import torchvision
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import math
restart_from_save = None
rng = np.random.RandomState(42)
# transformations
p_transform = {'patch_size': (256, 256),
'channels': 3,
'n_labels': 17}
#only lossless augmentations
p_augmentation = {
'rot90_values': [0,1,2,3],
'flip': [0, 1]
}
# mean and std values for imagenet
mean=np.asarray([0.485, 0.456, 0.406])
mean = mean[:, None, None]
std = np.asarray([0.229, 0.224, 0.225])
std = std[:, None, None]
# data preparation function
def data_prep_function_train(x, p_transform=p_transform, p_augmentation=p_augmentation, **kwargs):
x = x.convert('RGB')
x = np.array(x)
x = np.swapaxes(x,0,2)
x = x / 255.
x -= mean
x /= std
x = x.astype(np.float32)
x = data_transforms.random_lossless(x, p_augmentation, rng)
return x
def data_prep_function_valid(x, p_transform=p_transform, **kwargs):
x = x.convert('RGB')
x = np.array(x)
x = np.swapaxes(x,0,2)
x = x / 255.
x -= mean
x /= std
x = x.astype(np.float32)
return x
def label_prep_function(x):
#cut out the label
return x
# data iterators
batch_size = 32
nbatches_chunk = 1
chunk_size = batch_size * nbatches_chunk
folds = app.make_stratified_split(no_folds=5)
print(len(folds))
train_ids = folds[0] + folds[1] + folds[2] + folds[3]
valid_ids = folds[4]
all_ids = folds[0] + folds[1] + folds[2] + folds[3] + folds[4]
bad_ids = []
train_ids = [x for x in train_ids if x not in bad_ids]
valid_ids = [x for x in valid_ids if x not in bad_ids]
test_ids = np.arange(40669)
test2_ids = np.arange(20522)
train_data_iterator = data_iterators.DataGenerator(dataset='train-jpg',
batch_size=chunk_size,
img_ids = all_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_train,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=True, random=True, infinite=True)
feat_data_iterator = data_iterators.DataGenerator(dataset='train-jpg',
batch_size=chunk_size,
img_ids = all_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=True, infinite=False)
valid_data_iterator = data_iterators.DataGenerator(dataset='train-jpg',
batch_size=chunk_size,
img_ids = valid_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=True, infinite=False)
test_data_iterator = data_iterators.DataGenerator(dataset='test-jpg',
batch_size=chunk_size,
img_ids = test_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=False, infinite=False)
test2_data_iterator = data_iterators.DataGenerator(dataset='test2-jpg',
batch_size=chunk_size,
img_ids = test2_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=False, infinite=False)
import tta
tta = tta.LosslessTTA(p_augmentation)
tta_test_data_iterator = data_iterators.TTADataGenerator(dataset='test-jpg',
tta = tta,
duplicate_label = False,
img_ids = test_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=False, infinite=False)
tta_test2_data_iterator = data_iterators.TTADataGenerator(dataset='test2-jpg',
tta = tta,
duplicate_label = False,
img_ids = test2_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=False, infinite=False)
tta_valid_data_iterator = data_iterators.TTADataGenerator(dataset='train-jpg',
tta = tta,
duplicate_label = True,
batch_size=chunk_size,
img_ids = valid_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=False, infinite=False)
nchunks_per_epoch = train_data_iterator.nsamples / chunk_size
max_nchunks = nchunks_per_epoch * 40
validate_every = int(0.5 * nchunks_per_epoch)
save_every = int(10 * nchunks_per_epoch)
learning_rate_schedule = {
0: 5e-2,
int(max_nchunks * 0.3): 2e-2,
int(max_nchunks * 0.6): 1e-2,
int(max_nchunks * 0.8): 3e-3,
int(max_nchunks * 0.9): 1e-3
}
# model
from collections import OrderedDict
class MyDenseNet(nn.Module):
def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000):
super(MyDenseNet, self).__init__()
# First convolution
self.features = nn.Sequential(OrderedDict([
('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
('norm0', nn.BatchNorm2d(num_init_features)),
('relu0', nn.ReLU(inplace=True)),
('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
]))
# Each denseblock
num_features = num_init_features
self.blocks = []
final_num_features = 0
for i, num_layers in enumerate(block_config):
block = torchvision.models.densenet._DenseBlock(num_layers=num_layers, num_input_features=num_features,
bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
self.features.add_module('denseblock%d' % (i + 1), block)
self.blocks.append(block)
num_features = num_features + num_layers * growth_rate
if i != len(block_config) - 1:
trans = torchvision.models.densenet._Transition(num_input_features=num_features, num_output_features=num_features // 2)
self.features.add_module('transition%d' % (i + 1), trans)
num_features = num_features // 2
# Final batch norm
self.features.add_module('norm5', nn.BatchNorm2d(num_features))
self.classifier_drop = nn.Dropout(p=0.75)
# Linear layer
self.classifier = nn.Linear(num_features, num_classes)
def forward(self, x):
features = self.features(x)
out = F.relu(features, inplace=True)
out = self.classifier_drop(out)
out = F.avg_pool2d(out, kernel_size=7).view(features.size(0), -1)
out = self.classifier(out)
return out
def my_densenet169(pretrained=False, **kwargs):
r"""Densenet-169 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = MyDenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 32, 32))
if pretrained:
model.load_state_dict(torch.utils.model_zoo.load_url(torchvision.models.densenet.model_urls['densenet169']))
return model
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.densenet = my_densenet169(pretrained=True)
self.densenet.classifier = nn.Linear(self.densenet.classifier.in_features, p_transform["n_labels"])
self.densenet.classifier.weight.data.zero_()
def forward(self, x):
x = self.densenet(x)
return F.sigmoid(x)
def build_model():
net = Net()
return namedtuple('Model', [ 'l_out'])( net )
# loss
class MultiLoss(torch.nn.modules.loss._Loss):
def __init__(self, weight):
super(MultiLoss, self).__init__()
self.weight = weight
def forward(self, input, target):
torch.nn.modules.loss._assert_no_grad(target)
weighted = (self.weight*target)*(input-target)**2 +(1-target)*(input-target)**2
return torch.mean(weighted)
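# MultiLoss is a weighted MSE: squared errors on positive labels are scaled by
# `weight` (5.0 in build_objective), penalising missed positives more strongly
# to counter the label imbalance across the 17 classes.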
def build_objective():
return MultiLoss(5.0)
def build_objective2():
return MultiLoss(1.0)
def score(gts, preds):
return app.f2_score_arr(gts, preds)
# updates
def build_updates(model, learning_rate):
return optim.SGD(model.parameters(), lr=learning_rate,momentum=0.9,weight_decay=0.0002)
|
[
"[email protected]"
] | |
eef7db4955b67a28aa9e471f54b63762353cd3d9
|
7bc6dfc393d9b3ddba1092f08cb10abfc75b285d
|
/case/__init__.py
|
3c0f0614f4619bf5aaa9c705f99ad98475ea0058
|
[] |
no_license
|
ellezdi0808/casewrite
|
ae9873b96cc1e006d32774cf5caa7d8bef73a6ed
|
70f832634f2cbe5ae17b5a9e589ba7165b83ced6
|
refs/heads/master
| 2020-06-25T19:25:31.792185 | 2018-01-17T06:32:34 | 2018-01-17T06:32:34 | 96,983,929 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,166 |
py
|
from flask import Blueprint
case = Blueprint('case',__name__)
#Blueprint new 一个实例,article是终结点,链接从article开始
from case.views import *
case_list = CaseList.as_view('case_list')
case.add_url_rule('/list/',view_func=case_list)
case.add_url_rule('/list/<int:page>/',view_func=case_list)
case.add_url_rule('/add/',view_func=CaseAdd.as_view('add_case'))
case.add_url_rule('/getModule/',view_func=CaseAddGetModule.as_view('get_module'))
case.add_url_rule('/edit/<id>/',view_func=CaseEdit.as_view('case_edit'))
case.add_url_rule('/check/<id>/',view_func=CaseCheck.as_view('case_check'))
case.add_url_rule('/del/<id>/',view_func=CaseDelete.as_view('case_delete'))
case.add_url_rule('/saveEdit/',view_func=CaseSaveEdit.as_view('save_edit'))
case.add_url_rule('/caseExcute/',view_func=CaseExcute.as_view('case_excute'))
case.add_url_rule('/batch/add/',view_func=CaseBatchAdd.as_view('case_batchadd'))
case.add_url_rule('/get-json/',view_func=CaseGetJson.as_view('get_json'))
home_page = CaseSystemHomePage.as_view('homePage')
case.add_url_rule('/homepage/',view_func=home_page)
case.add_url_rule('/homepage/<int:page>/',view_func=home_page)
|
[
"[email protected]"
] | |
ce041097a6930749d148a9937e8933fa38c20cca
|
63182d8972f176d86ac114bbcae153976ee4b20f
|
/day_1.py
|
7d6762c9cf169771379658b6b78bd5a281bfed58
|
[] |
no_license
|
yasirdin/advent-of-code-2019
|
a60c7db2a1453e95fadea49891e87bf39dd3ebc5
|
d8607bd4aef4496f88b775ce91c267ca8e85b63a
|
refs/heads/master
| 2020-09-22T10:52:04.471620 | 2019-12-08T13:37:35 | 2019-12-08T13:37:35 | 225,162,927 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,354 |
py
|
import logging
import math
from typing import List
logging.basicConfig(level=logging.INFO)
# PART 1 : What is the sum of the fuel requirements?
def parse_txt_file(filepath: str) -> List[int]:
with open(filepath) as f:
return [int(mass) for mass in f.readlines()]
mass_of_nodes = parse_txt_file('day-1.txt')
def fuel_required_given_mass(mass: int) -> int:
return math.floor(mass / 3) - 2
def total_fuel_required(mass_of_nodes: List[int]) -> int:
fuels_required = [fuel_required_given_mass(mass) for mass in mass_of_nodes]
return sum(fuels_required)
logging.info(f'Total fuel required: {total_fuel_required(mass_of_nodes)}')
# PART 2: What is the sum of the fuel requirements for all the modules?
def get_fuel_required_for_all_modules(mass_of_nodes: List[int]) -> int:
fuel_required_for_modules = []
for mass in mass_of_nodes:
        fuel_required = fuel_required_given_mass(mass)
        fuel_required_for_modules.append(fuel_required)  # reuse the computed value instead of recomputing
while fuel_required > 0:
fuel_required = fuel_required_given_mass(fuel_required)
if fuel_required > 0:
fuel_required_for_modules.append(fuel_required)
return sum(fuel_required_for_modules)
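# Part 2 note: fuel itself has mass and needs fuel, so the while loop keeps
# applying floor(mass / 3) - 2 until the result is non-positive; e.g. a module
# of mass 1969 needs 654 + 216 + 70 + 21 + 5 = 966 units in total.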
logging.info(
f'Total fuel required for all modules: {get_fuel_required_for_all_modules(mass_of_nodes)}'
)
|
[
"[email protected]"
] | |
e29a9590d83b44ba45526f334ad2ee6c6853f587
|
bfca42cd7d71d3beedb4c85374f81561ce28cb2e
|
/project_celery/project_celery/settings.py
|
028243aa0e41ce73f5b0943ff37b3a33f44b554b
|
[] |
no_license
|
johnaflorez/email-microservices
|
4efae03bc5d368f5104f427d8ad4a60fe4b9f05a
|
f42f13a21fa4ea00891894f208fa6c8c25757039
|
refs/heads/master
| 2020-03-21T16:40:11.502169 | 2018-07-04T16:37:17 | 2018-07-04T16:37:17 | 138,784,980 | 0 | 0 | null | 2018-06-26T19:41:38 | 2018-06-26T19:41:37 | null |
UTF-8
|
Python
| false | false | 3,232 |
py
|
"""
Django settings for project_celery project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vv=2ly_^j4p@^%0mbq463dwpkq2%(yr064a3f6ufuep%$h%39m'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'read_file',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project_celery.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project_celery.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
|
[
"[email protected]"
] | |
82271a49c22deb170f63fd3232c33d3a7f82602e
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/inspections/PyDunderSlotsInspection/inheritedClassAttrAssignmentAndOwnWithAttrAndInheritedSlots.py
|
307acdbdb1c8afd488293f4deb2a1b2e092d9960
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 |
Apache-2.0
| 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null |
UTF-8
|
Python
| false | false | 243 |
py
|
class B(object):
attr = 'baz'
__slots__ = ['f', 'b']
class C(B):
__slots__ = ['attr', 'bar']
C.attr = 'spam'
print(C.attr)
c = C()
<warning descr="'C' object attribute 'attr' is read-only">c.attr</warning> = 'spam'
print(c.attr)
|
[
"[email protected]"
] | |
007da86134bd9cf81656b9de3a4b00e9262caadf
|
0bce7412d58675d6cc410fa7a81c294ede72154e
|
/Python3/0983. Minimum Cost For Tickets.py
|
67eeee126a10f3fbd09cd9f37ac9a746033d4c3f
|
[] |
no_license
|
yang4978/LeetCode
|
9ddf010b0f1dda32cddc7e94c3f987509dea3214
|
6387d05b619d403414bad273fc3a7a2c58668db7
|
refs/heads/master
| 2022-01-15T04:21:54.739812 | 2021-12-28T12:28:28 | 2021-12-28T12:28:28 | 182,653,666 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 727 |
py
|
class Solution:
def mincostTickets(self, days: List[int], costs: List[int]) -> int:
# end = days[-1] + 1
# dp = [0]*end
# for d in range(1,end):
# temp = dp[d-1] + costs[0]
# temp = min(temp,min(dp[max(0,d-7):d])+costs[1])
# temp = min(temp,min(dp[max(0,d-30):d])+costs[2])
# if d not in days:
# temp = min(temp,dp[d-1])
# dp[d] = temp
# return dp[-1]
ans = [0]*(days[-1]+30)
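        # The +30 padding makes ans[d-7] and ans[d-30] safe for small d via
        # Python's negative indexing: those trailing slots are still 0 when
        # read, and are only overwritten after the last travel day.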
for d in range(len(ans)):
if d in days:
ans[d] = min(ans[d-1]+costs[0],ans[d-7]+costs[1],ans[d-30]+costs[2])
else:
ans[d] = ans[d-1]
return ans[-1]
|
[
"[email protected]"
] | |
63043710e6f6c2dcd39608e13ea8009e4915f17a
|
be07a1489063fc8724674114b0cba037caa53163
|
/server/src/candidates_guide.py
|
69240c74d4352289656044a01a2c2bbd2d112f80
|
[] |
no_license
|
prstcsnpr/CandidatesGuide
|
ed6bc9692dd2f625925e4655250bc33036fb7862
|
0eb65ea06505befc84b72863ec1a6650452384b3
|
refs/heads/master
| 2016-09-02T00:03:45.909327 | 2014-06-05T07:35:10 | 2014-06-05T07:35:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,878 |
py
|
# -*- coding: utf-8 -*-
import json
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world")
class CandidatesRecommendationHandler(tornado.web.RequestHandler):
def post(self):
sat = self.get_argument("sat")
toefl = self.get_argument("toefl")
discipline = self.get_argument("discipline")
school_list = self.__get_school_list()
json_result = self.__generate_json(school_list)
self.set_header("Access-Control-Allow-Origin", "*")
self.write(json_result)
def __generate_json(self, result):
json_result = {}
json_result["result"] = result
json_result["error"] = 0
json_result["description"] = "No error"
return json.dumps(json_result)
def __get_school_list(self):
        school_list = []
school_info1 = {}
school_info1["id"] = 1
school_info1["name"] = "哈佛大学"
school_info1["rate"] = "80%"
school_info2 = {}
school_info2["id"] = 2
school_info2["name"] = "麻省理工学院"
school_info2["rate"] = "70%"
school_info3 = {}
school_info3["id"] = 3
school_info3["name"] = "吉林大学"
school_info3["rate"] = "69%"
        school_list.append(school_info1)
        school_list.append(school_info2)
        school_list.append(school_info3)
        return school_list
class SchoolInfosHandler(tornado.web.RequestHandler):
def get(self):
school_list = self.__get_school_list()
json_result = self.__generate_json(school_list)
self.set_header("Access-Control-Allow-Origin", "*")
self.write(json_result)
def __generate_json(self, result):
json_result = {}
json_result["result"] = result
json_result["error"] = 0
json_result["description"] = "No error"
return json.dumps(json_result)
def __get_school_list(self):
        school_list = []
school_info1 = {}
school_info1["id"] = 1
school_info1["name"] = "哈佛大学"
school_info1["englishName"] = "HARVARD"
school_info1["sat"] = 100
school_info1["toefl"] = 110
school_info2 = {}
school_info2["id"] = 2
school_info2["name"] = "麻省理工学院"
school_info2["englishName"] = "MIT"
school_info2["sat"] = 100
school_info2["toefl"] = 100
school_info3 = {}
school_info3["id"] = 3
school_info3["name"] = "吉林大学"
school_info3["englishName"] = "JLU"
school_info3["sat"] = 90
school_info3["toefl"] = 90
        school_list.append(school_info1)
        school_list.append(school_info2)
        school_list.append(school_info3)
        return school_list
class SchoolInfoHandler(tornado.web.RequestHandler):
def get(self, school_id):
        school_info = self.__get_school_info(int(school_id))  # int() works in Python 2 and 3, unlike string.atoi
json_result = self.__generate_json(school_info)
self.set_header("Access-Control-Allow-Origin", "*")
self.write(json_result)
def __generate_json(self, result):
json_result = {}
json_result["result"] = result
json_result["error"] = 0
json_result["description"] = "No error"
return json.dumps(json_result)
def __get_school_info(self, id):
if 1 == id:
return self.__get_1_school_info()
elif 2 == id:
return self.__get_2_school_info()
elif 3 == id:
return self.__get_3_school_info()
def __get_1_school_info(self):
school_info = {}
school_info["schoolID"] = 1
school_info["schoolName"] = "哈佛大学"
school_info["schoolProfile"] = "一大堆情况介绍"
school_info["admissionStatus"] = "一大堆文字"
return school_info
def __get_2_school_info(self):
school_info = {}
school_info["schoolID"] = 2
school_info["schoolName"] = "麻省理工学院"
school_info["schoolProfile"] = "两大堆情况介绍"
school_info["admissionStatus"] = "两大堆文字"
return school_info
def __get_3_school_info(self):
school_info = {}
school_info["schoolID"] = 3
school_info["schoolName"] = "吉林大学"
school_info["schoolProfile"] = "三大堆情况介绍"
school_info["admissionStatus"] = "三大堆文字"
return school_info
application = tornado.web.Application([
(r"/", MainHandler),
(r"/schoolinfo/([0-9]+)", SchoolInfoHandler),
(r"/schoolinfos", SchoolInfosHandler),
(r"/candidatesrecommendation", CandidatesRecommendationHandler),
])
if __name__ == "__main__":
application.listen(8888)
tornado.ioloop.IOLoop.instance().start()
|
[
"[email protected]"
] | |
29dd49992c3e6dc309501706541af5a0e95c0c2e
|
3d2f75fa39b89cc7c3c3d6e1d7d34f15cd9571ad
|
/myshop/settings.py
|
524fa6777377996838c6551f203d2e0f12a296f5
|
[] |
no_license
|
ifranaiyubali/Django_My_Shop
|
15a460dc9d6529d456e251853269f989cc035159
|
f2232f5f58f6521175dca6bd2a95be335478f50d
|
refs/heads/master
| 2023-03-09T01:26:16.497838 | 2021-02-22T20:10:19 | 2021-02-22T20:10:19 | 341,316,185 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,646 |
py
|
"""
Django settings for myshop project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'wf2)71s-vdcl6-7r=anm6e_@=%+*@vd#ec$*^7$wjc!q4i!n+1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'shop',
'cart',
'orders',
'paypal.standard.ipn',
'payment',
'coupons',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myshop.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'cart.context_processors.cart',
],
},
},
]
WSGI_APPLICATION = 'myshop.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
from django.utils.translation import gettext_lazy as _
LANGUAGES = (
    ('en', _('English')),
    ('es', _('Spanish')),
)
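# Wrapping the language names in gettext_lazy (imported as _ above) lets
# Django translate the labels themselves; LocaleMiddleware, registered in
# MIDDLEWARE above, picks the active language per request from this list.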
LOCALE_PATHS = (
os.path.join(BASE_DIR, 'locale/'),
)
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
CART_SESSION_ID = 'cart'
# django-paypal settings
PAYPAL_RECEIVER_EMAIL = '[email protected]'
PAYPAL_TEST = True
|
[
"[email protected]"
] | |
ce6a3a58db7c01381630f48835bfc1305091fc2d
|
b663080c9d5680ce5d14de04ce390090c0b87f0e
|
/main_app/migrations/0001_initial.py
|
ead52c25129766a2adf002e2686fd31296d3c59d
|
[] |
no_license
|
isama22/datura.metel
|
1c85ccbed83fea4c4248e436611b638c412ad365
|
bd9a4af8742fc639e5b147c3122708434e658927
|
refs/heads/master
| 2023-04-16T09:21:34.656802 | 2021-04-29T17:50:05 | 2021-04-29T17:50:05 | 303,929,660 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 553 |
py
|
# Generated by Django 3.1.2 on 2020-10-22 01:08
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('description', models.TextField(max_length=1000)),
],
),
]
|
[
"[email protected]"
] | |
93f3d82a3dbde659163043e13cd766201e977797
|
6b05bddf2e294c8e1b39846aecadfa06b4ff805d
|
/test/test_v1_guest_agent_ping.py
|
b5518c61004a78ef0ce9d3cb39339b04acf71066
|
[
"Apache-2.0"
] |
permissive
|
kubevirt/client-python
|
5ca82fe55d48c07f62796d2bed3605a7c189922c
|
235fe17f58d41165010be7e4122cb67bdc866fe7
|
refs/heads/master
| 2023-09-03T12:25:27.272479 | 2023-08-17T00:33:31 | 2023-08-17T00:33:31 | 105,017,761 | 29 | 25 |
Apache-2.0
| 2022-10-20T13:52:10 | 2017-09-27T12:51:32 |
Python
|
UTF-8
|
Python
| false | false | 911 |
py
|
# coding: utf-8
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubevirt
from kubevirt.rest import ApiException
from kubevirt.models.v1_guest_agent_ping import V1GuestAgentPing
class TestV1GuestAgentPing(unittest.TestCase):
""" V1GuestAgentPing unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1GuestAgentPing(self):
"""
Test V1GuestAgentPing
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubevirt.models.v1_guest_agent_ping.V1GuestAgentPing()
pass
if __name__ == '__main__':
unittest.main()
|
[
"kubevirt-bot"
] |
kubevirt-bot
|
e49a33153e56bb9a9b5647ae17c825d2b36e806b
|
d3e251512ba74ece49d3159e058aad963ea40ec2
|
/Botgui/settings.py
|
83df29d2102693678a840431219534f1d613dc50
|
[] |
no_license
|
Rizwan0143/Botgui
|
4e22ae90769d4e4f316bc70dbaa4d41b8fe9231a
|
6a9c3e291c67f4d5a67214b899cc3ef02fae4aa0
|
refs/heads/master
| 2020-03-23T08:15:24.090830 | 2018-07-17T15:35:40 | 2018-07-17T15:35:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,814 |
py
|
"""
Django settings for Botgui project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@8j=#9r8=jol!sv#3icx&6$-3wgi-@s!+jkcqcbbhd1m(x54(p'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'binance_feed',
'bot',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'Botgui.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Botgui.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
# STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static/'),
)
|
[
"[email protected]"
] | |
4075b5b22b15371dc890e51d37bd615c1757abd3
|
61de27f71820834195dfb44ea5267ce2a710f127
|
/test5.py
|
974fe349590e2addbebd1965c860ebfc26ba2a9f
|
[] |
no_license
|
paulfr8/tex2epub-python
|
419a3c6808ee89fc06b90ddede2011e31834cb7c
|
2ad0bbd8b48ace63a89c520b6a9a0a6f5c355e64
|
refs/heads/master
| 2020-05-19T07:22:43.236599 | 2013-05-26T15:30:38 | 2013-05-26T15:30:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,660 |
py
|
import re
#TODO-List:
#Handle 2+ argument commands
#Handle tables
#Handle lists
#Handle 0-argument commands (e.g. \hline)
#Handle 1 argument commands split over several lines
#Programs list
#Tables
#Lists
#2+ argument commands
#0 argument commands
#1 argument commands
#punctuation
#split files after big divisions <h1> or <h2>
'''Commands lists for one-argument latex commands
oa_cmd_tex_op: latex command
oa_cmd_htm_op: xhtml opening tag
oa_cmd_htm_ed: xhtml closing tag
'''
oa_cmd_tex_op = []
oa_cmd_htm_op = []
oa_cmd_htm_ed = []
'''Fills in the commands list'''
def oa_cmds_fillin (fcinp_oa_cmd_tex_op, fcinp_oa_cmd_htm_op, fcinp_oa_cmd_htm_ed):
oa_cmd_tex_op.append(fcinp_oa_cmd_tex_op)
oa_cmd_htm_op.append(fcinp_oa_cmd_htm_op)
oa_cmd_htm_ed.append(fcinp_oa_cmd_htm_ed)
oa_cmds_fillin ("\\textbf{","<b>","</b>")
oa_cmds_fillin ("{\\bfseries","","</b>")
oa_cmds_fillin ("\\emph{","<em>","</em>")
oa_cmds_fillin ("{\\em","<em>","</em>")
oa_cmds_fillin ("\chapter{","<h1>","</h1>")
oa_cmds_fillin ("\\section{","<h2>","</h2>")
oa_cmds_fillin ("\\subsection{","<h3>","</h3>")
oa_cmds_fillin ("\\subsubsection{","<h4>","</h4>")
'''Tests if a charstring is a latex command'''
def is_there_oacmds (fcinp_charstring, fcinp_list):
for elt in fcinp_list:
if fcinp_charstring == elt:
return True
'''Splits a charstring (a line of latex code) into latex commands and single characters for the characters that do not belong to a latex command'''
'''Supports one-argument commands.
TODO: make it support multiple-argument commands and zero-argument commands'''
def split_line (fcinp_char):
fcout_list = []
fcine_cmd = ""
fcine_word = ""
for char in fcinp_char:
if char == "\\":
fcine_cmd = fcine_cmd + char
continue
elif char == "{":
if fcine_cmd != "":
fcine_cmd = fcine_cmd + char
fcout_list.append(fcine_cmd)
fcine_cmd = ""
continue
else:
fcine_cmd = fcine_cmd + char
continue
elif char == "}":
fcout_list.append(char)
continue
elif char == " ":
if fcine_cmd != "":
fcout_list.append(fcine_cmd)
fcine_cmd = ""
fcout_list.append(char)
continue
else:
fcout_list.append(char)
continue
else:
if fcine_cmd != "":
fcine_cmd = fcine_cmd + char
continue
else:
fcout_list.append(char)
continue
return(fcout_list)
'''Counts the number of commands in a charstring (line of Latex code)'''
'''TODO: simplify it: it should directly return the number of commands, not the list of the commands'''
def how_many_commands (fcinp_char):
fcout_list = []
fcine_cmd = ""
fcine_word = ""
fcine_nb_cmds = ""
for char in fcinp_char:
if char == "\\":
fcine_cmd = fcine_cmd + char
continue
elif char == "{":
fcine_cmd = fcine_cmd + char
fcout_list.append(fcine_cmd)
fcine_cmd = ""
continue
elif char == "}":
continue
elif char == " ":
if fcine_cmd != "":
fcout_list.append(fcine_cmd)
fcine_cmd = ""
continue
else:
continue
else:
if fcine_cmd != "":
fcine_cmd = fcine_cmd + char
continue
else:
continue
fcine_nb_cmds = len(fcout_list)
return(fcine_nb_cmds)
'''Replaces the latex commands in a list of elements produced by split_line, based on the lists of latex commands and their corresponding xhtml tags'''
'''Only with one-argument commands'''
def replace_tex (fcinp_txt_list, fcinp_nb_cmds, fcinp_cmds_list, fcinp_cmds_listb, fcinp_cmds_listc):
i = 0
fcout_list = fcinp_txt_list
test_break = 0
while True:
test_break = test_break + 1
if test_break > 100:
break
if i == fcinp_nb_cmds:
break
fcine_cmd_inprogress = 0
fcine_cmd_to_apply = ""
elt_nb = 0
for elt in fcinp_txt_list:
if is_there_oacmds(elt, fcinp_cmds_list):
if fcine_cmd_inprogress == 0:
cmd_position = 0
for cmd in fcinp_cmds_list:
if elt == fcinp_cmds_list[cmd_position]:
fcout_list[elt_nb] = fcinp_cmds_listb[cmd_position]
fcine_cmd_to_apply = fcinp_cmds_listc[cmd_position]
break
else:
cmd_position = cmd_position + 1
continue
fcine_cmd_inprogress = 1
elt_nb = elt_nb + 1
continue
if fcine_cmd_inprogress != 0:
fcine_cmd_inprogress = fcine_cmd_inprogress + 1
elt_nb = elt_nb + 1
continue
elif elt == "}":
if fcine_cmd_inprogress == 1:
fcout_list[elt_nb] = fcine_cmd_to_apply
fcine_cmd_inprogress = 0
i = i + 1
elt_nb = elt_nb + 1
continue
if fcine_cmd_inprogress == 0:
elt_nb = elt_nb + 1
continue
if fcine_cmd_inprogress > 1:
fcine_cmd_inprogress = fcine_cmd_inprogress - 1
elt_nb = elt_nb + 1
continue
else:
elt_nb = elt_nb + 1
continue
return(fcout_list)
'''Converts a list of charstrings into one charstring'''
def txt_list_2_txt_str(fcinptxtlist):
fcoutstring = ""
for elt in fcinptxtlist:
fcoutstring = fcoutstring + elt
continue
return (fcoutstring)
'''Uses the previous functions to convert one-argument latex commands in a charstring into the corresponding xhtml tags, based on the list of commands'''
def replace_one_arg_cmds (fcinp_string, fcinp_list, fcinp_listb, fcinp_listc):
fcine_split_line = split_line (fcinp_string)
fcine_nbcmds = how_many_commands (fcinp_string)
fcine_replaced_cmds = replace_tex (fcine_split_line, fcine_nbcmds, fcinp_list, fcinp_listb, fcinp_listc)
fcout_string = txt_list_2_txt_str (fcine_replaced_cmds)
return(fcout_string)
test_charstring = "{\em \\textbf{blab\emph{3}la} \\emph{Ro ro}}"
print (test_charstring)
final_string = replace_one_arg_cmds (test_charstring, oa_cmd_tex_op, oa_cmd_htm_op, oa_cmd_htm_ed)
print (final_string)
|
[
"[email protected]"
] | |
e871e9044ad011bb88ff42f52abb3f06b2327ad5
|
6c38efd76998d3b607c8acd0bb6ff0d0872c8c98
|
/πατσακης-εργασια 5.py
|
10602f6b888277bdc44f38f743a6d8a8e029eea4
|
[] |
no_license
|
nikosf123/python_patsak
|
b7703371542927da4f9250a20569dbe1fa77a67c
|
e58846b79660920fea6bacd5d1a079103120e777
|
refs/heads/master
| 2021-01-09T15:26:31.613291 | 2020-02-25T20:56:58 | 2020-02-25T20:56:58 | 242,355,574 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,265 |
py
|
print "gia na grapseis esu to arxeio dwse 1 "
print "gia ena aytomato arxeio dwse 2 "
print"-------------------------------------------"
epil=raw_input("dwse epilogh ")
while epil !="1" and epil!="2":
print "edwses lathos epilogh janadwse"
epil=raw_input("dwse epilogh ")
if epil=="1":
arxeio=raw_input("dwse ena keimeno ")
fin =open("text.txt","w")
fin.write(arxeio)
fin.close()
else:
fin = open('text.txt', 'w')
fin.write("simera einai deutera \n")
fin.write("exoume mathima \n")
fin.write("alla variemai na paw")
fin.close()
fin=open("text.txt","r")
w=fin.read()
fin.close()
le=[]
word=" "
meg=[]
gram=0
sm="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
n=len(w)
for i in range (n) :
if w[i] !=" " and w[i]!="\n" and w[i] in sm:
word=word+w[i]
gram=gram+1
else:
le.append(word)
meg.append(gram)
gram=0
word=" "
le.append(word)
meg.append(gram)
print "to keimeno poy edwses"
print w
print"-------------------------------------------"
new=[]
found="False"
n2=len(le)
prwt=" "
lexi=" "
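# pig-latin style transform: move the word's first letter to the end and
# append the suffix "ay" (word strings carry a leading space, so the first
# real letter sits at index 1)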
print "lejis me treia grammata kai panw vazontas to ptwto sto telos kai th katalhjh ay"
for i in range (n2):
if meg[i]>3:
w2=le[i]
print (w2[2:]+w2[1]+"ay")
|
[
"[email protected]"
] | |
9d74239c0561a82006e754793b8ad689019da6e8
|
42fe1fa7763f144764c6378c66d88d7f8d86ab03
|
/biohsmm/util/read_atac.py
|
64b6eb2d136472475c0fbb20278aa1aa93220ccb
|
[] |
no_license
|
anders-w-rasmussen/biohsmm
|
0b656beaf6f87081edc32166be12d454771b2d3f
|
2030243a488a8832c650b34d5773af96188be72d
|
refs/heads/master
| 2022-06-14T21:10:35.597723 | 2020-04-27T20:47:29 | 2020-04-27T20:47:29 | 258,550,563 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,096 |
py
|
import numpy as np
import pysam
obs_vec = list()
length = list()
start = list()
chrom = list()
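# Module-level accumulators shared by the functions below; note that they
# persist across repeated bam_to_obs() calls, so earlier results carry over.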
def bam_to_obs(c, s, e, filename):
    '''
    :param c: chromosome name
    :param s: window start coordinate
    :param e: window end coordinate
    :param filename: path to the BAM/SAM file
    :return: data array (obs)
    '''
obs_vec.append(reads_from_sam(filename, c, s, e))
length.append(e - s)
start.append(s)
chrom.append(c)
cuts, read_length = process_data(obs_vec, length, start)
data = np.zeros([np.sum(length), 3])
data[:, 0] = cuts[:, 0]
data[:, 1:] = read_length
return data
def process_data(data, length, start):
# Create Observation Vectors
# Create Cuts
obs_vec = np.zeros([np.sum(length), 1])
tracking = 0
for i_ in np.arange(len(data)):
for n_ in np.arange(data[i_].shape[0]):
            # plain int() instead of np.int, an alias that newer numpy versions removed
            r_st = np.max([int(data[i_][n_, 0]) - start[i_], 0])
            r_end = np.min([int(data[i_][n_, 1]) - start[i_], length[i_] - 1])
obs_vec[tracking + r_st, 0] += 1
obs_vec[tracking + r_end, 0] += 1
tracking += length[i_]
# Create Read Length Distributions
obs_reads = np.zeros([np.sum(length), 2])
tracking = 0
for i_ in np.arange(len(data)):
for n_ in np.arange(data[i_].shape[0]):
            r_st = np.max([int(data[i_][n_, 0]) - start[i_], 0])
            r_end = np.min([int(data[i_][n_, 1]) - start[i_], length[i_] - 1])
obs_reads[tracking + r_st:tracking + r_end, 0] += np.ones(r_end - r_st)
obs_reads[tracking + r_end, 1] += 1
tracking += length[i_]
return obs_vec, obs_reads
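# SAM flags 83/99/147/163 select properly paired read-pair mates. The -4/+8
# adjustment below appears to be the Tn5 transposase offset correction
# commonly applied to ATAC-seq fragment ends.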
def reads_from_sam(samfile_name, chr, window_start, window_end):
reads_array = []
sam_file = pysam.AlignmentFile(samfile_name)
for read in sam_file.fetch(chr, window_start, window_end):
if read.flag in [83, 99, 147, 163]:
left_tn5_start = min(read.reference_start, read.next_reference_start) - 4
right_tn5_end = left_tn5_start + abs(read.template_length) + 8
reads_array.append([left_tn5_start, right_tn5_end])
return np.array(reads_array)
|
[
"[email protected]"
] | |
2ff528d76ec3c032363cc59f587b0f6da4f410dc
|
6e373b40393fb56be4437c37b9bfd218841333a8
|
/Level_3/Lecture_21/enroll/models.py
|
82f2f10c875633e48efc381b7887773f0c960169
|
[] |
no_license
|
mahto4you/Django-Framework
|
6e56ac21fc76b6d0352f004a5969f9d4331defe4
|
ee38453d9eceea93e2c5f3cb6895eb0dce24dc2b
|
refs/heads/master
| 2023-01-22T01:39:21.734613 | 2020-12-04T03:01:17 | 2020-12-04T03:01:17 | 318,383,854 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 218 |
py
|
from django.db import models
# Create your models here.
class User(models.Model):
name = models.CharField(max_length=70)
email = models.EmailField(max_length=100)
    password = models.CharField(max_length=100)
|
[
"[email protected]"
] | |
bf665410bed50b229f08cbbbf8f37762a24340cb
|
5196f5c2997592a1eca7c16e48d2a240b2968ffe
|
/venv/bin/python-config
|
c2b4ef8a5fd0a5b36101aab4f9b0d8df9421ef23
|
[] |
no_license
|
rashmitpankhania/FoodEx
|
3594bed65ef10e73e293d913d6808f6ae935d49a
|
f772e1ef835c436bdf969001f5a78398551a8215
|
refs/heads/master
| 2021-05-11T17:30:19.864945 | 2018-01-28T06:46:28 | 2018-01-28T06:46:28 | 117,799,546 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,357 |
#!/home/rash/PycharmProjects/connect/venv/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
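# Typical invocation: `python-config --includes --libs` prints the compiler
# include flags and the linker flags needed to embed this interpreter.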
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
|
[
"[email protected]"
] | ||
82fcee9c8e03a443d43358ac3efc3bbec521b329
|
d190e9e4da0eb177f2e7c19c3684164ae685c44f
|
/mugMatch.py
|
c9b1989d6ab2ae4d270a735201a2729241217f7c
|
[
"MIT"
] |
permissive
|
AndrewsOR/MugMatch
|
d8a81bdb085d32bcbfbf2b07b7ed67db2a403fad
|
1a79bf410ff2773a632e1c5819c5121d9ac42128
|
refs/heads/master
| 2021-09-03T07:03:03.511263 | 2018-01-06T17:38:47 | 2018-01-06T17:38:47 | 116,165,354 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 13,633 |
py
|
#!/usr/bin/python3
#-------- Required Libraries --------------------------------------
# for API requests
import requests
from requests_oauthlib import OAuth1
# for GUI elements
import tkinter as tk # using several elements, so import whole module
from tkinter import messagebox
#from PIL import ImageTk, Image # from Python Image Library
from PIL import Image, ImageTk
# for dataset manipulation
import pandas as pd
# this is the credentials.py you created from credentialsTemplate.py
# (or implement your own handshake)
from mugCredentials import API_KEY, API_SECRET, \
ACCESS_TOKEN, ACCESS_SECRET, USER_NAME
#-------------------------------------------------------------------
#-------- API requests ---------------------------------------------
JSON_HEADERS = {'Accept':'application/json','Content-Type':'application/json'}
def getJsonResponse(url, auth):
""" wraps GET request and parses JSON response """
r = requests.get(url, auth = auth, headers = JSON_HEADERS)
r.raise_for_status()
return r.json()['Response']
def deleteJsonResponse(url, auth):
""" wraps DELETE request and parses JSON response """
r = requests.delete(url, auth = auth, headers = JSON_HEADERS)
r.raise_for_status()
return r.json()['Response']
def getAlbumsForUser( userName,
auth,
albumAttribs = ['AlbumKey','Name','ImageCount']
):
"""
Given a user name, return (in a list) dicts of attributes of each album.
Paginate the request if necessary using the default results count.
"""
albumUrl = 'https://api.smugmug.com/api/v2/user/' + userName + '!albums'
albumList = []
lastPage = False
while(not lastPage):
printNow('Requesting: ' + albumUrl)
r = getJsonResponse(albumUrl, auth)
albumList += [{k:x[k] for k in albumAttribs} for x in r['Album']]
if 'NextPage' in r['Pages']:
albumUrl = 'https://api.smugmug.com' + r['Pages']['NextPage']
else:
lastPage = True
return albumList
def getImagesForAlbum( albumKey,
auth,
imageAttribs = ['ImageKey','ArchivedMD5','ArchivedSize',
'FileName','Date','LastUpdated',
'ThumbnailUrl','Uri']
):
"""
Given an album key, return (in a list) dicts of attributes of each image.
Include the parent AlbumKey as an attribute.
Paginate the request if necessary using the default results count.
"""
albumImagesUrl = 'https://api.smugmug.com/api/v2/album/' + albumKey + '!images'
imagesList = []
lastPage = False
while(not lastPage):
printNow('Requesting: ' + albumImagesUrl)
r = getJsonResponse(albumImagesUrl, auth)
if not 'AlbumImage' in r:
printNow('Empty album at ' + albumImagesUrl)
else:
imagesList += [ {**{k:x[k] for k in imageAttribs},
**{'AlbumKey':albumKey} } for x in r['AlbumImage'] ]
if 'NextPage' in r['Pages']:
albumImagesUrl = 'https://api.smugmug.com' + r['Pages']['NextPage']
else:
lastPage = True
return imagesList
def deleteImageFromAlbum( albumImageUri, auth):
""" Delete an image, given its location in an album"""
albumImageUrl = 'https://api.smugmug.com' + albumImageUri
printNow('Deleting: ' + albumImageUrl)
return deleteJsonResponse(albumImageUrl, auth=auth)
def getAlbumsAndImagesForUser(userName, auth):
"""
Given a user name, return datasets (as pandas.DataFrame) of:
(1) Albums and their attributes
(2) Images (from any album) and their attributes, including parent album key.
"""
albums = getAlbumsForUser(userName, auth)
images = [getImagesForAlbum(x,auth) for x in [a['AlbumKey'] for a in albums]] # nested list
imageList = [image for album in images for image in album] # flatten the above list
albumData = pd.DataFrame.from_records(albums).set_index('AlbumKey')
imageData = pd.DataFrame.from_records(imageList)
for col in ['LastUpdated','Date']:
if col in imageData:
imageData[col] = pd.to_datetime(imageData[col])
return albumData, imageData
#------------------------------------------------------------
#----- Data manipulation ------------------------------------
def findDupesAcrossAlbums(albumDf, imageDf):
"""
Identify duplicate hashes in a given user's albums
Return dict of image metadata for each set of duplicates
"""
# create a dictionary of DataFrames of image metadata, one for each unique image
imageDf['duplicateHashFlag'] = imageDf.duplicated(subset='ArchivedMD5', keep=False)
imageDf['fileNameLength'] = imageDf['FileName'].apply(len)
dupesDf = imageDf.loc[imageDf['duplicateHashFlag']
].join(albumDf.rename(index=str,columns={'Name':'AlbumName'}),
on='AlbumKey').sort_values(['ImageCount','fileNameLength'])
dupesDf['fileAlbmStr'] = ( dupesDf['AlbumName'].apply(fixStringLength,n=22) +
dupesDf['ImageCount'].apply(lambda x: ' ({:>4d} photos)'.format(x)) )
dupesDf['filePrefStr'] = dupesDf['FileName'].apply(lambda x: fixStringLength(x.split('.')[0],n=14, alignRight=False) )
dupesDf['fileSuffStr'] = dupesDf['FileName'].apply(lambda x: x.split('.')[-1].lower())
dupesDf['fileSizeStr'] = (dupesDf['ArchivedSize'] / 1024**2).round(2).apply(lambda x: '{:.2f}M'.format(x))
dupesDf['ImageDesc'] = ( dupesDf['fileAlbmStr'] + ' / ' +
dupesDf['filePrefStr'] + ' (' +
dupesDf['fileSizeStr'] + ' ' +
dupesDf['fileSuffStr'] + ')' )
return dict( iter( dupesDf[['ArchivedMD5','ThumbnailUrl',
'Uri','ImageDesc']].groupby('ArchivedMD5') ) )
#------------------------------------------------------------
#------ Misc ------------------------------------------------
def fixStringLength(s, n, ctd='...', alignRight = True):
"""
Forces a string into a space of size `n`, using continuation
character `ctd` to indicate truncation
"""
try:
return ( s[:(n-len(ctd))] + ctd if len(s) > n
else s.rjust(n) if alignRight
else s.ljust(n)
)
except (AttributeError, TypeError, ValueError):
raise AssertionError('Input should be a string')
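# e.g. fixStringLength('abcdefgh', 5) -> 'ab...'; strings shorter than n are
# padded (right-aligned by default) to width n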
def printNow(x):
"""Shorthand for printing to console"""
print(x, flush=True)
#------------------------------------------------------------
#---- GUI ---------------------------------------------------
class CopyDeleter(tk.Frame):
def __init__(self, root, data, auth):
"""
Scrollbar code credit to Bryan Oakley:
https://stackoverflow.com/a/3092341/2573061
"""
super().__init__()
self.canvas = tk.Canvas(root, borderwidth=0)
self.frame = tk.Frame(self.canvas)
self.scroll = tk.Scrollbar(root, orient="vertical", command=self.canvas.yview)
self.canvas.configure(yscrollcommand=self.scroll.set)
self.scroll.pack(side="right", fill="y")
self.canvas.pack(side="left", fill="both", expand=True)
self.canvas.create_window((4,4), window=self.frame, anchor="nw",
tags="self.frame")
self.frame.bind("<Configure>", self.onFrameConfigure)
self.data = data
self.auth = auth
self.initUI()
def onFrameConfigure(self, event):
"""Reset the scroll region to encompass the inner frame"""
self.canvas.configure(scrollregion=self.canvas.bbox("all"))
def initUI(self):
"""
Creates the static UI content and the innerFrame that will hold the
dynamic UI content (i.e., the Checkbuttons for the copies)
"""
self.master.title("Duplicate Removal")
self.instructLabel = tk.Label( self.frame, justify='left',
text = "Select the copies you wish to DELETE.")
self.skipButton = tk.Button( self.frame, text="Skip", command = self.populateUI)
self.deleteButton = tk.Button( self.frame, text="Delete selected", fg = 'red',
command = self.executeSelection )
self.quitButton = tk.Button( self.frame, text="Exit", command=self.frame.quit)
self.innerFrame = tk.Frame( self.frame)
self.instructLabel.pack(anchor = 'nw', padx=5,pady=5)
self.innerFrame.pack(anchor='nw', padx=5, pady=20, expand=True)
self.deleteButton.pack(side='left', padx=5,pady=5)
self.skipButton.pack(side='left', padx=5,pady=5)
self.quitButton.pack(side='left', padx=5,pady=5)
self.populateUI()
def clearUI(self):
"""remove any Checkbuttons from previous calls"""
for child in self.innerFrame.winfo_children():
child.destroy()
def getNextDupeSet(self):
try:
return self.data.popitem()[1]
except KeyError:
messagebox.showinfo("All done", "You've reviewed all duplicates.")
raise KeyError()
def populateUI(self):
"""
Creates and packs a list of Checkbuttons (cbList) into the innerFrame
By default, the first Checkbutton will be unchecked, all others checked.
You should help the user out by passing the copy most likely to be the "original"
(using some business rule) at the head of the list
"""
self.clearUI()
try:
imgData = self.getNextDupeSet()
# create lists from data to populate Checkbuttons
imgDescs = imgData['ImageDesc'].tolist()
thumbUrls = imgData['ThumbnailUrl'].tolist()
# This reference is required to prevent premature garbage collection
# More info at the getImgFromUrl docstring
self.thumbImgs = [self.getImgFromUrl(x) for x in thumbUrls]
n = len(imgData.index)
self.cbList = [None] * n
self.cbValues = [tk.BooleanVar() for i in range(n)]
self.cbDestUris = imgData['Uri'].tolist()
for i in range(n):
self.cbList[i] = tk.Checkbutton( self.innerFrame,
text=imgDescs[i],
image = self.thumbImgs[i],
variable = self.cbValues[i],
compound='left' )
# By default, leave initial button unchecked, others checked
if i: self.cbList[i].select()
self.cbList[i].pack(anchor = 'w', padx=5,pady=5)
except KeyError:
self.frame.quit()
def getImgFromUrl(self, url):
"""
Return an image from a given URL as a Python Image Library PhotoImage
Uses solution from : https://stackoverflow.com/a/18369957/2573061
This function is used to grab thumbnails for the photo picker
It is inside the CopyDeleter class due to tkinter garbage collection problem.
This problem is described at:
https://stackoverflow.com/a/3366046/2573061 and:
http://effbot.org/pyfaq/why-do-my-tkinter-images-not-appear.htm
"""
print('Requesting: '+url)
try:
r = requests.get(url, auth=self.auth, stream=True)
pilImg = Image.open(r.raw)
phoImg = ImageTk.PhotoImage(pilImg)
return phoImg
except Exception as e:
print('Error ' + repr(e) )
return None
def querySelection(self):
return [x.get() for x in self.cbValues]
def getDestUris(self):
return self.cbDestUris
def executeSelection(self):
selects = self.querySelection()
destUris = self.getDestUris()
if ( not all(x for x in selects) or
messagebox.askokcancel(message='Delete ALL occurrences of this image?')
):
for selected, destUri in zip(selects,destUris):
if selected:
printNow('Deleting copy at: ' + destUri)
deleteImageFromAlbum(destUri, auth=self.auth)
else:
printNow('Ignoring copy at: ' + destUri)
self.populateUI()
#------------------------------------------------------------
def main():
# Authentication (stored locally for now)
auth = OAuth1(API_KEY, API_SECRET, ACCESS_TOKEN, ACCESS_SECRET)
# Query all albums for user, then all images in those albums
albums, images = getAlbumsAndImagesForUser(USER_NAME, auth)
# Find duplicate images across albums using the image hash
dupesDict = findDupesAcrossAlbums(albums, images)
# launch the CopyDeleter app
root = tk.Tk()
root.geometry("800x500+250+100") # width x height + xOffset + yOffset
app = CopyDeleter(root, data=dupesDict, auth=auth)
app.mainloop()
# in case you're running it inside an IDE (not recommended):
try:
root.destroy()
except tk.TclError:
pass
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
164ea3c1d86ff2f8814148d93d4bfa038e83dea1
|
e07177ad35b7eff50165bb0cb853e12291c2c40c
|
/Remove one char palin.py
|
d4788d72727a573d6acd8da73698b1fa604aa977
|
[] |
no_license
|
yasosurya33/Python
|
af707410234e09760b898853354e9567285a5e45
|
17c4055086d7ccec5076f2b7b2230238ad4a199c
|
refs/heads/master
| 2020-07-04T06:29:15.912630 | 2019-08-18T05:54:52 | 2019-08-18T05:54:52 | 202,187,308 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 548 |
py
|
def palin(x):
sam1 = x
    l = len(sam1)  # len() already returns an int
i = 0
lab = 0
while i < (l // 2):
if (sam1[i] == sam1[l - 1 - i]):
lab = 1
i += 1
else:
break
if lab == 1:
return 1
else:
return 0
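# Strategy: blank out each character in turn and test whether the remaining
# characters form a palindrome, i.e. whether deleting one character can turn
# the input into a palindrome.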
nam = input()
dub = []
sam=''
for i in nam:
dub.append(i)
dub1=dub.copy()
lab1=0
for i in range(len(dub)):
dub[i]=""
for i in dub:
sam+=i
if palin(sam)==1:
lab1=1
sam=""
dub=dub1.copy()
if lab1==1:
print("YES")
else:
print("NO")
|
[
"[email protected]"
] | |
d43e358c58ff1e26ae2b91404b33f60136411d29
|
84b6f74a9a78a6d54fad5ebbeafd6e8054e78e6c
|
/admin.py
|
e35332f0c24aeca1bbc8ee62db7498efc84f1141
|
[] |
no_license
|
uje/embedmedia
|
9a6cb9b19e2020a4f6402afafca45c46c49eb4a0
|
6de3c45802ac62f8a28d484f4fb38fa749a5df06
|
refs/heads/master
| 2021-01-01T03:42:38.447561 | 2016-04-15T09:21:44 | 2016-04-15T09:21:44 | 56,307,661 | 7 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,979 |
py
|
# -*- coding: utf-8 -*-
# Name: Module 1
# Purpose:
#
# Author: Jame
#
# Created: 11/04/2012
# Copyright: (c) Jame 2012
# Licence: <your licence>
#-------------------------------------------------------------------------------
import webapp2,uuid
from google.appengine.ext.webapp import util
from google.appengine.api import users
import model,utils,json,mcache
from google.appengine.ext import db
import logging
class MainHandler(webapp2.RequestHandler):
def get(self):
if utils.isLogin():
#domain=model.Domains().find(utils.getOwner())
#using cache
domain=mcache.getDomain(utils.getOwner())
if domain:
domain={
"title":domain.title,
"domain":domain.domain,
"owner":domain.owner,
"themeName":domain.theme,
"theme":utils.gettheme_by_path(domain.theme)
}
else:
self.redirect('/admin/guide')
return
oauth=mcache.get_oauth_by_owner(domain["owner"])
if oauth is None:
oauth=model.OAuth(owner=domain["owner"],token=utils.getGuid())
oauth.put()
temp_values={
"domain":domain,
"theme": utils.theme,
"domainjson":json.dumps({"code":200,"data":domain}),
"id": oauth.token
}
self.response.out.write(utils.render('template/admin.html',temp_values))
else:
self.redirect('admin/login')
#self.redirect(users.create_login_url(self.request.url))
class LoginHandler(webapp2.RequestHandler):
def get(self):
self.response.out.write(utils.render('template/login.html', {}))
def post(self):
uid = self.request.get('uid')
pwd = self.request.get('pwd')
if uid == "":
self.response.out.write(json.dumps({ "message": "帐户为空!" }))
return
if pwd == "":
self.response.out.write(json.dumps({ "message": "密码为空!" }))
return
domain = model.Domains().find(uid)
if domain:
if domain.pwd is None or domain.pwd == '':
domain.pwd = pwd
utils.login(domain.owner)
self.response.out.write(json.dumps({ "code": 200 }))
elif domain.pwd == pwd:
utils.login(domain.owner)
self.response.out.write(json.dumps({ "code": 200 }))
else:
self.response.out.write(json.dumps({ "message": "密码不匹配!" }))
else:
self.response.out.write(json.dumps({ "message": "帐户不存在!" }))
class LogoutHandler(webapp2.RequestHandler):
def get(self):
utils.logout()
self.redirect('/')
class AddFavHandler(webapp2.RequestHandler):
def get(self):
self.response.out.write('access Permission')
def post(self):
if utils.isLogin():
title=self.request.get('title')
url=self.request.get('url')
tag=self.request.get('tag')
if title=="" or url=="" or tag=="":
self.response.out.write(json.dumps({"code":500 }))
return
owner=utils.getOwner()
if title and tag and url:
try:
model.Anthors().insert(title=title,url=url,tag=tag,owner=owner)
model.Tags().insert_or_change(tag,owner)
mcache.clear_links_cache(owner)
mcache.clear_tags_cache(owner)
self.response.out.write(json.dumps({"code":200}))
except:
self.response.out.write(json.dumps({"code":501}))
else:
self.response.out.write(json.dumps({"code":500}))
else:
self.redirect(users.create_login_url(self.request.url))
class DomainHandler(webapp2.RequestHandler):
def post(self):
if utils.isLogin()==False:
self.response.out.write(json.dumps({"code":403}))
return
name=self.request.get('name')
domain=self.request.get('domain')
theme=self.request.get('theme')
        if name is None:
            self.response.out.write(json.dumps({"code":501}))
            return
        if domain is None:
            self.response.out.write(json.dumps({"code":502}))
            return
if theme is None:
theme=utils.theme[0].path
try:
model.Domains().insert_or_change(domain,utils.getOwner(),name,theme)
self.response.out.write(json.dumps({"code":200}))
mcache.clear()
except:
self.response.out.write(json.dumps({"code":500}))
class DeleteLinkHandler(webapp2.RequestHandler):
def post(self):
if utils.isLogin()==False:
self.response.out.write(json.dumps({"code":403}))
return
name=self.request.get("name")
try:
names=name.split(",")
for n in names:
try:
model.Anthors().delete_link(utils.getOwner(),n)
except:
continue
mcache.clear()
self.response.out.write(json.dumps({"code":200}))
except:
self.response.out.write(json.dumps({"code":500}))
class LinkChangeHandler(webapp2.RequestHandler):
def post(self):
if utils.isLogin()==False:
self.response.out.write(json.dumps({"code":403}))
return
orig_title=self.request.get("origTitle")
title=self.request.get("title")
url=self.request.get("url")
tag=self.request.get("tag")
if title=="" or url=="" or tag=="":
self.response.out.write(json.dumps({"code":500 }))
return
link=model.Anthors().find_link(utils.getOwner(),orig_title)
if link is None:
self.response.out.write(json.dumps({"code":500 }))
return
try:
if link.title!=title:
link.title=title
if link.url!=url:
link.url=url
if link.tag!=tag:
link.tag=tag
model.Tags().insert_or_change(tag,utils.getOwner())
model.Tags().delete_or_change(link.tag,utils.getOwner())
link.put()
mcache.clear()
self.response.out.write(json.dumps({"code":200}))
except:
self.response.out.write(json.dumps({"code":500}))
class ErrorHandler(webapp2.RequestHandler):
def get(self,u):
self.response.out.write('page not found')
class GetLinksHandler(webapp2.RequestHandler):
def get(self):
if utils.isLogin()==False:
self.response.out.write(json.dumps({"code":403}))
return
links=list()
anthors=model.Anthors()
tag=self.request.get('tag')
if tag:
#links=anthors.find_links(utils.getOwner(),tag)
#using cache
links=mcache.fink_links_by_tag(utils.getOwner(),tag)
else:
#links=anthors.find(utils.getOwner())
#using cache
links=mcache.find_links(utils.getOwner())
self.response.out.write(anthors.getjson(links))
class GuideHandler(webapp2.RequestHandler):
def get(self):
if utils.isLogin()==False:
self.redirect(users.create_login_url(self.request.url))
else:
domain=mcache.getDomain(utils.getOwner())
if domain is None:
self.response.out.write(utils.render("template/guide.html",{
"themes":utils.theme,
"domain":{
"themeName":"basev3",
"title":"风语"
}
}))
else:
self.redirect('/admin')
class NewTokenHandler(webapp2.RequestHandler):
def get(self):
if utils.isLogin():
token=utils.getGuid()
domain=mcache.getDomain(utils.getOwner())
old_oauth=mcache.get_oauth_by_owner(domain.owner)
oauth=model.OAuth().insert_or_change(domain.owner,token)
mcache.del_oauth_cache(old_oauth.owner)
mcache.del_oauth_cache(old_oauth.token)
self.response.out.write(token)
##### Pages section #####
class PagesHandler(webapp2.RequestHandler):
def get(self):
if utils.isLogin()==False:
self.redirect(users.create_login_url(self.request.url))
else:
tmp={
"theme":utils.theme,
"domain":{
"themeName":"basev1"
},
"pages":mcache.get_cache_pages(utils.getOwner())
}
self.response.out.write(utils.render("template/pages.html",tmp))
class PageGetHandler(webapp2.RequestHandler):
def post(self):
name=self.request.get('name')
if utils.isLogin()==False:
raise
else:
page=mcache.get_cache_page(utils.getOwner(),name)
self.response.out.write(json.dumps({"name":page.name,"title":page.title,"html":page.html,"showInTop":page.showInTop, "useFrame": page.useFrame}))
class PageAddHandler(webapp2.RequestHandler):
def post(self):
if utils.isLogin()==False:
raise
name=self.request.get('name')
title=self.request.get('title')
html=self.request.get('html')
_showInTop = self.request.get('showInTop')
_useFrame = self.request.get('useFrame')
useFrame = False
showInTop = False
if name is None or name=="":
raise
if title is None or title=="":
raise
if html is None or html=="":
raise
if model.Pages().exist(utils.getOwner(),name):
self.response.out.write('{"code":503}')
return
if _showInTop=="1":
showInTop=True
if _useFrame == "1":
useFrame = True
page=model.Pages(owner=utils.getOwner(),name=name,title=title,html=html,showInTop=showInTop, useFrame = useFrame)
page.put()
self.response.out.write('{"code":200}')
class PageChangeHandler(webapp2.RequestHandler):
def post(self):
if utils.isLogin()==False:
raise
oname=self.request.get('oname')
name=self.request.get('name')
title=self.request.get('title')
html=self.request.get('html')
_showInTop=self.request.get('showInTop')
_useFrame = self.request.get('useFrame')
useFrame = False
showInTop=False
if name is None and title is None and html is None:
raise
if _showInTop=="1":
showInTop=True
if _useFrame == "1":
useFrame = True
model.Pages().change(utils.getOwner(),name,oname,title,html,showInTop,useFrame)
mcache.clear_pages_cache(utils.getOwner())
self.response.out.write('{"code":200}')
class PageDeleteHandler(webapp2.RequestHandler):
def post(self):
if utils.isLogin()==False:
raise
name=self.request.get("name")
if name is None or name=="":
            self.abort(400)  # bad request: name is empty
else:
model.Pages().delete_page(utils.getOwner(),name)
self.response.out.write('{"code":200}')
app = webapp2.WSGIApplication([('/admin/addfav',AddFavHandler),
('/admin/pages', PagesHandler),
('/admin/domainchange', DomainHandler),
('/admin/dellink', DeleteLinkHandler),
('/admin/getlinks',GetLinksHandler),
('/admin/linkchange',LinkChangeHandler),
('/admin/guide', GuideHandler),
('/admin/newtoken', NewTokenHandler),
('/admin/getpage', PageGetHandler),
('/admin/addpage', PageAddHandler),
('/admin/pagedelete', PageDeleteHandler),
('/admin/pagechange', PageChangeHandler),
('/admin/login', LoginHandler),
('/admin/logout', LogoutHandler),
('/admin/?', MainHandler),
('(.*)',ErrorHandler)],
debug=True)
|
[
"[email protected]"
] | |
3c37470e6687cc51f01b3bfb39c7f931f854f693
|
f82757475ea13965581c2147ff57123b361c5d62
|
/gi-stubs/repository/Gio/SocketServiceClass.py
|
8c18c95238ae487ac715dd801bd46c959b88b0ce
|
[] |
no_license
|
ttys3/pygobject-stubs
|
9b15d1b473db06f47e5ffba5ad0a31d6d1becb57
|
d0e6e93399212aada4386d2ce80344eb9a31db48
|
refs/heads/master
| 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null |
UTF-8
|
Python
| false | false | 5,419 |
py
|
# encoding: utf-8
# module gi.repository.Gio
# from /usr/lib64/girepository-1.0/Gio-2.0.typelib
# by generator 1.147
# no doc
# imports
import gi as __gi
import gi.overrides as __gi_overrides
import gi.overrides.Gio as __gi_overrides_Gio
import gi.overrides.GObject as __gi_overrides_GObject
import gi.repository.GObject as __gi_repository_GObject
import gobject as __gobject
class SocketServiceClass(__gi.Struct):
"""
:Constructors:
::
SocketServiceClass()
"""
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self): # real signature unknown; restored from __doc__
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __weakref__(self, *args, **kwargs): # real signature unknown
pass
incoming = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
parent_class = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_g_reserved1 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_g_reserved2 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_g_reserved3 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_g_reserved4 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_g_reserved5 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_g_reserved6 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(SocketServiceClass), '__module__': 'gi.repository.Gio', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'SocketServiceClass' objects>, '__weakref__': <attribute '__weakref__' of 'SocketServiceClass' objects>, '__doc__': None, 'parent_class': <property object at 0x7f4b87fc8810>, 'incoming': <property object at 0x7f4b87fc8900>, '_g_reserved1': <property object at 0x7f4b87fc89f0>, '_g_reserved2': <property object at 0x7f4b87fc8ae0>, '_g_reserved3': <property object at 0x7f4b87fc8bd0>, '_g_reserved4': <property object at 0x7f4b87fc8cc0>, '_g_reserved5': <property object at 0x7f4b87fc8db0>, '_g_reserved6': <property object at 0x7f4b87fc8ea0>})"
__gtype__ = None # (!) real value is '<GType void (4)>'
__info__ = StructInfo(SocketServiceClass)
|
[
"[email protected]"
] | |
816ae873b0b90fcf3321f06f6a70489ed6eaeaa6
|
c07380914a44df334194f234c33858f357365c19
|
/ENV/lib/python2.7/site-packages/theano/tensor/tests/test_gc.py
|
d1304de7e268985aa6ba0543e87bf76860c9f26b
|
[] |
no_license
|
damianpolan/Music-Genre-Classification
|
318952ae7de5d0b0bdf5676e28071c7b38d0e1c5
|
acd723ae1432ce798866ebb97ef3c484db37e971
|
refs/heads/master
| 2022-12-24T09:23:55.514337 | 2016-03-22T14:49:28 | 2016-03-22T14:49:28 | 42,965,899 | 4 | 4 | null | 2022-12-12T20:26:24 | 2015-09-22T23:05:37 |
Python
|
UTF-8
|
Python
| false | false | 4,130 |
py
|
import cPickle
import sys
import numpy
import theano
from theano import tensor as T
import time
def test_no_reuse():
x = T.lvector()
y = T.lvector()
f = theano.function([x, y], x + y)
#provide both inputs in the first call
f(numpy.ones(10, dtype='int64'), numpy.ones(10, dtype='int64'))
try:
f(numpy.ones(10))
except TypeError:
return
assert not 'should not get here'
def test_gc_never_pickles_temporaries():
x = T.dvector()
#print >> sys.stderr, 'BUILDING GRAPH'
    for i in xrange(2):  # TODO: 30 causes a really LONG compilation due to MERGE
if i:
r = r + r/10
else:
r = x
optimizer = None
optimizer = 'fast_run'
for f_linker, g_linker in [
(theano.PerformLinker(allow_gc=True),
theano.PerformLinker(allow_gc=False)),
(theano.OpWiseCLinker(allow_gc=True),
theano.OpWiseCLinker(allow_gc=False))]:
#f_linker has garbage collection
#g_linker has no garbage collection
#print >> sys.stderr, 'COMPILING'
f = theano.function([x], r, mode=theano.Mode(optimizer=optimizer,
linker=f_linker))
g = theano.function([x], r, mode=theano.Mode(optimizer=optimizer,
linker=g_linker))
len_pre_f = len(cPickle.dumps(f))
len_pre_g = len(cPickle.dumps(g))
        # We can't compare the content or the length of the pickled string
        # between f and g, for two reasons: we store some timing information
        # as floats, which won't be identical on each run, and different
        # floats can have different lengths when printed.
def a(fn):
return len(cPickle.dumps(fn.maker))
assert a(f) == a(f) # some sanity checks on the pickling mechanism
assert a(g) == a(g) # some sanity checks on the pickling mechanism
def b(fn):
return len(
cPickle.dumps(
theano.compile.function_module._pickle_Function(
fn)))
assert b(f) == b(f) # some sanity checks on the pickling mechanism
def c(fn):
return len(cPickle.dumps(fn))
assert c(f) == c(f) # some sanity checks on the pickling mechanism
assert c(g) == c(g) # some sanity checks on the pickling mechanism
# now run the function once to create temporaries within the no-gc
# linker
f(numpy.ones(100, dtype='float64'))
g(numpy.ones(100, dtype='float64'))
# serialize the functions again
post_f = cPickle.dumps(f)
post_g = cPickle.dumps(g)
len_post_f = len(post_f)
len_post_g = len(post_g)
# assert that f() didn't cause the function to grow
# allow_gc should leave the function un-changed by calling
assert len_pre_f == len_post_f
# assert that g() didn't cause g to grow because temporaries
# that weren't collected shouldn't be pickled anyway
# Allow for a couple of bytes of difference, since timing info,
# for instance, can be represented as text of varying size.
assert abs(len_post_f - len_post_g) < 16, (
f_linker, len_post_f, len_post_g)
def test_merge_opt_runtime():
"""In the original merge optimization, the following graph took
like caused the MERGE optimizer to exhibit really bad performance
(quadratic? exponential?)
Ironically, there is actually no merging to do in this graph.
"""
x = T.dvector()
for i in xrange(50):
if i:
r = r + r/10
else:
r = x
t = time.time()
f = theano.function([x], r, mode='FAST_COMPILE')
# FAST_RUN does in-place optimizer which requires a lot of
# toposorting, which is actually pretty slow at the moment. This
# test was designed to test MergeOptimizer... so I'm leaving
# toposort optimizations for a later date.
dt = time.time() - t
# it should never take longer than 5 seconds to compile this graph
assert dt < 5.0
|
[
"[email protected]"
] | |
18e75a4ea3347217d6d887dd71f25525056b08a5
|
4331e4d07ae402b9ef9c176164194dc1f5d33f47
|
/10.Examples/01.Basic/20.CreatingPackage/mymath/adv/fib.py
|
8817d00be5fa977a5e9d586daca14489b31087e4
|
[] |
no_license
|
thiminhnhut/creating-modules-packages-python
|
54cc0dc3a3482d28c2b1f7d3e221460ef628438b
|
936600e53ab7069d0f234a1d8f2e6335b65f12a2
|
refs/heads/master
| 2020-03-25T22:41:26.954513 | 2019-08-16T02:58:42 | 2019-08-16T02:58:42 | 144,236,420 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 99 |
py
|
from math import sqrt
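# Binet's closed-form formula: F(n) = (phi**n - psi**n) / sqrt(5), with
# phi = (1+sqrt(5))/2 and psi = (1-sqrt(5))/2. Exact in real arithmetic, but
# floating-point error grows with n, so results are only reliable for small n.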
def fibonacci(n):
return ((1+sqrt(5))**n-(1-sqrt(5))**n)/(2**n*sqrt(5))
|
[
"[email protected]"
] | |
a84829ae8a55aa1d175e4dcacd447f99e538bea7
|
49201afc8c3515d9f5cb569f45cd34ba291e84ca
|
/autobahntestsuite/autobahntestsuite/caseset.py
|
2611fd0aadbb7fe5e8808a6db96dedfd3862fc7f
|
[
"Apache-2.0"
] |
permissive
|
crossbario/autobahn-testsuite
|
2f3fe9a46a806550dddb23ed7bc98a94c47d5bd8
|
09cfbf74b0c8e335c6fc7df88e5c88349ca66879
|
refs/heads/master
| 2023-09-06T01:16:06.357182 | 2022-11-02T18:00:25 | 2022-11-02T18:00:25 | 3,762,517 | 718 | 74 |
Apache-2.0
| 2022-01-26T11:07:29 | 2012-03-19T09:59:18 |
Python
|
UTF-8
|
Python
| false | false | 5,570 |
py
|
###############################################################################
##
## Copyright (c) Crossbar.io Technologies GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
__all__ = ("CaseSet",)
import re
class CaseSet:
def __init__(self, CaseSetName, CaseBaseName, Cases, CaseCategories, CaseSubCategories):
self.CaseSetName = CaseSetName
self.CaseBaseName = CaseBaseName
self.Cases = Cases
self.CaseCategories = CaseCategories
self.CaseSubCategories = CaseSubCategories
## Index:
## "1.2.3" => Index (1-based) of Case1_2_3 in Cases
##
self.CasesIndices = {}
i = 1
for c in self.Cases:
self.CasesIndices[self.caseClasstoId(c)] = i
i += 1
## Index:
## "1.2.3" => Case1_2_3
##
self.CasesById = {}
for c in self.Cases:
self.CasesById[self.caseClasstoId(c)] = c
def caseClasstoId(self, klass):
"""
        Case1_2_3 => '1.2.3'  (for CaseBaseName == 'Case')
"""
l = len(self.CaseBaseName)
return '.'.join(klass.__name__[l:].split("_"))
def caseClasstoIdTuple(self, klass):
"""
        Case1_2_3 => (1, 2, 3)  (for CaseBaseName == 'Case')
"""
l = len(self.CaseBaseName)
return tuple([int(x) for x in klass.__name__[l:].split("_")])
def caseIdtoIdTuple(self, id):
"""
'1.2.3' => (1, 2, 3)
"""
return tuple([int(x) for x in id.split('.')])
def caseIdTupletoId(self, idt):
"""
(1, 2, 3) => '1.2.3'
"""
return '.'.join([str(x) for x in list(idt)])
def caseClassToPrettyDescription(self, klass):
"""
Truncates the rest of the description after the first HTML tag
and coalesces whitespace
"""
return ' '.join(klass.DESCRIPTION.split('<')[0].split())
def resolveCasePatternList(self, patterns):
"""
Return list of test cases that match against a list of case patterns.
"""
specCases = []
for c in patterns:
if c.find('*') >= 0:
s = c.replace('.', '\.').replace('*', '.*')
p = re.compile(s)
t = []
for x in self.CasesIndices.keys():
if p.match(x):
t.append(self.caseIdtoIdTuple(x))
for h in sorted(t):
specCases.append(self.caseIdTupletoId(h))
else:
specCases.append(c)
return specCases
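    # e.g. a pattern list like ['1.1.1', '2.*'] expands to the literal id plus
    # every registered case id matching the '2.*' wildcard, in sorted order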
def parseSpecCases(self, spec):
"""
Return list of test cases that match against case patterns, minus exclude patterns.
"""
specCases = self.resolveCasePatternList(spec["cases"])
if spec.has_key("exclude-cases"):
excludeCases = self.resolveCasePatternList(spec["exclude-cases"])
else:
excludeCases = []
c = list(set(specCases) - set(excludeCases))
cases = [self.caseIdTupletoId(y) for y in sorted([self.caseIdtoIdTuple(x) for x in c])]
return cases
def parseExcludeAgentCases(self, spec):
"""
Parses "exclude-agent-cases" from the spec into a list of pairs
of agent pattern and case pattern list.
"""
if spec.has_key("exclude-agent-cases"):
ee = spec["exclude-agent-cases"]
pats1 = []
for e in ee:
s1 = "^" + e.replace('.', '\.').replace('*', '.*') + "$"
p1 = re.compile(s1)
pats2 = []
for z in ee[e]:
s2 = "^" + z.replace('.', '\.').replace('*', '.*') + "$"
p2 = re.compile(s2)
pats2.append(p2)
pats1.append((p1, pats2))
return pats1
else:
return []
def checkAgentCaseExclude(self, patterns, agent, case):
"""
Check if we should exclude a specific case for given agent.
"""
for p in patterns:
if p[0].match(agent):
for pp in p[1]:
if pp.match(case):
return True
return False
def getCasesByAgent(self, spec):
caseIds = self.parseSpecCases(spec)
epats = self.parseExcludeAgentCases(spec)
res = []
for server in spec['testees']:
agent = server['name']
res2 = []
for caseId in caseIds:
if not self.checkAgentCaseExclude(epats, agent, caseId):
res2.append(self.CasesById[caseId])
if len(res2) > 0:
o = {}
o['name'] = str(server['name'])
o['url'] = str(server['url'])
o['auth'] = server.get('auth', None)
o['cases'] = res2
res.append(o)
return res
def generateCasesByTestee(self, spec):
caseIds = self.parseSpecCases(spec)
epats = self.parseExcludeAgentCases(spec)
res = {}
for obj in spec['testees']:
testee = obj['name']
res[testee] = []
for caseId in caseIds:
if not self.checkAgentCaseExclude(epats, testee, caseId):
res[testee].append(self.CasesById[caseId])
return res
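# --- Hedged usage sketch (added; not part of the original module) ---
# Assumes a case base named "Case" with three hypothetical case classes; it
# illustrates the id conversions and wildcard pattern resolution defined above.
class Case1_1_1: DESCRIPTION = "first case <b>details</b>"
class Case1_1_2: DESCRIPTION = "second case"
class Case1_2_1: DESCRIPTION = "third case"
cs = CaseSet("demo", "Case", [Case1_1_1, Case1_1_2, Case1_2_1], {}, {})
assert cs.caseClasstoId(Case1_1_2) == "1.1.2"
assert cs.caseIdtoIdTuple("1.1.2") == (1, 1, 2)
assert cs.resolveCasePatternList(["1.1.*"]) == ["1.1.1", "1.1.2"]
assert cs.caseClassToPrettyDescription(Case1_1_1) == "first case"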
|
[
"[email protected]"
] | |
1d345d9a8c171862cfa1e01799e6435fa831594f
|
6f82ba6b76690b983fbbcc65d12f4d2de955da3e
|
/math_functions.py
|
1cead63758acd463df2cc0760d1bbe5fda5aa68f
|
[] |
no_license
|
Hallurs/Assignment-13
|
854535493cc30b5337435ea899aec4912eaee363
|
a31f0e10de3bff359db12b7080daa7d69b121db0
|
refs/heads/main
| 2022-12-18T23:22:09.635711 | 2020-10-08T10:48:22 | 2020-10-08T10:48:22 | 302,302,005 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,919 |
py
|
import math
# first i create my functions
def sum_natural(n_str):
    ''' Sums the natural numbers from 1 to N; N must be a digit string with value >= 2, otherwise None is returned '''
    if not n_str.isdigit():
        return None
    n_str = int(n_str)
    if n_str < 2:
        return None
sum_of_int = 0
for i in range(1,n_str+1):
sum_of_int += i
return sum_of_int
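# Example (added): sum_natural("5") returns 1 + 2 + 3 + 4 + 5 = 15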
def sum_fibonacci(n_str):
    ''' Sums the first N Fibonacci numbers; N must be a digit string with value >= 2, otherwise None is returned '''
    if not n_str.isdigit():
        return None
    n_str = int(n_str)
    if n_str < 2:
        return None
fibo = 0
nr_1 = 0
nr_2 = 1
    # The running sum starts at 1 because the first two Fibonacci numbers (0 and 1) are already accounted for before the loop
    sum_of_fibonacci = 1
for i in range(2,n_str):
fibo = nr_1 + nr_2
nr_1 = nr_2
nr_2 = fibo
sum_of_fibonacci += fibo
    # The loop above generates the remaining Fibonacci numbers and adds each to the sum
return sum_of_fibonacci
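# Example (added): sum_fibonacci("5") returns 0 + 1 + 1 + 2 + 3 = 7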
def approximate_euler(n_str):
    ''' Approximates Euler's number e by summing the first N terms of the series 1/k!; N must be a digit string with value >= 2, otherwise None is returned '''
    if not n_str.isdigit():
        return None
    n_str = int(n_str)
    if n_str < 2:
        return None
euler_sum = 0
    # running sum of the series terms
for i in range(n_str):
euler_sum += (1/math.factorial(i))
return euler_sum
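# Example (added): approximate_euler("5") returns 1/0! + 1/1! + 1/2! + 1/3! + 1/4! ≈ 2.70833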
option = 0
while option != 'x':
print("Please choose one of the options below:")
print("a. Display the sum of the first N natural numbers. ")
print("b. Display the sum of the first N Fibonacci numbers. ")
print("c. Display the approximate value of e using N terms.")
print("x. Exit from the program.")
print()
option = input("Enter option: ")
    while True:
if option == 'a':
N = input("Enter N: ")
result = sum_natural(N)
            if result is None:
print("Error: {} was not a valid number.".format(N))
else:
print("Natural number sum: {}".format(result))
elif option == 'b':
N = input("Enter N: ")
result = sum_fibonacci(N)
            if result is None:
print("Error: {} was not a valid number.".format(N))
else:
print("Fibonacci sum: {}".format(result))
elif option == 'c':
N = input("Enter N: ")
result = approximate_euler(N)
            if result is None:
print("Error: {} was not a valid number.".format(N))
else:
print("Euler approximation: {:.5f}".format(result)
elif option == 'x':
break
else:
print("Unrecognized option",option)
break
option = input("Enter option: ")
|
[
"[email protected]"
] | |
a2c2e07a8afdcf2c8f91018caceb18c216081b48
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Kivy/kivy/examples/canvas/fbo_canvas.py
|
dd06928bdb98fedb7c9f34cb533e75a733227641
|
[
"MIT"
] |
permissive
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null |
UTF-8
|
Python
| false | false | 129 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:0cc3f5272ba46eb262d440a5c297b24905c455a2aa930e0baaa5f7f37b3486e6
size 2544
|
[
"[email protected]"
] | |
d25cb19db7748654e2fb0d84d8b6cf18d0112cbb
|
8c21b22001777846ae14840ef5f09b4746659c2d
|
/using_slider_as_palette/work.py
|
4ae4a86cbfbd2b6b0d9a269874b1af404063e791
|
[] |
no_license
|
nola222/opencv_tutorial
|
87fb3ea16e9125156c21d34f0186e2e5a34d5554
|
5b13851f11a03aa9cfb208abf9c423731d26606e
|
refs/heads/master
| 2021-07-01T07:46:09.642434 | 2021-02-23T08:15:30 | 2021-02-23T08:15:30 | 224,819,537 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,674 |
py
|
# -*- coding: utf-8 -*-
"""
时间: 2021/2/19 11:14
作者: [email protected]
更改记录:
重要说明:
结合鼠标事件,创建一个画板,可自选各种颜色画笔绘制各种图形
"""
import cv2
import numpy as np
def nothing(x):
    """Trackbar callback (intentionally a no-op)
    Args:
        x(int): current trackbar position, i.e. the number displayed on the slider
    Returns:
        None
    """
    pass
drawing = False  # becomes True while the left mouse button is held down; False by default
mode = True  # True (default) draws rectangles; pressing "m" switches to freehand curves
ix, iy = 1, -1  # starting point of the current shape
def draw_func(event, x, y, flags, param):
    """Mouse callback that draws on the canvas
    Args:
        event(int): mouse event type
        x(int): current x coordinate of the pointer
        y(int): current y coordinate of the pointer
        flags(int): event flags, used to check whether a button is held down
        param: extra user data (unused)
    Returns:
        None
    """
red = cv2.getTrackbarPos('R', 'image')
green = cv2.getTrackbarPos('G', 'image')
blue = cv2.getTrackbarPos('B', 'image')
color = (blue, green, red)
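    # note (added): OpenCV uses BGR channel order, hence (blue, green, red)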
global ix, iy, drawing, mode
    if event == cv2.EVENT_LBUTTONDOWN:  # left button pressed: start drawing, with the current position as anchor
drawing = True
ix, iy = x, y
    elif event == cv2.EVENT_MOUSEMOVE and flags == cv2.EVENT_FLAG_LBUTTON:  # dragging with the left button held: draw
        if drawing:
            if mode:
                cv2.rectangle(img, (ix, iy), (x, y), color, -1)  # filled rectangle
                # cv2.rectangle(img, (ix, iy), (x, y), color, 1)  # outlined rectangle
            else:
                cv2.circle(img, (x, y), 3, color, 1)  # hollow circle of radius 3
                # circle with the anchor as center and the anchor-to-pointer distance as radius (move the mouse in small steps)
                # r = int(np.sqrt((x - ix) ** 2 + (y - iy) ** 2))
                # cv2.circle(img, (x, y), r, color, -1)  # filled circle
    elif event == cv2.EVENT_LBUTTONUP:  # left button released: stop drawing
drawing = False
else:
pass
img = np.zeros((512, 512, 3), np.uint8)
cv2.namedWindow('image')
# createTrackbar args: trackbar name; host window name; initial position; maximum value; callback
cv2.createTrackbar('R', 'image', 0, 255, nothing)
cv2.createTrackbar('G', 'image', 0, 255, nothing)
cv2.createTrackbar('B', 'image', 0, 255, nothing)
cv2.setMouseCallback('image', draw_func)  # bind the mouse callback to the window
while 1:
cv2.imshow('image', img)
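    # note (added): masking with 0xFF keeps only the low byte so key codes compare reliably across platforms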
key = cv2.waitKey(1) & 0xFF
    if key == ord('m'):  # pressing "m" toggles between rectangle and curve mode
mode = not mode
elif key == 27: # ESC
break
cv2.destroyAllWindows()
|
[
"[email protected]"
] | |
235947219c8ee574917b46a1f375e11b1287752e
|
5cc409e8dbae06f5245475cbe33927f5653c2d4c
|
/Mask R-CNN/nucleusModel.py
|
b7231899f7737018925b91458a9b8d43b489ac1e
|
[
"MIT"
] |
permissive
|
ananthrn/Nucleus-Segmentation
|
69aebc6af9fb0308b9ab47f95b2b6b0580946461
|
5104ced7a72ce4eca8aa5ecb2bd70356fc4ba224
|
refs/heads/master
| 2020-03-25T01:06:53.655411 | 2018-08-02T01:21:48 | 2018-08-02T01:23:12 | 143,222,470 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,444 |
py
|
# AUTHOR: #kr2741
# Adapted from and modified : https://github.com/matterport/Mask_RCNN/tree/master/samples
import matplotlib.pyplot as plt
import os
import sys
import json
import datetime
import numpy as np
import random
import skimage.io
from imgaug import augmenters as iaa
ROOT_DIR = os.path.abspath("Mask_RCNN/")
sys.path.append(ROOT_DIR)
from mrcnn.config import Config
from mrcnn import utils
from mrcnn import model as modellib
from mrcnn import visualize
DATASET_DIR = os.path.abspath("../data/data-science-bowl-2018/stage1_train")
dataset_dir = os.path.abspath("../data/data-science-bowl-2018/")
IMAGE_IDS = os.listdir(DATASET_DIR)
DATASET_LEN = len(IMAGE_IDS)
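# note (added): hold out a random 30% of the images for validation; the rest are used for training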
VAL_IMAGE_IDS = list(random.sample(IMAGE_IDS, int(0.3*DATASET_LEN)))
TRAIN_IDS = list(set(IMAGE_IDS) - set(VAL_IMAGE_IDS))
assert set(TRAIN_IDS + VAL_IMAGE_IDS) == set(IMAGE_IDS)
class NucleusSegmentationTrainingConfig(Config):
NAME = "nucleus-segmentation-training"
IMAGES_PER_GPU = 4
NUM_CLASSES = 2
STEPS_PER_EPOCH = len(TRAIN_IDS) // IMAGES_PER_GPU
VALIDATION_STEPS = max(1, len(VAL_IMAGE_IDS) // IMAGES_PER_GPU)
DETECTION_MIN_CONFIDENCE = 0
BACKBONE = "resnet101"
IMAGE_RESIZE_MODE = "crop"
IMAGE_MIN_DIM = 512
IMAGE_MAX_DIM = 512
IMAGE_MIN_SCALE = 2.0
RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)
POST_NMS_ROIS_TRAINING = 1000
POST_NMS_ROIS_INFERENCE = 2000
RPN_NMS_THRESHOLD = 0.9
RPN_TRAIN_ANCHORS_PER_IMAGE = 64
TRAIN_ROIS_PER_IMAGE = 128
MAX_GT_INSTANCES = 200
DETECTION_MAX_INSTANCES = 400
class NucleusSegmentationInferenceConfig(NucleusSegmentationTrainingConfig):
GPU_COUNT = 1
IMAGES_PER_GPU = 1
IMAGE_RESIZE_MODE = "pad64"
RPN_NMS_THRESHOLD = 0.7
class NucleusSegmentationDataset(utils.Dataset):
# loads training data
def load_training_data(self, dataset_dir):
self.add_class("nucleus", 1, "nucleus")
subset_dir = "stage1_train"
dataset_dir = os.path.join(dataset_dir, subset_dir)
image_ids = next(os.walk(dataset_dir))[1]
image_ids = list(set(image_ids) - set(VAL_IMAGE_IDS))
for iid in image_ids:
self.add_image(
"nucleus",
image_id=iid,
path=os.path.join(dataset_dir, iid, "images/{}.png".format(iid)))
def load_val_data(self, dataset_dir):
self.add_class("nucleus", 1, "nucleus")
subset_dir = "stage1_train"
dataset_dir = os.path.join(dataset_dir, subset_dir)
image_ids = set(VAL_IMAGE_IDS)
for iid in image_ids:
self.add_image(
"nucleus",
image_id=iid,
path=os.path.join(dataset_dir, iid, "images/{}.png".format(iid)))
# loads mask for corresponding image
def load_mask(self, image_id):
info = self.image_info[image_id]
mask_dir = os.path.join(os.path.dirname(os.path.dirname(info['path'])), "masks")
mask = []
for f in next(os.walk(mask_dir))[2]:
if f.endswith(".png"):
m = skimage.io.imread(os.path.join(mask_dir, f)).astype(np.bool)
mask.append(m)
mask = np.stack(mask, axis=-1)
return mask, np.ones([mask.shape[-1]], dtype=np.int32)
# retrieves path on disk
def image_reference(self, image_id):
info = self.image_info[image_id]
if info["source"] == "nucleus":
return info["id"]
else:
super(self.__class__, self).image_reference(image_id)
if __name__ == '__main__':
config = NucleusSegmentationTrainingConfig()
config.display()
model = modellib.MaskRCNN(mode="training", config=config,
model_dir="logs")
"""
PRETRAINED IMAGENET WEIGHTS, available on Keras website
"""
weights_path = model.get_imagenet_weights()
"""
PRETRAINED COCO WEIGHTS, available at https://github.com/matterport/Mask_RCNN/releases
"""
# weights_path = "../mask_rcnn_coco.h5"
#
# weights_path = "logs/nucleus-segmentation-training20180426T1505/mask_rcnn_nucleus-segmentation-training_0020.h5"
# model.load_weights(weights_path, by_name=True, exclude=["mrcnn_class_logits", "mrcnn_bbox_fc","mrcnn_bbox", "mrcnn_mask"])
model.load_weights(weights_path, by_name=True)
train_ds = NucleusSegmentationDataset()
train_ds.load_training_data(dataset_dir)
train_ds.prepare()
val_ds = NucleusSegmentationDataset()
val_ds.load_val_data(dataset_dir)
val_ds.prepare()
augmentation = iaa.SomeOf((0, 2), [
iaa.Fliplr(0.7),
iaa.Flipud(0.4)
#iaa.Multiply((0.8, 1.2), per_channel=0.3),
#iaa.Affine(
# scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
#translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
#rotate=(-25, 25),
#shear=(-8, 8)
])
model.train(train_ds, val_ds,
learning_rate=config.LEARNING_RATE,
epochs = 20,
augmentation=augmentation,
layers='heads')
model.train(train_ds, val_ds,
learning_rate=config.LEARNING_RATE,
epochs = 30,
augmentation=augmentation,
layers='5+')
model.train(train_ds, val_ds,
learning_rate=config.LEARNING_RATE,
epochs = 50,
augmentation=augmentation,
layers='all')
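    # note (added): the three train() calls above implement staged fine-tuning --
    # the network heads first, then ResNet stage 5 and up, then the entire model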
|
[
"[email protected]"
] | |
3093cad6eacfccaaa18b9bc638fb5ae11680bfe4
|
e50dd6f593763e9e78af33b790e263a6cfb96289
|
/SOS_base_alpha_FINAL_testing/Client/registr.py
|
fed713920fc3cdca8de5ea005aea289118952a82
|
[] |
no_license
|
sos-ds/DS
|
f5ae599838375654bf66ff5452d37cf161e8e87f
|
75a6454bc3306e4288de5fdb7c05a29ad9e91149
|
refs/heads/master
| 2020-04-23T16:44:26.627099 | 2019-05-20T16:55:44 | 2019-05-20T16:55:44 | 171,308,031 | 0 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,705 |
py
|
import tkinter as tk
from tkinter import *
import socket
import sys
import sqlite3
import subprocess
root = Tk()
root.geometry('500x500')
root.title("SOS reģistrācija")
Fullname=StringVar()
Adrese=StringVar()
Telefon=StringVar()
var = IntVar()
c=StringVar()
var1= IntVar()
def save_info():
Fullname_info = Fullname.get()
Adrese_info = Adrese.get()
Telefon_info = Telefon.get()
Telefon_info = str(Telefon_info)
file = open("file_sūtīt_110.txt", "w", encoding="utf-8")
file.write(Fullname_info)
file.write("\n")
file.write(Adrese_info)
file.write("\n")
file.write(Telefon_info)
file.write("\n")
file.write("Policijas dienesta izsaukums" + "\n")
file.close()
file = open("file_sūtīt_112.txt", "w", encoding="utf-8")
file.write(Fullname_info)
file.write("\n")
file.write(Adrese_info)
file.write("\n")
file.write(Telefon_info)
file.write("\n")
file.write("Valsts ugunsdzesibas un glabsanas dienesta izsaukums" + "\n")
file.close()
file = open("file_sūtīt_113.txt", "w", encoding="utf-8")
file.write(Fullname_info)
file.write("\n")
file.write(Adrese_info)
file.write("\n")
file.write(Telefon_info)
file.write("\n")
file.write("Neatliekamas mediciniskas palidzibas dienesta izsaukums" + "\n")
file.close()
print(" User ", Fullname_info, " has been registered successfully")
def database():
name1=Fullname.get()
email=Adrese.get()
tel=Telefon.get()
gender=var.get()
country=c.get()
prog=var1.get()
conn = sqlite3.connect('SOS.db')
with conn:
cursor=conn.cursor()
cursor.execute('CREATE TABLE IF NOT EXISTS Klienti (Fullname TEXT,Adrese TEXT, Telefon TEXT, Gender TEXT, country TEXT)')
cursor.execute('INSERT INTO Klienti (FullName,Adrese,Telefon,Gender,country) VALUES(?,?,?,?,?)',(name1,email,tel,gender,country,))
conn.commit()
def registr_done():
root.destroy()
subprocess.call("sos_calling.py", shell=True)
label_0 = Label(root, text="SOS reģistrācija",width=20,font=("bold", 20))
label_0.place(x=90,y=53)
label_1 = Label(root, text="Vārds, Uzvārds",width=20,font=("bold", 10))
label_1.place(x=80,y=130)
entry_1 = Entry(root,textvar=Fullname)
entry_1.place(x=240,y=130)
label_2 = Label(root, text="Adrese",width=20,font=("bold", 10))
label_2.place(x=68,y=180)
entry_2 = Entry(root,textvar=Adrese)
entry_2.place(x=240,y=180)
label_5 = Label(root, text="Telefona numurs",width=20,font=("bold", 10))
label_5.place(x=68,y=230)
entry_5 = Entry(root,textvar=Telefon)
entry_5.place(x=240,y=230)
label_3 = Label(root, text="Dzimums",width=20,font=("bold", 10))
label_3.place(x=70,y=280)
Radiobutton(root, text="Sieviešu",padx = 0, variable=var, value=1).place(x=235,y=280)
Radiobutton(root, text="Vīriešu",padx = 40, variable=var, value=2).place(x=290,y=280)
label_4 = Label(root, text="Pilsēta", width=20, font=("bold", 10))
label_4.place(x=70,y=330)
list1 = ['Rīga','Liepāja','Daugavpils','Ventspils','Jūrmala','cita'];
droplist=OptionMenu(root,c, *list1)
droplist.config(width=18)
c.set('Izvēlies savu pilsētu')
droplist.place(x=240,y=330)
button = Button(root, text='Registrēties',width=20,bg='brown',fg='white',
command=lambda:[database(),save_info(),
registr_done()] ).place(x=180,y=400)
#root.quit = tk.Button(root, text="Ienākt", width=20,bg='brown',fg='white',
# command=root.destroy).place(x=180,y=430)
#root.quit.pack(side="bottom")
root.mainloop()
|
[
"[email protected]"
] | |
f695b79388c07e89cfa05c0175e698eadc9d3daa
|
8523daaf19e0250962b454d9c4f87f4c7d71ab9d
|
/models.py
|
d70630bbfa64fe60497c69c7bc15cf28c945160d
|
[] |
no_license
|
madtyn/cris
|
ad2fd35a05efb6829e96bd1aa39c86a0efa8102f
|
a45410e6a67f589ac7d392bebc1ee9725ff4cd1b
|
refs/heads/master
| 2020-11-30T17:42:09.675319 | 2020-01-19T10:36:32 | 2020-01-19T10:36:32 | 230,450,292 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,617 |
py
|
import datetime as dt
from enum import Enum
from collections import namedtuple
from indexes import FIRST_MONTH_COL, COLS_PER_MONTH
StudentMonth = namedtuple('StudentMonth', ['quota', 'number', 'processed'])
class Months(Enum):
OCTOBER = ('Octubre', 10)
NOVEMBER = ('Noviembre', 11)
DECEMBER = ('Diciembre', 12)
JANUARY = ('Enero', 1)
FEBRUARY = ('Febrero', 2)
MARCH = ('Marzo', 3)
APRIL = ('Abril', 4)
MAY = ('Mayo', 5)
JUNE = ('Junio', 6)
JULY = ('Julio', 7)
AUGUST = ('Agosto', 8)
SEPTEMBER = ('Septiembre', 9)
def __new__(cls, *args, **kwargs):
idx = FIRST_MONTH_COL + (len(cls.__members__) * COLS_PER_MONTH)
obj = object.__new__(cls)
obj._value_ = idx
obj.quota_idx = idx
obj.number_idx = idx + 1
obj.processed_idx = idx + 2
obj.trans = args[0]
obj.ordinal = args[1]
return obj
@classmethod
def get_month(cls, ordinal):
for m in cls:
if ordinal == m.ordinal:
return f'{m!s}'
def get_student_month(self, row):
return StudentMonth(row[self.quota_idx], row[self.number_idx], row[self.processed_idx])
def __ge__(self, other):
if self.__class__ is other.__class__:
return self.value >= other.value
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self.value > other.value
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self.value <= other.value
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self.value < other.value
return NotImplemented
def __str__(self):
return self.trans
class CommonInfo(object):
def __init__(self, teacher, nif, school_year, activity):
self.teacher = teacher
self.nif = nif
self.school_year = school_year
self.activity = activity
class Receipt(object):
header_tag = [
"Nombre del escolar: {student}",
"Número de recibo: {number}",
"Precio mensualidad: {quota}",
]
body_tag = [
"{teacher}, con NIF {nif}, ha recibido de los responsables del alumno / a anteriormente citado las",
"cantidades que se desglosan en este recibo en concepto de pago de la actividad \"{activity}\",",
"realizada durante el curso {school_year}",
]
sign_tag = ["A Coruña, {day} de {month} del {year}", ]
def __init__(self, info, student, student_month):
self.info = info
self.student = student
self.number = student_month.number
self.quota = student_month.quota
def header(self):
d = {
'student': self.student,
'number': self.number,
'quota': self.quota,
}
for line in self.header_tag:
yield line.format(**d)
def body(self):
d = {
'teacher': self.info.teacher,
'nif': self.info.nif,
'activity': self.info.activity,
'school_year': self.info.school_year,
}
for line in self.body_tag:
yield line.format(**d)
def sign(self):
d = {
'day': dt.datetime.today().day,
'month': Months.get_month(dt.datetime.today().month),
'year': dt.datetime.today().year
}
for line in self.sign_tag:
yield line.format(**d)
if __name__ == '__main__':
print()
print()
print()
|
[
"[email protected]"
] | |
7b39b7114c1044d33d060b4c2df7540ed2a28209
|
9167909f6407dbf848d3e82053a266a18067194e
|
/fuzzywuzzy/tests.py
|
e5549b26efd6cdf14a773a7ef103f0c1a8ae91de
|
[] |
no_license
|
ryanfb/reconciliation_service_skeleton
|
58611336b8d2a28e909243ee768e8a4e0a1b9b16
|
ccece354afc13b8e89b66a72a30a37638ce37931
|
refs/heads/master
| 2020-05-25T14:17:44.804848 | 2014-03-24T17:17:36 | 2014-03-24T17:17:36 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,232 |
py
|
from fuzz import *
import process
import utils
import itertools
import unittest
class RatioTest(unittest.TestCase):
def setUp(self):
self.s1 = "new york mets"
self.s1a = "new york mets"
self.s2 = "new YORK mets"
self.s3 = "the wonderful new york mets"
self.s4 = "new york mets vs atlanta braves"
self.s5 = "atlanta braves vs new york mets"
self.s6 = "new york mets - atlanta braves"
self.cirque_strings = [
"cirque du soleil - zarkana - las vegas",
"cirque du soleil ",
"cirque du soleil las vegas",
"zarkana las vegas",
"las vegas cirque du soleil at the bellagio",
"zarakana - cirque du soleil - bellagio"
]
self.baseball_strings = [
"new york mets vs chicago cubs",
"chicago cubs vs chicago white sox",
"philladelphia phillies vs atlanta braves",
"braves vs mets",
]
def tearDown(self):
pass
def testEqual(self):
self.assertEqual(ratio(self.s1, self.s1a),100)
def testCaseInsensitive(self):
self.assertNotEqual(ratio(self.s1, self.s2),100)
self.assertEqual(ratio(utils.full_process(self.s1), utils.full_process(self.s2)),100)
def testPartialRatio(self):
self.assertEqual(partial_ratio(self.s1, self.s3),100)
def testTokenSortRatio(self):
self.assertEqual(token_sort_ratio(self.s1, self.s1a),100)
def testPartialTokenSortRatio(self):
self.assertEqual(partial_token_sort_ratio(self.s1, self.s1a),100)
self.assertEqual(partial_token_sort_ratio(self.s4, self.s5),100)
def testTokenSetRatio(self):
self.assertEqual(token_set_ratio(self.s4, self.s5),100)
def testPartialTokenSetRatio(self):
self.assertEqual(partial_token_set_ratio(self.s4, self.s5),100)
def testQuickRatioEqual(self):
self.assertEqual(QRatio(self.s1, self.s1a), 100)
def testQuickRatioCaseInsensitive(self):
self.assertEqual(QRatio(self.s1, self.s2), 100)
def testQuickRatioNotEqual(self):
self.assertNotEqual(QRatio(self.s1, self.s3), 100)
def testWRatioEqual(self):
self.assertEqual(WRatio(self.s1, self.s1a), 100)
def testWRatioCaseInsensitive(self):
self.assertEqual(WRatio(self.s1, self.s2), 100)
def testWRatioPartialMatch(self):
# a partial match is scaled by .9
self.assertEqual(WRatio(self.s1, self.s3), 90)
def testWRatioMisorderedMatch(self):
# misordered full matches are scaled by .95
self.assertEqual(WRatio(self.s4, self.s5), 95)
# test processing methods
def testGetBestChoice1(self):
query = "new york mets at atlanta braves"
best = process.extractOne(query, self.baseball_strings)
self.assertEqual(best[0], "braves vs mets")
def testGetBestChoice2(self):
query = "philadelphia phillies at atlanta braves"
best = process.extractOne(query, self.baseball_strings)
self.assertEqual(best[0], self.baseball_strings[2])
def testGetBestChoice3(self):
query = "atlanta braves at philadelphia phillies"
best = process.extractOne(query, self.baseball_strings)
self.assertEqual(best[0], self.baseball_strings[2])
def testGetBestChoice4(self):
query = "chicago cubs vs new york mets"
best = process.extractOne(query, self.baseball_strings)
self.assertEqual(best[0], self.baseball_strings[0])
class ProcessTest(unittest.TestCase):
def setUp(self):
self.s1 = "new york mets"
self.s1a = "new york mets"
self.s2 = "new YORK mets"
self.s3 = "the wonderful new york mets"
self.s4 = "new york mets vs atlanta braves"
self.s5 = "atlanta braves vs new york mets"
self.s6 = "new york mets - atlanta braves"
self.cirque_strings = [
"cirque du soleil - zarkana - las vegas",
"cirque du soleil ",
"cirque du soleil las vegas",
"zarkana las vegas",
"las vegas cirque du soleil at the bellagio",
"zarakana - cirque du soleil - bellagio"
]
self.baseball_strings = [
"new york mets vs chicago cubs",
"chicago cubs vs chicago white sox",
"philladelphia phillies vs atlanta braves",
"braves vs mets",
]
def testWithProcessor(self):
events = [
["chicago cubs vs new york mets", "CitiField", "2011-05-11", "8pm"],
["new york yankees vs boston red sox", "Fenway Park", "2011-05-11", "8pm"],
["atlanta braves vs pittsburgh pirates", "PNC Park", "2011-05-11", "8pm"],
]
query = "new york mets vs chicago cubs"
processor = lambda event: event[0]
best = process.extractOne(query, events, processor=processor)
self.assertEqual(best[0], events[0])
def testWithScorer(self):
choices = [
"new york mets vs chicago cubs",
"chicago cubs at new york mets",
"atlanta braves vs pittsbugh pirates",
"new york yankees vs boston red sox"
]
# in this hypothetical example we care about ordering, so we use quick ratio
query = "new york mets at chicago cubs"
scorer = QRatio
        # first, as an example, the normal way would select the more "complete" match, choices[1]
best = process.extractOne(query, choices)
self.assertEqual(best[0], choices[1])
# now, use the custom scorer
best = process.extractOne(query, choices, scorer=scorer)
self.assertEqual(best[0], choices[0])
def testWithCutoff(self):
choices = [
"new york mets vs chicago cubs",
"chicago cubs at new york mets",
"atlanta braves vs pittsbugh pirates",
"new york yankees vs boston red sox"
]
query = "los angeles dodgers vs san francisco giants"
# in this situation, this is an event that does not exist in the list
# we don't want to randomly match to something, so we use a reasonable cutoff
best = process.extractOne(query, choices, score_cutoff=50)
self.assertIsNone(best)
# however if we had no cutoff, something would get returned
best = process.extractOne(query, choices)
self.assertIsNotNone(best)
def testEmptyStrings(self):
choices = [
"",
"new york mets vs chicago cubs",
"new york yankees vs boston red sox",
"",
""
]
query = "new york mets at chicago cubs"
best = process.extractOne(query, choices)
self.assertEqual(best[0], choices[1])
def testNullStrings(self):
choices = [
None,
"new york mets vs chicago cubs",
"new york yankees vs boston red sox",
None,
None
]
query = "new york mets at chicago cubs"
best = process.extractOne(query, choices)
self.assertEqual(best[0], choices[1])
if __name__ == '__main__':
unittest.main() # run all tests
|
[
"[email protected]"
] | |
f2257a66a17f8b82a0bb0a42260b553d534f2889
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/valid_20200616203432.py
|
d8f31778d62ce6a0b3ed7002c575bb9870500ea2
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 265 |
py
|
# IPv4 --> 4 decimal numbers, each between 0 and 255
# leading zeros are invalid
# check whether each part is a digit string between 0 and 255
def valid(address_str):
    address = address_str.split(".")
    if len(address) != 4:
        return False
    for a in address:
        # reject non-digits, leading zeros, and values above 255
        if not a.isdigit() or (len(a) > 1 and a[0] == "0") or int(a) > 255:
            return False
    return True

print(valid("172.16.254.01"))  # False: leading zero in the last octet
|
[
"[email protected]"
] | |
c25ceac5b087067da044a667efdbdb6828f40d20
|
47c102b18fe168ce26d0db31a2e678290e2e2185
|
/HomeWork1.py
|
e09ae846faffed5ac72f45f07ad47db7d44485b7
|
[] |
no_license
|
maredmx/proba-statistics3C
|
54981ff114263328d7d938ae85c9c2499a74255e
|
92db592e98567d6cf78ae4a65c5a47a626c47328
|
refs/heads/master
| 2020-08-30T09:23:45.337604 | 2019-11-25T05:26:54 | 2019-11-25T05:26:54 | 218,332,373 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,436 |
py
|
#!/usr/bin/env python
# coding: utf-8
# ## HOMEWORK
# ## 1. The print command is used to write text on the screen
# In[8]:
print("Hola mundo")
print("Hi world")
print("Hello earth")
# ## 2. Comments are used to make annotations in the code so the programmer can read them later
# In[1]:
#this is a commentary
#this is other commentary
print("I learned about how to use the commentaries")
#this is the last commentary
print("Bye world")
# ## 3. Number and math commands are used to perform numeric and mathematical operations in the code
# In[2]:
print("I will now count how much money used on the week:")
print("The money that I have daily", 100 * 7 )
print("My cost of the travel to the university", 700 - 150)
# ## 4. Variables and names are used to store values or constants in the code
# In[14]:
salary_of_day = 35
taxes_of_day = 15
total_of_salary = salary_of_day - taxes_of_day
print("I have my salary for the day", salary_of_day)
print("and I need to pay the taxes for the day", taxes_of_day)
print("My salary really is", total_of_salary)
# ## 5. More variables and printing, which work the same way but are more complex
# In[12]:
name = 'Cristian'
gender = 'male'
age = 22
country = 'Mexico'
print("Hello my name is", name)
print("My gender is", gender)
print("I am", age, "years old")
print("I'm from", country)
|
[
"[email protected]"
] |